diff --git a/go.mod b/go.mod
index fb9e33110..eae7c6b91 100644
--- a/go.mod
+++ b/go.mod
@@ -5,7 +5,7 @@ go 1.21
 exclude github.com/mattn/go-sqlite3 v2.0.3+incompatible

 require (
-	cloud.google.com/go/compute v1.27.0
+	cloud.google.com/go/compute v1.27.1
 	cloud.google.com/go/storage v1.42.0
 	github.com/Azure/azure-sdk-for-go v68.0.0+incompatible
 	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0
@@ -16,7 +16,7 @@ require (
 	github.com/Azure/go-autorest/autorest v0.11.29
 	github.com/Azure/go-autorest/autorest/azure/auth v0.5.13
 	github.com/BurntSushi/toml v1.4.0
-	github.com/aws/aws-sdk-go v1.54.2
+	github.com/aws/aws-sdk-go v1.54.10
 	github.com/coreos/go-semver v0.3.1
 	github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
 	github.com/deepmap/oapi-codegen v1.8.2
@@ -36,7 +36,7 @@ require (
 	github.com/labstack/gommon v0.4.2
 	github.com/openshift-online/ocm-sdk-go v0.1.425
 	github.com/oracle/oci-go-sdk/v54 v54.0.0
-	github.com/osbuild/images v0.66.0
+	github.com/osbuild/images v0.69.0
 	github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20231117174845-e969a9dc3cd1
 	github.com/osbuild/pulp-client v0.1.0
 	github.com/prometheus/client_golang v1.19.1
@@ -45,17 +45,17 @@ require (
 	github.com/spf13/cobra v1.8.1
 	github.com/stretchr/testify v1.9.0
 	github.com/ubccr/kerby v0.0.0-20170626144437-201a958fc453
-	github.com/vmware/govmomi v0.37.3
-	golang.org/x/exp v0.0.0-20240222234643-814bf88cf225
+	github.com/vmware/govmomi v0.38.0
+	golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
 	golang.org/x/oauth2 v0.21.0
 	golang.org/x/sync v0.7.0
 	golang.org/x/sys v0.21.0
-	google.golang.org/api v0.183.0
+	google.golang.org/api v0.186.0
 )

 require (
-	cloud.google.com/go v0.114.0 // indirect
-	cloud.google.com/go/auth v0.5.1 // indirect
+	cloud.google.com/go v0.115.0 // indirect
+	cloud.google.com/go/auth v0.6.0 // indirect
 	cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
 	cloud.google.com/go/compute/metadata v0.3.0 // indirect
 	cloud.google.com/go/iam v1.1.8 // indirect
@@ -71,8 +71,8 @@ require (
 	github.com/Azure/go-autorest/logger v0.2.1 // indirect
 	github.com/Azure/go-autorest/tracing v0.6.0 // indirect
 	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
-	github.com/Microsoft/go-winio v0.6.1 // indirect
-	github.com/Microsoft/hcsshim v0.12.0-rc.3 // indirect
+	github.com/Microsoft/go-winio v0.6.2 // indirect
+	github.com/Microsoft/hcsshim v0.12.3 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
@@ -80,21 +80,21 @@ require (
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
-	github.com/containerd/cgroups/v3 v3.0.2 // indirect
+	github.com/containerd/cgroups/v3 v3.0.3 // indirect
 	github.com/containerd/errdefs v0.1.0 // indirect
 	github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
-	github.com/containers/common v0.58.3 // indirect
-	github.com/containers/image/v5 v5.30.1 // indirect
+	github.com/containers/common v0.59.1 // indirect
+	github.com/containers/image/v5 v5.31.1 // indirect
 	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
-	github.com/containers/ocicrypt v1.1.9 // indirect
-	github.com/containers/storage v1.53.0 // indirect
+	github.com/containers/ocicrypt v1.1.10 // indirect
+	github.com/containers/storage v1.54.0 // indirect
 	github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect
-	github.com/cyphar/filepath-securejoin v0.2.4 // indirect
-	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/cyphar/filepath-securejoin v0.2.5 // indirect
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/dimchansky/utfbom v1.1.1 // indirect
-	github.com/distribution/reference v0.5.0 // indirect
+	github.com/distribution/reference v0.6.0 // indirect
 	github.com/docker/distribution v2.8.3+incompatible // indirect
-	github.com/docker/docker v25.0.5+incompatible // indirect
+	github.com/docker/docker v26.1.3+incompatible // indirect
 	github.com/docker/docker-credential-helpers v0.8.1 // indirect
 	github.com/docker/go-connections v0.5.0 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
@@ -104,33 +104,33 @@ require (
 	github.com/go-jose/go-jose/v3 v3.0.3 // indirect
 	github.com/go-logr/logr v1.4.1 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
-	github.com/go-openapi/analysis v0.21.4 // indirect
-	github.com/go-openapi/errors v0.21.1 // indirect
-	github.com/go-openapi/jsonpointer v0.19.6 // indirect
-	github.com/go-openapi/jsonreference v0.20.2 // indirect
-	github.com/go-openapi/loads v0.21.2 // indirect
-	github.com/go-openapi/runtime v0.26.0 // indirect
-	github.com/go-openapi/spec v0.20.9 // indirect
-	github.com/go-openapi/strfmt v0.22.2 // indirect
-	github.com/go-openapi/swag v0.22.10 // indirect
-	github.com/go-openapi/validate v0.22.1 // indirect
+	github.com/go-openapi/analysis v0.23.0 // indirect
+	github.com/go-openapi/errors v0.22.0 // indirect
+	github.com/go-openapi/jsonpointer v0.21.0 // indirect
+	github.com/go-openapi/jsonreference v0.21.0 // indirect
+	github.com/go-openapi/loads v0.22.0 // indirect
+	github.com/go-openapi/runtime v0.28.0 // indirect
+	github.com/go-openapi/spec v0.21.0 // indirect
+	github.com/go-openapi/strfmt v0.23.0 // indirect
+	github.com/go-openapi/swag v0.23.0 // indirect
+	github.com/go-openapi/validate v0.24.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
 	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
 	github.com/golang/glog v1.2.0 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
-	github.com/google/go-containerregistry v0.19.0 // indirect
+	github.com/google/go-containerregistry v0.19.1 // indirect
 	github.com/google/go-intervals v0.0.2 // indirect
 	github.com/google/s2a-go v0.1.7 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
-	github.com/googleapis/gax-go/v2 v2.12.4 // indirect
+	github.com/googleapis/gax-go/v2 v2.12.5 // indirect
 	github.com/gorilla/css v1.0.0 // indirect
 	github.com/gorilla/mux v1.8.1 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
-	github.com/hashicorp/go-version v1.6.0 // indirect
+	github.com/hashicorp/go-version v1.7.0 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jackc/chunkreader/v2 v2.0.1 // indirect
 	github.com/jackc/pgconn v1.14.3 // indirect
@@ -142,7 +142,7 @@ require (
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.17.7 // indirect
+	github.com/klauspost/compress v1.17.8 // indirect
 	github.com/klauspost/pgzip v1.2.6 // indirect
 	github.com/kr/text v0.2.0 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
@@ -151,13 +151,13 @@ require (
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/mattn/go-runewidth v0.0.15 // indirect
-	github.com/mattn/go-shellwords v1.0.12 // indirect
 	github.com/mattn/go-sqlite3 v1.14.22 // indirect
 	github.com/microcosm-cc/bluemonday v1.0.23 // indirect
 	github.com/miekg/pkcs11 v1.1.1 // indirect
 	github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
+	github.com/moby/docker-image-spec v1.3.1 // indirect
 	github.com/moby/sys/mountinfo v0.7.1 // indirect
 	github.com/moby/sys/user v0.1.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
@@ -170,29 +170,29 @@ require (
 	github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
 	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
 	github.com/pkg/errors v0.9.1 // indirect
-	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/proglottis/gpgme v0.1.3 // indirect
-	github.com/prometheus/client_model v0.5.0 // indirect
-	github.com/prometheus/common v0.48.0 // indirect
+	github.com/prometheus/client_model v0.6.0 // indirect
+	github.com/prometheus/common v0.51.1 // indirect
 	github.com/prometheus/procfs v0.12.0 // indirect
-	github.com/rivo/uniseg v0.4.4 // indirect
+	github.com/rivo/uniseg v0.4.7 // indirect
 	github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
-	github.com/sigstore/fulcio v1.4.3 // indirect
-	github.com/sigstore/rekor v1.2.2 // indirect
-	github.com/sigstore/sigstore v1.8.2 // indirect
+	github.com/sigstore/fulcio v1.4.5 // indirect
+	github.com/sigstore/rekor v1.3.6 // indirect
+	github.com/sigstore/sigstore v1.8.3 // indirect
 	github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
 	github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
-	github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
-	github.com/sylabs/sif/v2 v2.15.1 // indirect
+	github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect
+	github.com/sylabs/sif/v2 v2.16.0 // indirect
 	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
 	github.com/tchap/go-patricia/v2 v2.3.1 // indirect
 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
-	github.com/ulikunitz/xz v0.5.11 // indirect
+	github.com/ulikunitz/xz v0.5.12 // indirect
 	github.com/valyala/bytebufferpool v1.0.0 // indirect
 	github.com/valyala/fasttemplate v1.2.2 // indirect
 	github.com/vbatts/tar-split v0.11.5 // indirect
-	github.com/vbauerster/mpb/v8 v8.7.2 // indirect
+	github.com/vbauerster/mpb/v8 v8.7.3 // indirect
 	go.mongodb.org/mongo-driver v1.14.0 // indirect
 	go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
 	go.opencensus.io v0.24.0 // indirect
@@ -208,11 +208,11 @@ require (
 	golang.org/x/text v0.16.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
 	golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
-	google.golang.org/genproto v0.0.0-20240528184218-531527333157 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
+	google.golang.org/genproto v0.0.0-20240617180043-68d350f18fd4 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4 // indirect
 	google.golang.org/grpc v1.64.0 // indirect
-	google.golang.org/protobuf v1.34.1 // indirect
+	google.golang.org/protobuf v1.34.2 // indirect
 	gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
diff --git a/go.sum b/go.sum
index 7ca39625e..bd9cb4389 100644
--- a/go.sum
+++ b/go.sum
@@ -1,12 +1,12 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.114.0 h1:OIPFAdfrFDFO2ve2U7r/H5SwSbBzEdrBdE7xkgwc+kY=
-cloud.google.com/go v0.114.0/go.mod h1:ZV9La5YYxctro1HTPug5lXH/GefROyW8PPD4T8n9J8E=
-cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw=
-cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s=
+cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14=
+cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU=
+cloud.google.com/go/auth v0.6.0 h1:5x+d6b5zdezZ7gmLWD1m/xNjnaQ2YDhmIz/HH3doy1g=
+cloud.google.com/go/auth v0.6.0/go.mod h1:b4acV+jLQDyjwm4OXHYjNvRi4jvGBzHWJRtJcy+2P4g=
 cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
 cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
-cloud.google.com/go/compute v1.27.0 h1:EGawh2RUnfHT5g8f/FX3Ds6KZuIBC77hZoDrBvEZw94=
-cloud.google.com/go/compute v1.27.0/go.mod h1:LG5HwRmWFKM2C5XxHRiNzkLLXW48WwvyVC0mfWsYPOM=
+cloud.google.com/go/compute v1.27.1 h1:0WbBLIPNANheCRZ4h8QhgzjN53KMutbiVBOLtPiVzBU=
+cloud.google.com/go/compute v1.27.1/go.mod h1:UVWm+bWKEKoM+PW2sZycP1Jgk3NhKwR2Iy2Cnp/G40I=
 cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
 cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
 cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0=
@@ -72,21 +72,18 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
 github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
 github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
 github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
-github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
-github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
-github.com/Microsoft/hcsshim v0.12.0-rc.3 h1:5GNGrobGs/sN/0nFO21W9k4lFn+iXXZAE8fCZbmdRak=
-github.com/Microsoft/hcsshim v0.12.0-rc.3/go.mod h1:WuNfcaYNaw+KpCEsZCIM6HCEmu0c5HfXpi+dDSmveP0=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
+github.com/Microsoft/hcsshim v0.12.3 h1:LS9NXqXhMoqNCplK1ApmVSfB4UnVLRDWRapB6EIlxE0=
+github.com/Microsoft/hcsshim v0.12.3/go.mod
h1:Iyl1WVpZzr+UkzjekHZbV8o5Z9ZkxNGx6CtY2Qg/JVQ= github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= -github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-sdk-go v1.54.2 h1:Wo6AVWcleNHrYa48YzfYz60hzxGRqsJrK5s/qePe+3I= -github.com/aws/aws-sdk-go v1.54.2/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go v1.54.10 h1:dvkMlAttUsyacKj2L4poIQBLzOSWL2JG2ty+yWrqets= +github.com/aws/aws-sdk-go v1.54.10/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -100,24 +97,24 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0= -github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE= +github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= +github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM= github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU= github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk= -github.com/containers/common v0.58.3 h1:Iy/CdYjluEK926QT+ejonz7YvoRHazeW7BAiLIkmUQ4= -github.com/containers/common v0.58.3/go.mod h1:p4V1SNk+WOISgp01m+axuqCUxaDP3WSZPPzvnJnS/cQ= -github.com/containers/image/v5 v5.30.1 h1:AKrQMgOKI1oKx5FW5eoU2xoNyzACajHGx1O3qxobvFM= -github.com/containers/image/v5 v5.30.1/go.mod h1:gSD8MVOyqBspc0ynLsuiMR9qmt8UQ4jpVImjmK0uXfk= +github.com/containers/common v0.59.1 h1:7VkmJN3YvD0jLFwaUjLHSRJ98JLffydiyOJjYr0dUTo= +github.com/containers/common v0.59.1/go.mod h1:53VicJCZ2AD0O+Br7VVoyrS7viXF4YmwlTIocWUT8XE= +github.com/containers/image/v5 v5.31.1 h1:3x9soI6Biml/GiDLpkSmKrkRSwVGctxu/vONpoUdklA= +github.com/containers/image/v5 v5.31.1/go.mod h1:5QfOqSackPkSbF7Qxc1DnVNnPJKQ+KWLkfEfDpK590Q= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 
h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= -github.com/containers/ocicrypt v1.1.9 h1:2Csfba4jse85Raxk5HIyEk8OwZNjRvfkhEGijOjIdEM= -github.com/containers/ocicrypt v1.1.9/go.mod h1:dTKx1918d8TDkxXvarscpNVY+lyPakPNFN4jwA9GBys= -github.com/containers/storage v1.53.0 h1:VSES3C/u1pxjTJIXvLrSmyP7OBtDky04oGu07UvdTEA= -github.com/containers/storage v1.53.0/go.mod h1:pujcoOSc+upx15Jirdkebhtd8uJiLwbSd/mYT6zDJK8= +github.com/containers/ocicrypt v1.1.10 h1:r7UR6o8+lyhkEywetubUUgcKFjOWOaWz8cEBrCPX0ic= +github.com/containers/ocicrypt v1.1.10/go.mod h1:YfzSSr06PTHQwSTUKqDSjish9BeW1E4HUmreluQcMd8= +github.com/containers/storage v1.54.0 h1:xwYAlf6n9OnIlURQLLg3FYHbO74fQ/2W2N6EtQEUM4I= +github.com/containers/storage v1.54.0/go.mod h1:PlMOoinRrBSnhYODLxt4EXl0nmJt+X0kjG0Xdt9fMTw= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= @@ -130,24 +127,25 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f h1:eHnXnuK47UlSTOQexbzxAZfekVz6i+LKRdj1CU5DPaM= github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= -github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= -github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/cyphar/filepath-securejoin v0.2.5 h1:6iR5tXJ/e6tJZzzdMc1km3Sa7RRIVBKAK32O2s7AYfo= +github.com/cyphar/filepath-securejoin v0.2.5/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU= github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= -github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= -github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v25.0.3+incompatible h1:KLeNs7zws74oFuVhgZQ5ONGZiXUUdgsdy6/EsX/6284= -github.com/docker/cli v25.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod 
h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/cli v26.1.3+incompatible h1:bUpXT/N0kDE3VUHI2r5VMsYQgi38kYuoC0oL9yt3lqc= +github.com/docker/cli v26.1.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= +github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo= github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= @@ -187,73 +185,33 @@ github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= -github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= -github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= -github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.21.1 h1:rVisxQPdETctjlYntm0Ek4dKf68nAQocCloCT50vWuI= -github.com/go-openapi/errors v0.21.1/go.mod h1:LyiY9bgc7AVVh6wtVvMYEyoj3KJYNoRw92mmvnMWgj8= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= +github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= +github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w= +github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= -github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= 
-github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= -github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc= -github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ= -github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= -github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= -github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8= -github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= -github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= -github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= -github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= -github.com/go-openapi/strfmt v0.22.2 h1:DPYOrm6gexCfZZfXUaXFS4+Jw6HAaIIG0SZ5630f8yw= -github.com/go-openapi/strfmt v0.22.2/go.mod h1:HB/b7TCm91rno75Dembc1dFW/0FPLk5CEXsoF9ReNc4= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= +github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= +github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ= +github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= +github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= +github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/swag v0.22.10 h1:4y86NVn7Z2yYd6pfS4Z+Nyh3aAUL3Nul+LMbhFKy0gA= -github.com/go-openapi/swag v0.22.10/go.mod h1:Cnn8BYtRlx6BNE3DPN86f/xkapGIcLWzh3CLEb4C1jI= -github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= -github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= +github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod 
h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= -github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= -github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= -github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= -github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= -github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= -github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= -github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= -github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= -github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= -github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= -github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= -github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= -github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= -github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= -github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= -github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= @@ -286,7 +244,6 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/lint-1 
v0.0.0-20181222135242-d2cdd8c08219 h1:utua3L2IbQJmauC5IXdEA547bcoU5dozgQAfc8Onsg4= github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -294,32 +251,30 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.19.0 h1:uIsMRBV7m/HDkDxE/nXMnv1q+lOOSPlQ/ywc5JbB8Ic= -github.com/google/go-containerregistry v0.19.0/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ= +github.com/google/go-containerregistry v0.19.1 h1:yMQ62Al6/V0Z7CqIrrS1iYoA5/oQCm88DeNujc7C1KY= +github.com/google/go-containerregistry v0.19.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI= github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM= github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= -github.com/google/pprof v0.0.0-20230323073829-e72429f035bd h1:r8yyd+DJDmsUhGrRBxH5Pj7KeFK5l+Y3FsgT8keqKtk= -github.com/google/pprof v0.0.0-20230323073829-e72429f035bd/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= -github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= +github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA= 
+github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E= github.com/gophercloud/gophercloud v1.12.0 h1:Jrz16vPAL93l80q16fp8NplrTCp93y7rZh2P3Q4Yq7g= github.com/gophercloud/gophercloud v1.12.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY= @@ -327,8 +282,8 @@ github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 h1:RtRsiaGvWxcwd8y3BiRZxsylPT8hLWZ5SPcfI+3IDNk= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0/go.mod h1:TzP6duP4Py2pHLVPPQp42aoYI92+PCrVotyR5e8Vqlk= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -340,9 +295,8 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/itchyny/gojq v0.12.7 h1:hYPTpeWfrJ1OT+2j6cvBScbhl0TkdwGM4bc66onUSOQ= @@ -406,20 +360,16 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGw github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/karrick/godirwalk 
v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= -github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= -github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= +github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b h1:iNjcivnc6lhbvJA3LD622NPrUponluJrBWPIwGG/3Bg= @@ -427,7 +377,6 @@ github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b/go.mod h1:pcaDhQK0/NJZ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -453,11 +402,8 @@ github.com/lib/pq v1.10.5 h1:J+gdV2cUmX7ZqL2B0lFcW0m+egaHC2V3lpO8nWxyYiQ= github.com/lib/pq v1.10.5/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -476,8 +422,6 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= -github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-sqlite3 
v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/microcosm-cc/bluemonday v1.0.23 h1:SMZe2IGa0NuHvnVNAZ+6B38gsTbi5e4sViiWJyDDqFY= @@ -488,10 +432,10 @@ github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPn github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g= github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg= @@ -503,16 +447,14 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onsi/ginkgo/v2 v2.14.0 h1:vSmGj2Z5YPb9JwCWT6z6ihcUvDhuXLc3sJiqd3jMKAY= -github.com/onsi/ginkgo/v2 v2.14.0/go.mod h1:JkUdW7JkN0V6rFvsHcJ478egV3XH9NxpD27Hal/PhZw= -github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= -github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/ginkgo/v2 v2.18.0 h1:W9Y7IWXxPUpAit9ieMOLI7PJZGaW22DTKgiVAuhDTLc= +github.com/onsi/ginkgo/v2 v2.18.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= @@ -525,43 +467,38 @@ github.com/openshift-online/ocm-sdk-go v0.1.425 h1:QmT9XnbZc1/SKp4QkHe2dnsln87wy github.com/openshift-online/ocm-sdk-go v0.1.425/go.mod 
h1:CiAu2jwl3ITKOxkeV0Qnhzv4gs35AmpIzVABQLtcI2Y= github.com/oracle/oci-go-sdk/v54 v54.0.0 h1:CDLjeSejv2aDpElAJrhKpi6zvT/zhZCZuXchUUZ+LS4= github.com/oracle/oci-go-sdk/v54 v54.0.0/go.mod h1:+t+yvcFGVp+3ZnztnyxqXfQDsMlq8U25faBLa+mqCMc= -github.com/osbuild/images v0.66.0 h1:sQEf5Ny/II13A2d4WPuNd7xyVDc55D3T2Ec3ctH3/Bw= -github.com/osbuild/images v0.66.0/go.mod h1:kkiJNrd0XkVfwBxrJ8wWt6/d0+Eb+tG+zZVnw/xXE/8= +github.com/osbuild/images v0.69.0 h1:kSndbM+WBS9ycNvUY3ejwyKsPtCTRpndvqpzM0/Cgnk= +github.com/osbuild/images v0.69.0/go.mod h1:39yKeYqV0v9OMkmlHN42+4UHx/iKhbeu6zFcjvZlyno= github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20231117174845-e969a9dc3cd1 h1:UFEJIcPa46W8gtWgOYzriRKYyy1t6SWL0BI7fPTuVvc= github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20231117174845-e969a9dc3cd1/go.mod h1:z+WA+dX6qMwc7fqY5jCzESDIlg4WR2sBQezxsoXv9Ik= github.com/osbuild/pulp-client v0.1.0 h1:L0C4ezBJGTamN3BKdv+rKLuq/WxXJbsFwz/Hj7aEmJ8= github.com/osbuild/pulp-client v0.1.0/go.mod h1:rd/MLdfwwO2cQI1s056h8z32zAi3Bo90XhlAAryIvWc= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M= github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= -github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= -github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= -github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0= github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0= github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= -github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= -github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= 
+github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= +github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= +github.com/prometheus/common v0.51.1 h1:eIjN50Bwglz6a/c3hAgSMcofL3nD+nFQkV6Dd4DsQCw= +github.com/prometheus/common v0.51.1/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= -github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= @@ -581,13 +518,12 @@ github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NF github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/sigstore/fulcio v1.4.3 h1:9JcUCZjjVhRF9fmhVuz6i1RyhCc/EGCD7MOl+iqCJLQ= -github.com/sigstore/fulcio v1.4.3/go.mod h1:BQPWo7cfxmJwgaHlphUHUpFkp5+YxeJes82oo39m5og= -github.com/sigstore/rekor v1.2.2 h1:5JK/zKZvcQpL/jBmHvmFj3YbpDMBQnJQ6ygp8xdF3bY= -github.com/sigstore/rekor v1.2.2/go.mod h1:FGnWBGWzeNceJnp0x9eDFd41mI8aQqCjj+Zp0IEs0Qg= -github.com/sigstore/sigstore v1.8.2 h1:0Ttjcn3V0fVQXlYq7+oHaaHkGFIt3ywm7SF4JTU/l8c= -github.com/sigstore/sigstore v1.8.2/go.mod h1:CHVcSyknCcjI4K2ZhS1SI28r0tcQyBlwtALG536x1DY= -github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sigstore/fulcio v1.4.5 h1:WWNnrOknD0DbruuZWCbN+86WRROpEl3Xts+WT2Ek1yc= +github.com/sigstore/fulcio v1.4.5/go.mod h1:oz3Qwlma8dWcSS/IENR/6SjbW4ipN0cxpRVfgdsjMU8= +github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8= +github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc= +github.com/sigstore/sigstore v1.8.3 h1:G7LVXqL+ekgYtYdksBks9B38dPoIsbscjQJX/MGWkA4= +github.com/sigstore/sigstore v1.8.3/go.mod h1:mqbTEariiGA94cn6G3xnDiV6BD8eSLdL/eA7bvJ0fVs= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= @@ -596,14 +532,12 @@ github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EE github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b 
h1:br+bPNZsJWKicw/5rALEo67QHs5weyD5tf8WST+4sJ0= github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I= -github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw= +github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= @@ -622,19 +556,18 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/sylabs/sif/v2 v2.15.1 h1:75BcunPOY11fVhe02/WHuNLTfDd3OHH0ex0MuuNMYX0= -github.com/sylabs/sif/v2 v2.15.1/go.mod h1:YiwCUdZOhiohnPbyxuxvCZa+03HwAaiC+vfAKZPR8nQ= +github.com/sylabs/sif/v2 v2.16.0 h1:2eqaBaQQsn5DZTzm3QZm0HupZQEjNXfxRnCmtyCihEU= +github.com/sylabs/sif/v2 v2.16.0/go.mod h1:d5TxgD/mhMUU3kWLmZmWJQ99Wg0asaTP0bq3ezR1xpg= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes= github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= github.com/ubccr/kerby v0.0.0-20170626144437-201a958fc453 h1:rN0NwUFS6oK9ESlk2QyKfucb/gL4opUutNlCS2bBlvA= github.com/ubccr/kerby v0.0.0-20170626144437-201a958fc453/go.mod h1:s59e1aOY3F3KNsRx5W8cMdbtbt49aSKL7alLp6EKn48= -github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= -github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= +github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool 
v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= @@ -643,29 +576,20 @@ github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQ github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= -github.com/vbauerster/mpb/v8 v8.7.2 h1:SMJtxhNho1MV3OuFgS1DAzhANN1Ejc5Ct+0iSaIkB14= -github.com/vbauerster/mpb/v8 v8.7.2/go.mod h1:ZFnrjzspgDHoxYLGvxIruiNk73GNTPG4YHgVNpR10VY= -github.com/vmware/govmomi v0.37.3 h1:L2y2Ba09tYiZwdPtdF64Ox9QZeJ8vlCUGcAF9SdODn4= -github.com/vmware/govmomi v0.37.3/go.mod h1:mtGWtM+YhTADHlCgJBiskSRPOZRsN9MSjPzaZLte/oQ= -github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= -github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= -github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= -github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/vbauerster/mpb/v8 v8.7.3 h1:n/mKPBav4FFWp5fH4U0lPpXfiOmCEgl5Yx/NM3tKJA0= +github.com/vbauerster/mpb/v8 v8.7.3/go.mod h1:9nFlNpDGVoTmQ4QvNjSLtwLmAFjwmq0XaAF26toHGNM= +github.com/vmware/govmomi v0.38.0 h1:UvQpLAOjDpO0JUxoPCXnEzOlEa/9kejO6K58qOFr6cM= +github.com/vmware/govmomi v0.38.0/go.mod h1:mtGWtM+YhTADHlCgJBiskSRPOZRsN9MSjPzaZLte/oQ= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= -go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= -go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= -go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak= @@ -678,10 +602,10 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod 
h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I= go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= @@ -701,14 +625,11 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9E go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= @@ -716,7 +637,6 @@ golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto 
v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= @@ -726,8 +646,8 @@ golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZP golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= -golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= +golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= +golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -753,7 +673,6 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= @@ -766,12 +685,9 @@ golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= @@ -782,9 +698,7 @@ 
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -794,7 +708,6 @@ golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -838,12 +751,8 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -865,19 +774,19 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -google.golang.org/api v0.183.0 h1:PNMeRDwo1pJdgNcFQ9GstuLe/noWKIc89pRWRLMvLwE= -google.golang.org/api v0.183.0/go.mod h1:q43adC5/pHoSZTx5h2mSmdF7NcyfW9JuDyIOJAgS9ZQ= +google.golang.org/api v0.186.0 h1:n2OPp+PPXX0Axh4GuSsL5QL8xQCTb2oDwyzPnQvqUug= +google.golang.org/api v0.186.0/go.mod h1:hvRbBmgoje49RV3xqVXrmP6w93n6ehGgIVPYrGtBFFc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240528184218-531527333157 h1:u7WMYrIrVvs0TF5yaKwKNbcJyySYf+HAIFXxWltJOXE= -google.golang.org/genproto v0.0.0-20240528184218-531527333157/go.mod h1:ubQlAQnzejB8uZzszhrTCU2Fyp6Vi7ZE5nn0c3W8+qQ= -google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 h1:+rdxYoE3E5htTEWIe15GlN6IfvbURM//Jt0mmkmm6ZU= -google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117/go.mod h1:OimBR/bc1wPO9iV4NC2bpyjy3VnAwZh5EBPQdtaE5oo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/genproto v0.0.0-20240617180043-68d350f18fd4 h1:CUiCqkPw1nNrNQzCCG4WA65m0nAmQiwXHpub3dNyruU= +google.golang.org/genproto v0.0.0-20240617180043-68d350f18fd4/go.mod h1:EvuUDCulqGgV80RvP1BHuom+smhX4qtlhnNatHuroGQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 h1:MuYw1wJzT+ZkybKfaOXKp5hJiZDn2iHaXRw0mRYdHSc= +google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4/go.mod h1:px9SlOOZBg1wM1zdnr8jEL4CNGUBZ+ZKYtNPApNQc4c= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4 h1:Di6ANFilr+S60a4S61ZM00vLdw0IrQOSMS2/6mrnOU0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -894,11 +803,10 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= -google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod 
h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= @@ -913,9 +821,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md index 7ef5fc0de..797589934 100644 --- a/vendor/cloud.google.com/go/auth/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/CHANGES.md @@ -1,5 +1,24 @@ # Changelog +## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.2...auth/v0.6.0) (2024-06-25) + + +### Features + +* **auth:** Add non-blocking token refresh for compute MDS ([#10263](https://github.com/googleapis/google-cloud-go/issues/10263)) ([9ac350d](https://github.com/googleapis/google-cloud-go/commit/9ac350da11a49b8e2174d3fc5b1a5070fec78b4e)) + + +### Bug Fixes + +* **auth:** Return error if envvar detected file returns an error ([#10431](https://github.com/googleapis/google-cloud-go/issues/10431)) ([e52b9a7](https://github.com/googleapis/google-cloud-go/commit/e52b9a7c45468827f5d220ab00965191faeb9d05)) + +## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.1...auth/v0.5.2) (2024-06-24) + + +### Bug Fixes + +* **auth:** Fetch initial token when CachedTokenProviderOptions.DisableAutoRefresh is true ([#10415](https://github.com/googleapis/google-cloud-go/issues/10415)) ([3266763](https://github.com/googleapis/google-cloud-go/commit/32667635ca2efad05cd8c087c004ca07d7406913)), refs [#10414](https://github.com/googleapis/google-cloud-go/issues/10414) + ## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.0...auth/v0.5.1) (2024-05-31) diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go index d579e482e..36729b604 100644 --- a/vendor/cloud.google.com/go/auth/auth.go +++ b/vendor/cloud.google.com/go/auth/auth.go @@ -44,6 +44,21 @@ const ( universeDomainDefault = "googleapis.com" ) +// tokenState represents different states for a [Token]. +type tokenState int + +const ( + // fresh indicates that the [Token] is valid. It is not expired or close to + // expired, or the token has no expiry. 
+ fresh tokenState = iota + // stale indicates that the [Token] is close to expired, and should be + // refreshed. The token can be used normally. + stale + // invalid indicates that the [Token] is expired or invalid. The token + // cannot be used for a normal operation. + invalid +) + var ( defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" defaultHeader = &jwt.Header{Algorithm: jwt.HeaderAlgRSA256, Type: jwt.HeaderType} @@ -81,13 +96,13 @@ type Token struct { // IsValid reports that a [Token] is non-nil, has a [Token.Value], and has not // expired. A token is considered expired if [Token.Expiry] has passed or will -// pass in the next 10 seconds. +// pass in the next 225 seconds. func (t *Token) IsValid() bool { return t.isValidWithEarlyExpiry(defaultExpiryDelta) } func (t *Token) isValidWithEarlyExpiry(earlyExpiry time.Duration) bool { - if t == nil || t.Value == "" { + if t.isEmpty() { return false } if t.Expiry.IsZero() { @@ -96,6 +111,10 @@ func (t *Token) isValidWithEarlyExpiry(earlyExpiry time.Duration) bool { return !t.Expiry.Round(0).Add(-earlyExpiry).Before(timeNow()) } +func (t *Token) isEmpty() bool { + return t == nil || t.Value == "" +} + // Credentials holds Google credentials, including // [Application Default Credentials](https://developers.google.com/accounts/docs/application-default-credentials). type Credentials struct { @@ -206,11 +225,15 @@ func NewCredentials(opts *CredentialsOptions) *Credentials { // CachedTokenProvider. type CachedTokenProviderOptions struct { // DisableAutoRefresh makes the TokenProvider always return the same token, - // even if it is expired. + // even if it is expired. The default is false. Optional. DisableAutoRefresh bool // ExpireEarly configures the amount of time before a token expires, that it - // should be refreshed. If unset, the default value is 10 seconds. + // should be refreshed. If unset, the default value is 3 minutes and 45 + // seconds. Optional. ExpireEarly time.Duration + // DisableAsyncRefresh configures a synchronous workflow that refreshes + // stale tokens while blocking. The default is false. Optional. + DisableAsyncRefresh bool } func (ctpo *CachedTokenProviderOptions) autoRefresh() bool { @@ -227,34 +250,126 @@ func (ctpo *CachedTokenProviderOptions) expireEarly() time.Duration { return ctpo.ExpireEarly } +func (ctpo *CachedTokenProviderOptions) blockingRefresh() bool { + if ctpo == nil { + return false + } + return ctpo.DisableAsyncRefresh +} + // NewCachedTokenProvider wraps a [TokenProvider] to cache the tokens returned -// by the underlying provider. By default it will refresh tokens ten seconds -// before they expire, but this time can be configured with the optional -// options. +// by the underlying provider. By default it will refresh tokens asynchronously +// (non-blocking mode) within a window that starts 3 minutes and 45 seconds +// before they expire. The asynchronous (non-blocking) refresh can be changed to +// a synchronous (blocking) refresh using the +// CachedTokenProviderOptions.DisableAsyncRefresh option. The time-before-expiry +// duration can be configured using the CachedTokenProviderOptions.ExpireEarly +// option. 
func NewCachedTokenProvider(tp TokenProvider, opts *CachedTokenProviderOptions) TokenProvider { if ctp, ok := tp.(*cachedTokenProvider); ok { return ctp } return &cachedTokenProvider{ - tp: tp, - autoRefresh: opts.autoRefresh(), - expireEarly: opts.expireEarly(), + tp: tp, + autoRefresh: opts.autoRefresh(), + expireEarly: opts.expireEarly(), + blockingRefresh: opts.blockingRefresh(), } } type cachedTokenProvider struct { - tp TokenProvider - autoRefresh bool - expireEarly time.Duration + tp TokenProvider + autoRefresh bool + expireEarly time.Duration + blockingRefresh bool mu sync.Mutex cachedToken *Token + // isRefreshRunning ensures that the non-blocking refresh will only be + // attempted once, even if multiple callers enter the Token method. + isRefreshRunning bool + // isRefreshErr ensures that the non-blocking refresh will only be attempted + // once per refresh window if an error is encountered. + isRefreshErr bool } func (c *cachedTokenProvider) Token(ctx context.Context) (*Token, error) { + if c.blockingRefresh { + return c.tokenBlocking(ctx) + } + return c.tokenNonBlocking(ctx) +} + +func (c *cachedTokenProvider) tokenNonBlocking(ctx context.Context) (*Token, error) { + switch c.tokenState() { + case fresh: + c.mu.Lock() + defer c.mu.Unlock() + return c.cachedToken, nil + case stale: + c.tokenAsync(ctx) + // Return the stale token immediately to not block customer requests to Cloud services. + c.mu.Lock() + defer c.mu.Unlock() + return c.cachedToken, nil + default: // invalid + return c.tokenBlocking(ctx) + } +} + +// tokenState reports the token's validity. +func (c *cachedTokenProvider) tokenState() tokenState { c.mu.Lock() defer c.mu.Unlock() - if c.cachedToken.IsValid() || !c.autoRefresh { + t := c.cachedToken + if t == nil || t.Value == "" { + return invalid + } else if t.Expiry.IsZero() { + return fresh + } else if timeNow().After(t.Expiry.Round(0)) { + return invalid + } else if timeNow().After(t.Expiry.Round(0).Add(-c.expireEarly)) { + return stale + } + return fresh +} + +// tokenAsync uses a bool to ensure that only one non-blocking token refresh +// happens at a time, even if multiple callers have entered this function +// concurrently. This avoids creating an arbitrary number of concurrent +// goroutines. Retries should be attempted and managed within the Token method. +// If the refresh attempt fails, no further attempts are made until the refresh +// window expires and the token enters the invalid state, at which point the +// blocking call to Token should likely return the same error on the main goroutine. +func (c *cachedTokenProvider) tokenAsync(ctx context.Context) { + fn := func() { + c.mu.Lock() + c.isRefreshRunning = true + c.mu.Unlock() + t, err := c.tp.Token(ctx) + c.mu.Lock() + defer c.mu.Unlock() + c.isRefreshRunning = false + if err != nil { + // Discard errors from the non-blocking refresh, but prevent further + // attempts. 
+ c.isRefreshErr = true + return + } + c.cachedToken = t + } + c.mu.Lock() + defer c.mu.Unlock() + if !c.isRefreshRunning && !c.isRefreshErr { + go fn() + } +} + +func (c *cachedTokenProvider) tokenBlocking(ctx context.Context) (*Token, error) { + c.mu.Lock() + defer c.mu.Unlock() + c.isRefreshErr = false + if c.cachedToken.IsValid() || (!c.autoRefresh && !c.cachedToken.isEmpty()) { return c.cachedToken, nil } t, err := c.tp.Token(ctx) diff --git a/vendor/cloud.google.com/go/auth/credentials/compute.go b/vendor/cloud.google.com/go/auth/credentials/compute.go index f3ec88824..6f70fa353 100644 --- a/vendor/cloud.google.com/go/auth/credentials/compute.go +++ b/vendor/cloud.google.com/go/auth/credentials/compute.go @@ -37,9 +37,10 @@ var ( // computeTokenProvider creates a [cloud.google.com/go/auth.TokenProvider] that // uses the metadata service to retrieve tokens. -func computeTokenProvider(earlyExpiry time.Duration, scope ...string) auth.TokenProvider { - return auth.NewCachedTokenProvider(computeProvider{scopes: scope}, &auth.CachedTokenProviderOptions{ - ExpireEarly: earlyExpiry, +func computeTokenProvider(opts *DetectOptions) auth.TokenProvider { + return auth.NewCachedTokenProvider(computeProvider{scopes: opts.Scopes}, &auth.CachedTokenProviderOptions{ + ExpireEarly: opts.EarlyTokenRefresh, + DisableAsyncRefresh: opts.DisableAsyncRefresh, }) } diff --git a/vendor/cloud.google.com/go/auth/credentials/detect.go b/vendor/cloud.google.com/go/auth/credentials/detect.go index cb3f44f58..c4728da3a 100644 --- a/vendor/cloud.google.com/go/auth/credentials/detect.go +++ b/vendor/cloud.google.com/go/auth/credentials/detect.go @@ -80,9 +80,11 @@ func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) { return readCredentialsFile(opts.CredentialsFile, opts) } if filename := os.Getenv(credsfile.GoogleAppCredsEnvVar); filename != "" { - if creds, err := readCredentialsFile(filename, opts); err == nil { - return creds, err + creds, err := readCredentialsFile(filename, opts) + if err != nil { + return nil, err } + return creds, nil } fileName := credsfile.GetWellKnownFileName() @@ -92,7 +94,7 @@ func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) { if OnGCE() { return auth.NewCredentials(&auth.CredentialsOptions{ - TokenProvider: computeTokenProvider(opts.EarlyTokenRefresh, opts.Scopes...), + TokenProvider: computeTokenProvider(opts), ProjectIDProvider: auth.CredentialsPropertyFunc(func(context.Context) (string, error) { return metadata.ProjectID() }), @@ -116,8 +118,13 @@ type DetectOptions struct { // Optional. Subject string // EarlyTokenRefresh configures how early before a token expires that it - // should be refreshed. + // should be refreshed. Once the token’s time until expiration has entered + // this refresh window the token is considered valid but stale. If unset, + // the default value is 3 minutes and 45 seconds. Optional. EarlyTokenRefresh time.Duration + // DisableAsyncRefresh configures a synchronous workflow that refreshes + // stale tokens while blocking. The default is false. Optional. + DisableAsyncRefresh bool // AuthHandlerOptions configures an authorization handler and other options // for 3LO flows. It is required, and only used, for client credential // flows. 
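Note (not part of the patch): the hunks above add an asynchronous token-refresh path to the vendored cloud.google.com/go/auth package and expose it through CachedTokenProviderOptions.DisableAsyncRefresh and DetectOptions.DisableAsyncRefresh, with the refresh window default moving from 10 seconds to 3 minutes 45 seconds. A minimal Go sketch of how the new options are expected to be used follows; the staticProvider type, the chosen durations, and the main function are hypothetical, while the auth package path, Token fields, and option names are taken from the diff above.

package main

import (
	"context"
	"fmt"
	"time"

	"cloud.google.com/go/auth"
)

// staticProvider is a hypothetical TokenProvider that mints a short-lived token.
type staticProvider struct{}

func (staticProvider) Token(ctx context.Context) (*auth.Token, error) {
	return &auth.Token{
		Value:  "fake-token",
		Expiry: time.Now().Add(10 * time.Minute),
	}, nil
}

func main() {
	// By default the wrapped provider now refreshes asynchronously once a token
	// enters the ExpireEarly window (3m45s unless overridden); setting
	// DisableAsyncRefresh restores the previous blocking refresh behaviour.
	tp := auth.NewCachedTokenProvider(staticProvider{}, &auth.CachedTokenProviderOptions{
		ExpireEarly:         5 * time.Minute, // start refreshing 5 minutes before expiry
		DisableAsyncRefresh: true,            // refresh synchronously while blocking
	})

	tok, err := tp.Token(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("cached token still valid:", tok.IsValid())
}

The same switch is reachable without touching the provider directly: callers of credentials.DetectDefault can set DisableAsyncRefresh on DetectOptions, which the updated computeTokenProvider now forwards to the cached provider, as shown in the compute.go and detect.go hunks above.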
diff --git a/vendor/cloud.google.com/go/auth/threelegged.go b/vendor/cloud.google.com/go/auth/threelegged.go index 1b8d83c4b..1ccdeff84 100644 --- a/vendor/cloud.google.com/go/auth/threelegged.go +++ b/vendor/cloud.google.com/go/auth/threelegged.go @@ -62,7 +62,8 @@ type Options3LO struct { // Optional. Client *http.Client // EarlyTokenExpiry is the time before the token expires that it should be - // refreshed. If not set the default value is 10 seconds. Optional. + // refreshed. If not set the default value is 3 minutes and 45 seconds. + // Optional. EarlyTokenExpiry time.Duration // AuthHandlerOpts provides a set of options for doing a diff --git a/vendor/cloud.google.com/go/compute/apiv1/accelerator_types_client.go b/vendor/cloud.google.com/go/compute/apiv1/accelerator_types_client.go index 6b7b32cbd..c7443f561 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/accelerator_types_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/accelerator_types_client.go @@ -196,6 +196,7 @@ func defaultAcceleratorTypesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -205,7 +206,9 @@ func defaultAcceleratorTypesRESTClientOptions() []option.ClientOption { func (c *acceleratorTypesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/addresses_client.go b/vendor/cloud.google.com/go/compute/apiv1/addresses_client.go index 53113163d..778101e77 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/addresses_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/addresses_client.go @@ -246,6 +246,7 @@ func defaultAddressesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -255,7 +256,9 @@ func defaultAddressesRESTClientOptions() []option.ClientOption { func (c *addressesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. 
The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/autoscalers_client.go b/vendor/cloud.google.com/go/compute/apiv1/autoscalers_client.go index 683adc948..02231166a 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/autoscalers_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/autoscalers_client.go @@ -246,6 +246,7 @@ func defaultAutoscalersRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -255,7 +256,9 @@ func defaultAutoscalersRESTClientOptions() []option.ClientOption { func (c *autoscalersRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/backend_buckets_client.go b/vendor/cloud.google.com/go/compute/apiv1/backend_buckets_client.go index 602835737..3b2f18db4 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/backend_buckets_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/backend_buckets_client.go @@ -295,6 +295,7 @@ func defaultBackendBucketsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -304,7 +305,9 @@ func defaultBackendBucketsRESTClientOptions() []option.ClientOption { func (c *backendBucketsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/backend_services_client.go b/vendor/cloud.google.com/go/compute/apiv1/backend_services_client.go index ccf3dd8cc..b05fc78b0 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/backend_services_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/backend_services_client.go @@ -354,6 +354,7 @@ func defaultBackendServicesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -363,7 +364,9 @@ func defaultBackendServicesRESTClientOptions() []option.ClientOption { func (c *backendServicesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) 
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/computepb/compute.pb.go b/vendor/cloud.google.com/go/compute/apiv1/computepb/compute.pb.go index 4712f0262..56335a96d 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/computepb/compute.pb.go +++ b/vendor/cloud.google.com/go/compute/apiv1/computepb/compute.pb.go @@ -20,7 +20,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 +// protoc-gen-go v1.34.2 // protoc v4.25.3 // source: google/cloud/compute/v1/compute.proto @@ -190575,7 +190575,7 @@ func file_google_cloud_compute_v1_compute_proto_rawDescGZIP() []byte { var file_google_cloud_compute_v1_compute_proto_enumTypes = make([]protoimpl.EnumInfo, 320) var file_google_cloud_compute_v1_compute_proto_msgTypes = make([]protoimpl.MessageInfo, 1597) -var file_google_cloud_compute_v1_compute_proto_goTypes = []interface{}{ +var file_google_cloud_compute_v1_compute_proto_goTypes = []any{ (AccessConfig_NetworkTier)(0), // 0: google.cloud.compute.v1.AccessConfig.NetworkTier (AccessConfig_Type)(0), // 1: google.cloud.compute.v1.AccessConfig.Type (Address_AddressType)(0), // 2: google.cloud.compute.v1.Address.AddressType @@ -195349,7 +195349,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_cloud_compute_v1_compute_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*AWSV4Signature); i { case 0: return &v.state @@ -195361,7 +195361,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*AbandonInstancesInstanceGroupManagerRequest); i { case 0: return &v.state @@ -195373,7 +195373,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*AbandonInstancesRegionInstanceGroupManagerRequest); i { case 0: return &v.state @@ -195385,7 +195385,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*AcceleratorConfig); i { case 0: return &v.state @@ -195397,7 +195397,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*AcceleratorType); i { case 0: return &v.state @@ -195409,7 +195409,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - 
file_google_cloud_compute_v1_compute_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*AcceleratorTypeAggregatedList); i { case 0: return &v.state @@ -195421,7 +195421,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*AcceleratorTypeList); i { case 0: return &v.state @@ -195433,7 +195433,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*AcceleratorTypesScopedList); i { case 0: return &v.state @@ -195445,7 +195445,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*Accelerators); i { case 0: return &v.state @@ -195457,7 +195457,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*AccessConfig); i { case 0: return &v.state @@ -195469,7 +195469,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*AddAccessConfigInstanceRequest); i { case 0: return &v.state @@ -195481,7 +195481,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*AddAssociationFirewallPolicyRequest); i { case 0: return &v.state @@ -195493,7 +195493,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*AddAssociationNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ -195505,7 +195505,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*AddAssociationRegionNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ -195517,7 +195517,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*AddHealthCheckTargetPoolRequest); i { case 0: return &v.state @@ -195529,7 +195529,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v := v.(*AddInstanceTargetPoolRequest); i { case 0: return &v.state @@ -195541,7 +195541,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*AddInstancesInstanceGroupRequest); i { case 0: return &v.state @@ -195553,7 +195553,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[17].Exporter = func(v any, i int) any { switch v := v.(*AddNodesNodeGroupRequest); i { case 0: return &v.state @@ -195565,7 +195565,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[18].Exporter = func(v any, i int) any { switch v := v.(*AddPeeringNetworkRequest); i { case 0: return &v.state @@ -195577,7 +195577,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[19].Exporter = func(v any, i int) any { switch v := v.(*AddResourcePoliciesDiskRequest); i { case 0: return &v.state @@ -195589,7 +195589,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[20].Exporter = func(v any, i int) any { switch v := v.(*AddResourcePoliciesInstanceRequest); i { case 0: return &v.state @@ -195601,7 +195601,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[21].Exporter = func(v any, i int) any { switch v := v.(*AddResourcePoliciesRegionDiskRequest); i { case 0: return &v.state @@ -195613,7 +195613,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[22].Exporter = func(v any, i int) any { switch v := v.(*AddRuleFirewallPolicyRequest); i { case 0: return &v.state @@ -195625,7 +195625,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[23].Exporter = func(v any, i int) any { switch v := v.(*AddRuleNetworkFirewallPolicyRequest); 
i { case 0: return &v.state @@ -195637,7 +195637,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[24].Exporter = func(v any, i int) any { switch v := v.(*AddRuleRegionNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ -195649,7 +195649,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[25].Exporter = func(v any, i int) any { switch v := v.(*AddRuleRegionSecurityPolicyRequest); i { case 0: return &v.state @@ -195661,7 +195661,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[26].Exporter = func(v any, i int) any { switch v := v.(*AddRuleSecurityPolicyRequest); i { case 0: return &v.state @@ -195673,7 +195673,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[27].Exporter = func(v any, i int) any { switch v := v.(*AddSignedUrlKeyBackendBucketRequest); i { case 0: return &v.state @@ -195685,7 +195685,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[28].Exporter = func(v any, i int) any { switch v := v.(*AddSignedUrlKeyBackendServiceRequest); i { case 0: return &v.state @@ -195697,7 +195697,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[29].Exporter = func(v any, i int) any { switch v := v.(*Address); i { case 0: return &v.state @@ -195709,7 +195709,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[30].Exporter = func(v any, i int) any { switch v := v.(*AddressAggregatedList); i { case 0: return &v.state @@ -195721,7 +195721,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[31].Exporter = func(v any, i int) any { switch v := v.(*AddressList); i { case 0: return &v.state @@ -195733,7 +195733,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[32].Exporter = func(v any, i int) any { switch v := v.(*AddressesScopedList); i { case 0: return &v.state @@ -195745,7 +195745,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - 
file_google_cloud_compute_v1_compute_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[33].Exporter = func(v any, i int) any { switch v := v.(*AdvancedMachineFeatures); i { case 0: return &v.state @@ -195757,7 +195757,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[34].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListAcceleratorTypesRequest); i { case 0: return &v.state @@ -195769,7 +195769,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[35].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListAddressesRequest); i { case 0: return &v.state @@ -195781,7 +195781,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[36].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListAutoscalersRequest); i { case 0: return &v.state @@ -195793,7 +195793,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[37].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListBackendServicesRequest); i { case 0: return &v.state @@ -195805,7 +195805,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[38].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListDiskTypesRequest); i { case 0: return &v.state @@ -195817,7 +195817,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[39].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListDisksRequest); i { case 0: return &v.state @@ -195829,7 +195829,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[40].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListForwardingRulesRequest); i { case 0: return &v.state @@ -195841,7 +195841,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[41].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListGlobalOperationsRequest); i { case 0: return &v.state @@ -195853,7 +195853,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[42].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListHealthChecksRequest); i { case 0: return &v.state @@ -195865,7 +195865,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[43].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListInstanceGroupManagersRequest); i { case 0: return &v.state @@ -195877,7 +195877,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[44].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListInstanceGroupsRequest); i { case 0: return &v.state @@ -195889,7 +195889,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[45].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListInstanceTemplatesRequest); i { case 0: return &v.state @@ -195901,7 +195901,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[46].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListInstancesRequest); i { case 0: return &v.state @@ -195913,7 +195913,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[47].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListInstantSnapshotsRequest); i { case 0: return &v.state @@ -195925,7 +195925,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[48].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListInterconnectAttachmentsRequest); i { case 0: return &v.state @@ -195937,7 +195937,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[49].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListMachineTypesRequest); i { case 0: return &v.state @@ -195949,7 +195949,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[50].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListNetworkAttachmentsRequest); i { case 0: return &v.state @@ -195961,7 +195961,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[51].Exporter = func(v any, i 
int) any { switch v := v.(*AggregatedListNetworkEdgeSecurityServicesRequest); i { case 0: return &v.state @@ -195973,7 +195973,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[52].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListNetworkEndpointGroupsRequest); i { case 0: return &v.state @@ -195985,7 +195985,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[53].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListNodeGroupsRequest); i { case 0: return &v.state @@ -195997,7 +195997,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[54].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListNodeTemplatesRequest); i { case 0: return &v.state @@ -196009,7 +196009,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[55].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListNodeTypesRequest); i { case 0: return &v.state @@ -196021,7 +196021,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[56].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListPacketMirroringsRequest); i { case 0: return &v.state @@ -196033,7 +196033,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[57].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListPublicDelegatedPrefixesRequest); i { case 0: return &v.state @@ -196045,7 +196045,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[58].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListRegionCommitmentsRequest); i { case 0: return &v.state @@ -196057,7 +196057,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[59].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListReservationsRequest); i { case 0: return &v.state @@ -196069,7 +196069,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[60].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListResourcePoliciesRequest); i { case 
0: return &v.state @@ -196081,7 +196081,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[61].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListRoutersRequest); i { case 0: return &v.state @@ -196093,7 +196093,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[62].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListSecurityPoliciesRequest); i { case 0: return &v.state @@ -196105,7 +196105,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[63].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListServiceAttachmentsRequest); i { case 0: return &v.state @@ -196117,7 +196117,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[64].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListSslCertificatesRequest); i { case 0: return &v.state @@ -196129,7 +196129,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[65].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListSslPoliciesRequest); i { case 0: return &v.state @@ -196141,7 +196141,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[66].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListStoragePoolTypesRequest); i { case 0: return &v.state @@ -196153,7 +196153,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[67].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListStoragePoolsRequest); i { case 0: return &v.state @@ -196165,7 +196165,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[68].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListSubnetworksRequest); i { case 0: return &v.state @@ -196177,7 +196177,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[69].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListTargetHttpProxiesRequest); i { case 0: return &v.state @@ -196189,7 +196189,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { 
return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[70].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListTargetHttpsProxiesRequest); i { case 0: return &v.state @@ -196201,7 +196201,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[71].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListTargetInstancesRequest); i { case 0: return &v.state @@ -196213,7 +196213,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[72].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListTargetPoolsRequest); i { case 0: return &v.state @@ -196225,7 +196225,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[73].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListTargetTcpProxiesRequest); i { case 0: return &v.state @@ -196237,7 +196237,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[74].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListTargetVpnGatewaysRequest); i { case 0: return &v.state @@ -196249,7 +196249,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[75].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListUrlMapsRequest); i { case 0: return &v.state @@ -196261,7 +196261,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[76].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListVpnGatewaysRequest); i { case 0: return &v.state @@ -196273,7 +196273,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[77].Exporter = func(v any, i int) any { switch v := v.(*AggregatedListVpnTunnelsRequest); i { case 0: return &v.state @@ -196285,7 +196285,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[78].Exporter = func(v any, i int) any { switch v := v.(*AliasIpRange); i { case 0: return &v.state @@ -196297,7 +196297,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[79].Exporter = func(v any, i int) any { switch v := v.(*AllocationAggregateReservation); i { case 0: return &v.state @@ -196309,7 +196309,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[80].Exporter = func(v any, i int) any { switch v := v.(*AllocationAggregateReservationReservedResourceInfo); i { case 0: return &v.state @@ -196321,7 +196321,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[81].Exporter = func(v any, i int) any { switch v := v.(*AllocationAggregateReservationReservedResourceInfoAccelerator); i { case 0: return &v.state @@ -196333,7 +196333,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[82].Exporter = func(v any, i int) any { switch v := v.(*AllocationResourceStatus); i { case 0: return &v.state @@ -196345,7 +196345,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[83].Exporter = func(v any, i int) any { switch v := v.(*AllocationResourceStatusSpecificSKUAllocation); i { case 0: return &v.state @@ -196357,7 +196357,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[84].Exporter = func(v any, i int) any { switch v := v.(*AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk); i { case 0: return &v.state @@ -196369,7 +196369,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[85].Exporter = func(v any, i int) any { switch v := v.(*AllocationSpecificSKUAllocationReservedInstanceProperties); i { case 0: return &v.state @@ -196381,7 +196381,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[86].Exporter = func(v any, i int) any { switch v := v.(*AllocationSpecificSKUReservation); i { case 0: return &v.state @@ -196393,7 +196393,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[87].Exporter = func(v any, i int) any { switch v := v.(*Allowed); i { case 0: return &v.state @@ -196405,7 +196405,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[88].Exporter = func(v any, i int) any { switch v := v.(*AnnouncePublicAdvertisedPrefixeRequest); i { case 0: return &v.state @@ -196417,7 +196417,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[89].Exporter = func(v any, i int) any { switch v := v.(*AnnouncePublicDelegatedPrefixeRequest); i { case 0: return &v.state @@ -196429,7 +196429,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[90].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[90].Exporter = func(v any, i int) any { switch v := v.(*ApplyUpdatesToInstancesInstanceGroupManagerRequest); i { case 0: return &v.state @@ -196441,7 +196441,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[91].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[91].Exporter = func(v any, i int) any { switch v := v.(*ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest); i { case 0: return &v.state @@ -196453,7 +196453,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[92].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[92].Exporter = func(v any, i int) any { switch v := v.(*AttachDiskInstanceRequest); i { case 0: return &v.state @@ -196465,7 +196465,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[93].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[93].Exporter = func(v any, i int) any { switch v := v.(*AttachNetworkEndpointsGlobalNetworkEndpointGroupRequest); i { case 0: return &v.state @@ -196477,7 +196477,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[94].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[94].Exporter = func(v any, i int) any { switch v := v.(*AttachNetworkEndpointsNetworkEndpointGroupRequest); i { case 0: return &v.state @@ -196489,7 +196489,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[95].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[95].Exporter = func(v any, i int) any { switch v := v.(*AttachNetworkEndpointsRegionNetworkEndpointGroupRequest); i { case 0: return &v.state @@ -196501,7 +196501,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[96].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[96].Exporter = func(v any, i int) any { switch v := v.(*AttachedDisk); i { case 0: return &v.state @@ -196513,7 +196513,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[97].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[97].Exporter = func(v any, i int) any { switch v := v.(*AttachedDiskInitializeParams); i { case 0: return &v.state @@ -196525,7 +196525,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[98].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[98].Exporter = func(v any, i int) any { switch v := v.(*AuditConfig); i { case 0: return &v.state @@ -196537,7 +196537,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[99].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[99].Exporter = func(v any, i int) any { switch v := v.(*AuditLogConfig); i { case 0: return &v.state @@ -196549,7 +196549,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[100].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[100].Exporter = func(v any, i int) any { switch v := v.(*AuthorizationLoggingOptions); i { case 0: return &v.state @@ -196561,7 +196561,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[101].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[101].Exporter = func(v any, i int) any { switch v := v.(*Autoscaler); i { case 0: return &v.state @@ -196573,7 +196573,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[102].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[102].Exporter = func(v any, i int) any { switch v := v.(*AutoscalerAggregatedList); i { case 0: return &v.state @@ -196585,7 +196585,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[103].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[103].Exporter = func(v any, i int) any { switch v := v.(*AutoscalerList); i { case 0: return &v.state @@ -196597,7 +196597,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[104].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[104].Exporter = func(v any, i int) any { switch v := v.(*AutoscalerStatusDetails); i { case 0: return &v.state @@ -196609,7 +196609,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[105].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[105].Exporter = func(v any, i int) any { switch v := v.(*AutoscalersScopedList); i { case 0: return &v.state @@ -196621,7 +196621,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[106].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[106].Exporter = func(v any, i int) any { switch v := v.(*AutoscalingPolicy); i { case 0: return &v.state @@ -196633,7 +196633,7 @@ func 
file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[107].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[107].Exporter = func(v any, i int) any { switch v := v.(*AutoscalingPolicyCpuUtilization); i { case 0: return &v.state @@ -196645,7 +196645,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[108].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[108].Exporter = func(v any, i int) any { switch v := v.(*AutoscalingPolicyCustomMetricUtilization); i { case 0: return &v.state @@ -196657,7 +196657,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[109].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[109].Exporter = func(v any, i int) any { switch v := v.(*AutoscalingPolicyLoadBalancingUtilization); i { case 0: return &v.state @@ -196669,7 +196669,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[110].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[110].Exporter = func(v any, i int) any { switch v := v.(*AutoscalingPolicyScaleInControl); i { case 0: return &v.state @@ -196681,7 +196681,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[111].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[111].Exporter = func(v any, i int) any { switch v := v.(*AutoscalingPolicyScalingSchedule); i { case 0: return &v.state @@ -196693,7 +196693,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[112].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[112].Exporter = func(v any, i int) any { switch v := v.(*Backend); i { case 0: return &v.state @@ -196705,7 +196705,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[113].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[113].Exporter = func(v any, i int) any { switch v := v.(*BackendBucket); i { case 0: return &v.state @@ -196717,7 +196717,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[114].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[114].Exporter = func(v any, i int) any { switch v := v.(*BackendBucketCdnPolicy); i { case 0: return &v.state @@ -196729,7 +196729,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[115].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[115].Exporter = func(v any, i int) any { switch v := v.(*BackendBucketCdnPolicyBypassCacheOnRequestHeader); i { case 0: return &v.state @@ -196741,7 +196741,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - 
file_google_cloud_compute_v1_compute_proto_msgTypes[116].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[116].Exporter = func(v any, i int) any { switch v := v.(*BackendBucketCdnPolicyCacheKeyPolicy); i { case 0: return &v.state @@ -196753,7 +196753,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[117].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[117].Exporter = func(v any, i int) any { switch v := v.(*BackendBucketCdnPolicyNegativeCachingPolicy); i { case 0: return &v.state @@ -196765,7 +196765,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[118].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[118].Exporter = func(v any, i int) any { switch v := v.(*BackendBucketList); i { case 0: return &v.state @@ -196777,7 +196777,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[119].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[119].Exporter = func(v any, i int) any { switch v := v.(*BackendService); i { case 0: return &v.state @@ -196789,7 +196789,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[120].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[120].Exporter = func(v any, i int) any { switch v := v.(*BackendServiceAggregatedList); i { case 0: return &v.state @@ -196801,7 +196801,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[121].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[121].Exporter = func(v any, i int) any { switch v := v.(*BackendServiceCdnPolicy); i { case 0: return &v.state @@ -196813,7 +196813,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[122].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[122].Exporter = func(v any, i int) any { switch v := v.(*BackendServiceCdnPolicyBypassCacheOnRequestHeader); i { case 0: return &v.state @@ -196825,7 +196825,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[123].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[123].Exporter = func(v any, i int) any { switch v := v.(*BackendServiceCdnPolicyNegativeCachingPolicy); i { case 0: return &v.state @@ -196837,7 +196837,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[124].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[124].Exporter = func(v any, i int) any { switch v := v.(*BackendServiceConnectionTrackingPolicy); i { case 0: return &v.state @@ -196849,7 +196849,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[125].Exporter = func(v interface{}, i int) 
interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[125].Exporter = func(v any, i int) any { switch v := v.(*BackendServiceFailoverPolicy); i { case 0: return &v.state @@ -196861,7 +196861,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[126].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[126].Exporter = func(v any, i int) any { switch v := v.(*BackendServiceGroupHealth); i { case 0: return &v.state @@ -196873,7 +196873,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[127].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[127].Exporter = func(v any, i int) any { switch v := v.(*BackendServiceIAP); i { case 0: return &v.state @@ -196885,7 +196885,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[128].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[128].Exporter = func(v any, i int) any { switch v := v.(*BackendServiceList); i { case 0: return &v.state @@ -196897,7 +196897,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[129].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[129].Exporter = func(v any, i int) any { switch v := v.(*BackendServiceListUsable); i { case 0: return &v.state @@ -196909,7 +196909,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[130].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[130].Exporter = func(v any, i int) any { switch v := v.(*BackendServiceLocalityLoadBalancingPolicyConfig); i { case 0: return &v.state @@ -196921,7 +196921,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[131].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[131].Exporter = func(v any, i int) any { switch v := v.(*BackendServiceLocalityLoadBalancingPolicyConfigCustomPolicy); i { case 0: return &v.state @@ -196933,7 +196933,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[132].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[132].Exporter = func(v any, i int) any { switch v := v.(*BackendServiceLocalityLoadBalancingPolicyConfigPolicy); i { case 0: return &v.state @@ -196945,7 +196945,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[133].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[133].Exporter = func(v any, i int) any { switch v := v.(*BackendServiceLogConfig); i { case 0: return &v.state @@ -196957,7 +196957,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[134].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[134].Exporter = func(v any, i 
int) any { switch v := v.(*BackendServiceReference); i { case 0: return &v.state @@ -196969,7 +196969,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[135].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[135].Exporter = func(v any, i int) any { switch v := v.(*BackendServiceUsedBy); i { case 0: return &v.state @@ -196981,7 +196981,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[136].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[136].Exporter = func(v any, i int) any { switch v := v.(*BackendServicesScopedList); i { case 0: return &v.state @@ -196993,7 +196993,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[137].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[137].Exporter = func(v any, i int) any { switch v := v.(*BfdPacket); i { case 0: return &v.state @@ -197005,7 +197005,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[138].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[138].Exporter = func(v any, i int) any { switch v := v.(*BfdStatus); i { case 0: return &v.state @@ -197017,7 +197017,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[139].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[139].Exporter = func(v any, i int) any { switch v := v.(*BfdStatusPacketCounts); i { case 0: return &v.state @@ -197029,7 +197029,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[140].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[140].Exporter = func(v any, i int) any { switch v := v.(*Binding); i { case 0: return &v.state @@ -197041,7 +197041,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[141].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[141].Exporter = func(v any, i int) any { switch v := v.(*BulkInsertDiskRequest); i { case 0: return &v.state @@ -197053,7 +197053,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[142].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[142].Exporter = func(v any, i int) any { switch v := v.(*BulkInsertDiskResource); i { case 0: return &v.state @@ -197065,7 +197065,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[143].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[143].Exporter = func(v any, i int) any { switch v := v.(*BulkInsertInstanceRequest); i { case 0: return &v.state @@ -197077,7 +197077,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - 
file_google_cloud_compute_v1_compute_proto_msgTypes[144].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[144].Exporter = func(v any, i int) any { switch v := v.(*BulkInsertInstanceResource); i { case 0: return &v.state @@ -197089,7 +197089,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[145].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[145].Exporter = func(v any, i int) any { switch v := v.(*BulkInsertInstanceResourcePerInstanceProperties); i { case 0: return &v.state @@ -197101,7 +197101,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[146].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[146].Exporter = func(v any, i int) any { switch v := v.(*BulkInsertOperationStatus); i { case 0: return &v.state @@ -197113,7 +197113,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[147].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[147].Exporter = func(v any, i int) any { switch v := v.(*BulkInsertRegionDiskRequest); i { case 0: return &v.state @@ -197125,7 +197125,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[148].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[148].Exporter = func(v any, i int) any { switch v := v.(*BulkInsertRegionInstanceRequest); i { case 0: return &v.state @@ -197137,7 +197137,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[149].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[149].Exporter = func(v any, i int) any { switch v := v.(*CacheInvalidationRule); i { case 0: return &v.state @@ -197149,7 +197149,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[150].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[150].Exporter = func(v any, i int) any { switch v := v.(*CacheKeyPolicy); i { case 0: return &v.state @@ -197161,7 +197161,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[151].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[151].Exporter = func(v any, i int) any { switch v := v.(*CancelInstanceGroupManagerResizeRequestRequest); i { case 0: return &v.state @@ -197173,7 +197173,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[152].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[152].Exporter = func(v any, i int) any { switch v := v.(*CircuitBreakers); i { case 0: return &v.state @@ -197185,7 +197185,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[153].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[153].Exporter = func(v any, i int) any { switch v := v.(*CloneRulesFirewallPolicyRequest); i { case 0: return &v.state @@ -197197,7 +197197,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[154].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[154].Exporter = func(v any, i int) any { switch v := v.(*CloneRulesNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ -197209,7 +197209,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[155].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[155].Exporter = func(v any, i int) any { switch v := v.(*CloneRulesRegionNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ -197221,7 +197221,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[156].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[156].Exporter = func(v any, i int) any { switch v := v.(*Commitment); i { case 0: return &v.state @@ -197233,7 +197233,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[157].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[157].Exporter = func(v any, i int) any { switch v := v.(*CommitmentAggregatedList); i { case 0: return &v.state @@ -197245,7 +197245,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[158].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[158].Exporter = func(v any, i int) any { switch v := v.(*CommitmentList); i { case 0: return &v.state @@ -197257,7 +197257,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[159].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[159].Exporter = func(v any, i int) any { switch v := v.(*CommitmentsScopedList); i { case 0: return &v.state @@ -197269,7 +197269,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[160].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[160].Exporter = func(v any, i int) any { switch v := v.(*Condition); i { case 0: return &v.state @@ -197281,7 +197281,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[161].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[161].Exporter = func(v any, i int) any { switch v := v.(*ConfidentialInstanceConfig); i { case 0: return &v.state @@ -197293,7 +197293,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[162].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[162].Exporter = func(v any, i int) any { switch v := v.(*ConnectionDraining); i { case 0: return &v.state @@ -197305,7 
+197305,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[163].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[163].Exporter = func(v any, i int) any { switch v := v.(*ConsistentHashLoadBalancerSettings); i { case 0: return &v.state @@ -197317,7 +197317,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[164].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[164].Exporter = func(v any, i int) any { switch v := v.(*ConsistentHashLoadBalancerSettingsHttpCookie); i { case 0: return &v.state @@ -197329,7 +197329,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[165].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[165].Exporter = func(v any, i int) any { switch v := v.(*CorsPolicy); i { case 0: return &v.state @@ -197341,7 +197341,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[166].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[166].Exporter = func(v any, i int) any { switch v := v.(*CreateInstancesInstanceGroupManagerRequest); i { case 0: return &v.state @@ -197353,7 +197353,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[167].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[167].Exporter = func(v any, i int) any { switch v := v.(*CreateInstancesRegionInstanceGroupManagerRequest); i { case 0: return &v.state @@ -197365,7 +197365,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[168].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[168].Exporter = func(v any, i int) any { switch v := v.(*CreateSnapshotDiskRequest); i { case 0: return &v.state @@ -197377,7 +197377,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[169].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[169].Exporter = func(v any, i int) any { switch v := v.(*CreateSnapshotRegionDiskRequest); i { case 0: return &v.state @@ -197389,7 +197389,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[170].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[170].Exporter = func(v any, i int) any { switch v := v.(*CustomerEncryptionKey); i { case 0: return &v.state @@ -197401,7 +197401,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[171].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[171].Exporter = func(v any, i int) any { switch v := v.(*CustomerEncryptionKeyProtectedDisk); i { case 0: return &v.state @@ -197413,7 +197413,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - 
file_google_cloud_compute_v1_compute_proto_msgTypes[172].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[172].Exporter = func(v any, i int) any { switch v := v.(*Data); i { case 0: return &v.state @@ -197425,7 +197425,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[173].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[173].Exporter = func(v any, i int) any { switch v := v.(*DeleteAccessConfigInstanceRequest); i { case 0: return &v.state @@ -197437,7 +197437,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[174].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[174].Exporter = func(v any, i int) any { switch v := v.(*DeleteAddressRequest); i { case 0: return &v.state @@ -197449,7 +197449,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[175].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[175].Exporter = func(v any, i int) any { switch v := v.(*DeleteAutoscalerRequest); i { case 0: return &v.state @@ -197461,7 +197461,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[176].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[176].Exporter = func(v any, i int) any { switch v := v.(*DeleteBackendBucketRequest); i { case 0: return &v.state @@ -197473,7 +197473,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[177].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[177].Exporter = func(v any, i int) any { switch v := v.(*DeleteBackendServiceRequest); i { case 0: return &v.state @@ -197485,7 +197485,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[178].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[178].Exporter = func(v any, i int) any { switch v := v.(*DeleteDiskRequest); i { case 0: return &v.state @@ -197497,7 +197497,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[179].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[179].Exporter = func(v any, i int) any { switch v := v.(*DeleteExternalVpnGatewayRequest); i { case 0: return &v.state @@ -197509,7 +197509,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[180].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[180].Exporter = func(v any, i int) any { switch v := v.(*DeleteFirewallPolicyRequest); i { case 0: return &v.state @@ -197521,7 +197521,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[181].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[181].Exporter = func(v 
any, i int) any { switch v := v.(*DeleteFirewallRequest); i { case 0: return &v.state @@ -197533,7 +197533,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[182].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[182].Exporter = func(v any, i int) any { switch v := v.(*DeleteForwardingRuleRequest); i { case 0: return &v.state @@ -197545,7 +197545,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[183].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[183].Exporter = func(v any, i int) any { switch v := v.(*DeleteGlobalAddressRequest); i { case 0: return &v.state @@ -197557,7 +197557,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[184].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[184].Exporter = func(v any, i int) any { switch v := v.(*DeleteGlobalForwardingRuleRequest); i { case 0: return &v.state @@ -197569,7 +197569,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[185].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[185].Exporter = func(v any, i int) any { switch v := v.(*DeleteGlobalNetworkEndpointGroupRequest); i { case 0: return &v.state @@ -197581,7 +197581,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[186].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[186].Exporter = func(v any, i int) any { switch v := v.(*DeleteGlobalOperationRequest); i { case 0: return &v.state @@ -197593,7 +197593,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[187].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[187].Exporter = func(v any, i int) any { switch v := v.(*DeleteGlobalOperationResponse); i { case 0: return &v.state @@ -197605,7 +197605,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[188].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[188].Exporter = func(v any, i int) any { switch v := v.(*DeleteGlobalOrganizationOperationRequest); i { case 0: return &v.state @@ -197617,7 +197617,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[189].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[189].Exporter = func(v any, i int) any { switch v := v.(*DeleteGlobalOrganizationOperationResponse); i { case 0: return &v.state @@ -197629,7 +197629,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[190].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[190].Exporter = func(v any, i int) any { switch v := v.(*DeleteGlobalPublicDelegatedPrefixeRequest); i { case 0: return &v.state @@ 
-197641,7 +197641,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[191].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[191].Exporter = func(v any, i int) any { switch v := v.(*DeleteHealthCheckRequest); i { case 0: return &v.state @@ -197653,7 +197653,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[192].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[192].Exporter = func(v any, i int) any { switch v := v.(*DeleteImageRequest); i { case 0: return &v.state @@ -197665,7 +197665,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[193].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[193].Exporter = func(v any, i int) any { switch v := v.(*DeleteInstanceGroupManagerRequest); i { case 0: return &v.state @@ -197677,7 +197677,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[194].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[194].Exporter = func(v any, i int) any { switch v := v.(*DeleteInstanceGroupManagerResizeRequestRequest); i { case 0: return &v.state @@ -197689,7 +197689,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[195].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[195].Exporter = func(v any, i int) any { switch v := v.(*DeleteInstanceGroupRequest); i { case 0: return &v.state @@ -197701,7 +197701,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[196].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[196].Exporter = func(v any, i int) any { switch v := v.(*DeleteInstanceRequest); i { case 0: return &v.state @@ -197713,7 +197713,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[197].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[197].Exporter = func(v any, i int) any { switch v := v.(*DeleteInstanceTemplateRequest); i { case 0: return &v.state @@ -197725,7 +197725,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[198].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[198].Exporter = func(v any, i int) any { switch v := v.(*DeleteInstancesInstanceGroupManagerRequest); i { case 0: return &v.state @@ -197737,7 +197737,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[199].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[199].Exporter = func(v any, i int) any { switch v := v.(*DeleteInstancesRegionInstanceGroupManagerRequest); i { case 0: return &v.state @@ -197749,7 +197749,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - 
file_google_cloud_compute_v1_compute_proto_msgTypes[200].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[200].Exporter = func(v any, i int) any { switch v := v.(*DeleteInstantSnapshotRequest); i { case 0: return &v.state @@ -197761,7 +197761,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[201].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[201].Exporter = func(v any, i int) any { switch v := v.(*DeleteInterconnectAttachmentRequest); i { case 0: return &v.state @@ -197773,7 +197773,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[202].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[202].Exporter = func(v any, i int) any { switch v := v.(*DeleteInterconnectRequest); i { case 0: return &v.state @@ -197785,7 +197785,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[203].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[203].Exporter = func(v any, i int) any { switch v := v.(*DeleteLicenseRequest); i { case 0: return &v.state @@ -197797,7 +197797,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[204].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[204].Exporter = func(v any, i int) any { switch v := v.(*DeleteMachineImageRequest); i { case 0: return &v.state @@ -197809,7 +197809,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[205].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[205].Exporter = func(v any, i int) any { switch v := v.(*DeleteNetworkAttachmentRequest); i { case 0: return &v.state @@ -197821,7 +197821,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[206].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[206].Exporter = func(v any, i int) any { switch v := v.(*DeleteNetworkEdgeSecurityServiceRequest); i { case 0: return &v.state @@ -197833,7 +197833,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[207].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[207].Exporter = func(v any, i int) any { switch v := v.(*DeleteNetworkEndpointGroupRequest); i { case 0: return &v.state @@ -197845,7 +197845,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[208].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[208].Exporter = func(v any, i int) any { switch v := v.(*DeleteNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ -197857,7 +197857,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[209].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[209].Exporter = func(v any, i int) any { switch v := v.(*DeleteNetworkRequest); i { case 0: return &v.state @@ -197869,7 +197869,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[210].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[210].Exporter = func(v any, i int) any { switch v := v.(*DeleteNodeGroupRequest); i { case 0: return &v.state @@ -197881,7 +197881,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[211].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[211].Exporter = func(v any, i int) any { switch v := v.(*DeleteNodeTemplateRequest); i { case 0: return &v.state @@ -197893,7 +197893,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[212].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[212].Exporter = func(v any, i int) any { switch v := v.(*DeleteNodesNodeGroupRequest); i { case 0: return &v.state @@ -197905,7 +197905,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[213].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[213].Exporter = func(v any, i int) any { switch v := v.(*DeletePacketMirroringRequest); i { case 0: return &v.state @@ -197917,7 +197917,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[214].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[214].Exporter = func(v any, i int) any { switch v := v.(*DeletePerInstanceConfigsInstanceGroupManagerRequest); i { case 0: return &v.state @@ -197929,7 +197929,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[215].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[215].Exporter = func(v any, i int) any { switch v := v.(*DeletePerInstanceConfigsRegionInstanceGroupManagerRequest); i { case 0: return &v.state @@ -197941,7 +197941,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[216].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[216].Exporter = func(v any, i int) any { switch v := v.(*DeletePublicAdvertisedPrefixeRequest); i { case 0: return &v.state @@ -197953,7 +197953,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[217].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[217].Exporter = func(v any, i int) any { switch v := v.(*DeletePublicDelegatedPrefixeRequest); i { case 0: return &v.state @@ -197965,7 +197965,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[218].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[218].Exporter = func(v any, i int) any { 
switch v := v.(*DeleteRegionAutoscalerRequest); i { case 0: return &v.state @@ -197977,7 +197977,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[219].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[219].Exporter = func(v any, i int) any { switch v := v.(*DeleteRegionBackendServiceRequest); i { case 0: return &v.state @@ -197989,7 +197989,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[220].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[220].Exporter = func(v any, i int) any { switch v := v.(*DeleteRegionDiskRequest); i { case 0: return &v.state @@ -198001,7 +198001,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[221].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[221].Exporter = func(v any, i int) any { switch v := v.(*DeleteRegionHealthCheckRequest); i { case 0: return &v.state @@ -198013,7 +198013,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[222].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[222].Exporter = func(v any, i int) any { switch v := v.(*DeleteRegionHealthCheckServiceRequest); i { case 0: return &v.state @@ -198025,7 +198025,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[223].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[223].Exporter = func(v any, i int) any { switch v := v.(*DeleteRegionInstanceGroupManagerRequest); i { case 0: return &v.state @@ -198037,7 +198037,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[224].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[224].Exporter = func(v any, i int) any { switch v := v.(*DeleteRegionInstanceTemplateRequest); i { case 0: return &v.state @@ -198049,7 +198049,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[225].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[225].Exporter = func(v any, i int) any { switch v := v.(*DeleteRegionInstantSnapshotRequest); i { case 0: return &v.state @@ -198061,7 +198061,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[226].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[226].Exporter = func(v any, i int) any { switch v := v.(*DeleteRegionNetworkEndpointGroupRequest); i { case 0: return &v.state @@ -198073,7 +198073,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[227].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[227].Exporter = func(v any, i int) any { switch v := v.(*DeleteRegionNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ 
-198085,7 +198085,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[228].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[228].Exporter = func(v any, i int) any { switch v := v.(*DeleteRegionNotificationEndpointRequest); i { case 0: return &v.state @@ -198097,7 +198097,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[229].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[229].Exporter = func(v any, i int) any { switch v := v.(*DeleteRegionOperationRequest); i { case 0: return &v.state @@ -198109,7 +198109,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[230].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[230].Exporter = func(v any, i int) any { switch v := v.(*DeleteRegionOperationResponse); i { case 0: return &v.state @@ -198121,7 +198121,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[231].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[231].Exporter = func(v any, i int) any { switch v := v.(*DeleteRegionSecurityPolicyRequest); i { case 0: return &v.state @@ -198133,7 +198133,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[232].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[232].Exporter = func(v any, i int) any { switch v := v.(*DeleteRegionSslCertificateRequest); i { case 0: return &v.state @@ -198145,7 +198145,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[233].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[233].Exporter = func(v any, i int) any { switch v := v.(*DeleteRegionSslPolicyRequest); i { case 0: return &v.state @@ -198157,7 +198157,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[234].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[234].Exporter = func(v any, i int) any { switch v := v.(*DeleteRegionTargetHttpProxyRequest); i { case 0: return &v.state @@ -198169,7 +198169,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[235].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[235].Exporter = func(v any, i int) any { switch v := v.(*DeleteRegionTargetHttpsProxyRequest); i { case 0: return &v.state @@ -198181,7 +198181,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[236].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[236].Exporter = func(v any, i int) any { switch v := v.(*DeleteRegionTargetTcpProxyRequest); i { case 0: return &v.state @@ -198193,7 +198193,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - 
file_google_cloud_compute_v1_compute_proto_msgTypes[237].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[237].Exporter = func(v any, i int) any { switch v := v.(*DeleteRegionUrlMapRequest); i { case 0: return &v.state @@ -198205,7 +198205,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[238].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[238].Exporter = func(v any, i int) any { switch v := v.(*DeleteReservationRequest); i { case 0: return &v.state @@ -198217,7 +198217,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[239].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[239].Exporter = func(v any, i int) any { switch v := v.(*DeleteResourcePolicyRequest); i { case 0: return &v.state @@ -198229,7 +198229,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[240].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[240].Exporter = func(v any, i int) any { switch v := v.(*DeleteRouteRequest); i { case 0: return &v.state @@ -198241,7 +198241,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[241].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[241].Exporter = func(v any, i int) any { switch v := v.(*DeleteRouterRequest); i { case 0: return &v.state @@ -198253,7 +198253,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[242].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[242].Exporter = func(v any, i int) any { switch v := v.(*DeleteSecurityPolicyRequest); i { case 0: return &v.state @@ -198265,7 +198265,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[243].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[243].Exporter = func(v any, i int) any { switch v := v.(*DeleteServiceAttachmentRequest); i { case 0: return &v.state @@ -198277,7 +198277,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[244].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[244].Exporter = func(v any, i int) any { switch v := v.(*DeleteSignedUrlKeyBackendBucketRequest); i { case 0: return &v.state @@ -198289,7 +198289,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[245].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[245].Exporter = func(v any, i int) any { switch v := v.(*DeleteSignedUrlKeyBackendServiceRequest); i { case 0: return &v.state @@ -198301,7 +198301,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[246].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[246].Exporter = func(v any, i int) any { switch v := v.(*DeleteSnapshotRequest); i { case 0: return &v.state @@ -198313,7 +198313,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[247].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[247].Exporter = func(v any, i int) any { switch v := v.(*DeleteSslCertificateRequest); i { case 0: return &v.state @@ -198325,7 +198325,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[248].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[248].Exporter = func(v any, i int) any { switch v := v.(*DeleteSslPolicyRequest); i { case 0: return &v.state @@ -198337,7 +198337,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[249].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[249].Exporter = func(v any, i int) any { switch v := v.(*DeleteStoragePoolRequest); i { case 0: return &v.state @@ -198349,7 +198349,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[250].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[250].Exporter = func(v any, i int) any { switch v := v.(*DeleteSubnetworkRequest); i { case 0: return &v.state @@ -198361,7 +198361,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[251].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[251].Exporter = func(v any, i int) any { switch v := v.(*DeleteTargetGrpcProxyRequest); i { case 0: return &v.state @@ -198373,7 +198373,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[252].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[252].Exporter = func(v any, i int) any { switch v := v.(*DeleteTargetHttpProxyRequest); i { case 0: return &v.state @@ -198385,7 +198385,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[253].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[253].Exporter = func(v any, i int) any { switch v := v.(*DeleteTargetHttpsProxyRequest); i { case 0: return &v.state @@ -198397,7 +198397,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[254].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[254].Exporter = func(v any, i int) any { switch v := v.(*DeleteTargetInstanceRequest); i { case 0: return &v.state @@ -198409,7 +198409,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[255].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[255].Exporter = func(v any, i int) any { switch v := v.(*DeleteTargetPoolRequest); i { case 0: return &v.state @@ 
-198421,7 +198421,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[256].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[256].Exporter = func(v any, i int) any { switch v := v.(*DeleteTargetSslProxyRequest); i { case 0: return &v.state @@ -198433,7 +198433,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[257].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[257].Exporter = func(v any, i int) any { switch v := v.(*DeleteTargetTcpProxyRequest); i { case 0: return &v.state @@ -198445,7 +198445,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[258].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[258].Exporter = func(v any, i int) any { switch v := v.(*DeleteTargetVpnGatewayRequest); i { case 0: return &v.state @@ -198457,7 +198457,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[259].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[259].Exporter = func(v any, i int) any { switch v := v.(*DeleteUrlMapRequest); i { case 0: return &v.state @@ -198469,7 +198469,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[260].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[260].Exporter = func(v any, i int) any { switch v := v.(*DeleteVpnGatewayRequest); i { case 0: return &v.state @@ -198481,7 +198481,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[261].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[261].Exporter = func(v any, i int) any { switch v := v.(*DeleteVpnTunnelRequest); i { case 0: return &v.state @@ -198493,7 +198493,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[262].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[262].Exporter = func(v any, i int) any { switch v := v.(*DeleteZoneOperationRequest); i { case 0: return &v.state @@ -198505,7 +198505,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[263].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[263].Exporter = func(v any, i int) any { switch v := v.(*DeleteZoneOperationResponse); i { case 0: return &v.state @@ -198517,7 +198517,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[264].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[264].Exporter = func(v any, i int) any { switch v := v.(*Denied); i { case 0: return &v.state @@ -198529,7 +198529,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[265].Exporter = func(v interface{}, i 
int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[265].Exporter = func(v any, i int) any { switch v := v.(*DeprecateImageRequest); i { case 0: return &v.state @@ -198541,7 +198541,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[266].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[266].Exporter = func(v any, i int) any { switch v := v.(*DeprecationStatus); i { case 0: return &v.state @@ -198553,7 +198553,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[267].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[267].Exporter = func(v any, i int) any { switch v := v.(*DetachDiskInstanceRequest); i { case 0: return &v.state @@ -198565,7 +198565,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[268].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[268].Exporter = func(v any, i int) any { switch v := v.(*DetachNetworkEndpointsGlobalNetworkEndpointGroupRequest); i { case 0: return &v.state @@ -198577,7 +198577,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[269].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[269].Exporter = func(v any, i int) any { switch v := v.(*DetachNetworkEndpointsNetworkEndpointGroupRequest); i { case 0: return &v.state @@ -198589,7 +198589,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[270].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[270].Exporter = func(v any, i int) any { switch v := v.(*DetachNetworkEndpointsRegionNetworkEndpointGroupRequest); i { case 0: return &v.state @@ -198601,7 +198601,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[271].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[271].Exporter = func(v any, i int) any { switch v := v.(*DisableXpnHostProjectRequest); i { case 0: return &v.state @@ -198613,7 +198613,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[272].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[272].Exporter = func(v any, i int) any { switch v := v.(*DisableXpnResourceProjectRequest); i { case 0: return &v.state @@ -198625,7 +198625,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[273].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[273].Exporter = func(v any, i int) any { switch v := v.(*Disk); i { case 0: return &v.state @@ -198637,7 +198637,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[274].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[274].Exporter = func(v any, i int) 
any { switch v := v.(*DiskAggregatedList); i { case 0: return &v.state @@ -198649,7 +198649,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[275].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[275].Exporter = func(v any, i int) any { switch v := v.(*DiskAsyncReplication); i { case 0: return &v.state @@ -198661,7 +198661,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[276].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[276].Exporter = func(v any, i int) any { switch v := v.(*DiskAsyncReplicationList); i { case 0: return &v.state @@ -198673,7 +198673,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[277].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[277].Exporter = func(v any, i int) any { switch v := v.(*DiskInstantiationConfig); i { case 0: return &v.state @@ -198685,7 +198685,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[278].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[278].Exporter = func(v any, i int) any { switch v := v.(*DiskList); i { case 0: return &v.state @@ -198697,7 +198697,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[279].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[279].Exporter = func(v any, i int) any { switch v := v.(*DiskMoveRequest); i { case 0: return &v.state @@ -198709,7 +198709,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[280].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[280].Exporter = func(v any, i int) any { switch v := v.(*DiskParams); i { case 0: return &v.state @@ -198721,7 +198721,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[281].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[281].Exporter = func(v any, i int) any { switch v := v.(*DiskResourceStatus); i { case 0: return &v.state @@ -198733,7 +198733,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[282].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[282].Exporter = func(v any, i int) any { switch v := v.(*DiskResourceStatusAsyncReplicationStatus); i { case 0: return &v.state @@ -198745,7 +198745,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[283].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[283].Exporter = func(v any, i int) any { switch v := v.(*DiskType); i { case 0: return &v.state @@ -198757,7 +198757,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - 
file_google_cloud_compute_v1_compute_proto_msgTypes[284].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[284].Exporter = func(v any, i int) any { switch v := v.(*DiskTypeAggregatedList); i { case 0: return &v.state @@ -198769,7 +198769,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[285].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[285].Exporter = func(v any, i int) any { switch v := v.(*DiskTypeList); i { case 0: return &v.state @@ -198781,7 +198781,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[286].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[286].Exporter = func(v any, i int) any { switch v := v.(*DiskTypesScopedList); i { case 0: return &v.state @@ -198793,7 +198793,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[287].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[287].Exporter = func(v any, i int) any { switch v := v.(*DisksAddResourcePoliciesRequest); i { case 0: return &v.state @@ -198805,7 +198805,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[288].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[288].Exporter = func(v any, i int) any { switch v := v.(*DisksRemoveResourcePoliciesRequest); i { case 0: return &v.state @@ -198817,7 +198817,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[289].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[289].Exporter = func(v any, i int) any { switch v := v.(*DisksResizeRequest); i { case 0: return &v.state @@ -198829,7 +198829,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[290].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[290].Exporter = func(v any, i int) any { switch v := v.(*DisksScopedList); i { case 0: return &v.state @@ -198841,7 +198841,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[291].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[291].Exporter = func(v any, i int) any { switch v := v.(*DisksStartAsyncReplicationRequest); i { case 0: return &v.state @@ -198853,7 +198853,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[292].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[292].Exporter = func(v any, i int) any { switch v := v.(*DisksStopGroupAsyncReplicationResource); i { case 0: return &v.state @@ -198865,7 +198865,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[293].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[293].Exporter = func(v any, i int) any { switch v := v.(*DisplayDevice); i { case 0: return &v.state @@ -198877,7 +198877,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[294].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[294].Exporter = func(v any, i int) any { switch v := v.(*DistributionPolicy); i { case 0: return &v.state @@ -198889,7 +198889,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[295].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[295].Exporter = func(v any, i int) any { switch v := v.(*DistributionPolicyZoneConfiguration); i { case 0: return &v.state @@ -198901,7 +198901,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[296].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[296].Exporter = func(v any, i int) any { switch v := v.(*Duration); i { case 0: return &v.state @@ -198913,7 +198913,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[297].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[297].Exporter = func(v any, i int) any { switch v := v.(*EnableXpnHostProjectRequest); i { case 0: return &v.state @@ -198925,7 +198925,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[298].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[298].Exporter = func(v any, i int) any { switch v := v.(*EnableXpnResourceProjectRequest); i { case 0: return &v.state @@ -198937,7 +198937,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[299].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[299].Exporter = func(v any, i int) any { switch v := v.(*Error); i { case 0: return &v.state @@ -198949,7 +198949,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[300].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[300].Exporter = func(v any, i int) any { switch v := v.(*ErrorDetails); i { case 0: return &v.state @@ -198961,7 +198961,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[301].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[301].Exporter = func(v any, i int) any { switch v := v.(*ErrorInfo); i { case 0: return &v.state @@ -198973,7 +198973,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[302].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[302].Exporter = func(v any, i int) any { switch v := v.(*Errors); i { case 0: return &v.state @@ -198985,7 +198985,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return 
nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[303].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[303].Exporter = func(v any, i int) any { switch v := v.(*ExchangedPeeringRoute); i { case 0: return &v.state @@ -198997,7 +198997,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[304].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[304].Exporter = func(v any, i int) any { switch v := v.(*ExchangedPeeringRoutesList); i { case 0: return &v.state @@ -199009,7 +199009,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[305].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[305].Exporter = func(v any, i int) any { switch v := v.(*ExpandIpCidrRangeSubnetworkRequest); i { case 0: return &v.state @@ -199021,7 +199021,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[306].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[306].Exporter = func(v any, i int) any { switch v := v.(*Expr); i { case 0: return &v.state @@ -199033,7 +199033,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[307].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[307].Exporter = func(v any, i int) any { switch v := v.(*ExternalVpnGateway); i { case 0: return &v.state @@ -199045,7 +199045,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[308].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[308].Exporter = func(v any, i int) any { switch v := v.(*ExternalVpnGatewayInterface); i { case 0: return &v.state @@ -199057,7 +199057,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[309].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[309].Exporter = func(v any, i int) any { switch v := v.(*ExternalVpnGatewayList); i { case 0: return &v.state @@ -199069,7 +199069,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[310].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[310].Exporter = func(v any, i int) any { switch v := v.(*FileContentBuffer); i { case 0: return &v.state @@ -199081,7 +199081,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[311].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[311].Exporter = func(v any, i int) any { switch v := v.(*Firewall); i { case 0: return &v.state @@ -199093,7 +199093,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[312].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[312].Exporter = func(v any, i int) any { 
switch v := v.(*FirewallList); i { case 0: return &v.state @@ -199105,7 +199105,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[313].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[313].Exporter = func(v any, i int) any { switch v := v.(*FirewallLogConfig); i { case 0: return &v.state @@ -199117,7 +199117,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[314].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[314].Exporter = func(v any, i int) any { switch v := v.(*FirewallPoliciesListAssociationsResponse); i { case 0: return &v.state @@ -199129,7 +199129,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[315].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[315].Exporter = func(v any, i int) any { switch v := v.(*FirewallPolicy); i { case 0: return &v.state @@ -199141,7 +199141,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[316].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[316].Exporter = func(v any, i int) any { switch v := v.(*FirewallPolicyAssociation); i { case 0: return &v.state @@ -199153,7 +199153,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[317].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[317].Exporter = func(v any, i int) any { switch v := v.(*FirewallPolicyList); i { case 0: return &v.state @@ -199165,7 +199165,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[318].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[318].Exporter = func(v any, i int) any { switch v := v.(*FirewallPolicyRule); i { case 0: return &v.state @@ -199177,7 +199177,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[319].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[319].Exporter = func(v any, i int) any { switch v := v.(*FirewallPolicyRuleMatcher); i { case 0: return &v.state @@ -199189,7 +199189,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[320].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[320].Exporter = func(v any, i int) any { switch v := v.(*FirewallPolicyRuleMatcherLayer4Config); i { case 0: return &v.state @@ -199201,7 +199201,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[321].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[321].Exporter = func(v any, i int) any { switch v := v.(*FirewallPolicyRuleSecureTag); i { case 0: return &v.state @@ -199213,7 +199213,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - 
file_google_cloud_compute_v1_compute_proto_msgTypes[322].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[322].Exporter = func(v any, i int) any { switch v := v.(*FixedOrPercent); i { case 0: return &v.state @@ -199225,7 +199225,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[323].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[323].Exporter = func(v any, i int) any { switch v := v.(*ForwardingRule); i { case 0: return &v.state @@ -199237,7 +199237,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[324].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[324].Exporter = func(v any, i int) any { switch v := v.(*ForwardingRuleAggregatedList); i { case 0: return &v.state @@ -199249,7 +199249,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[325].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[325].Exporter = func(v any, i int) any { switch v := v.(*ForwardingRuleList); i { case 0: return &v.state @@ -199261,7 +199261,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[326].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[326].Exporter = func(v any, i int) any { switch v := v.(*ForwardingRuleReference); i { case 0: return &v.state @@ -199273,7 +199273,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[327].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[327].Exporter = func(v any, i int) any { switch v := v.(*ForwardingRuleServiceDirectoryRegistration); i { case 0: return &v.state @@ -199285,7 +199285,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[328].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[328].Exporter = func(v any, i int) any { switch v := v.(*ForwardingRulesScopedList); i { case 0: return &v.state @@ -199297,7 +199297,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[329].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[329].Exporter = func(v any, i int) any { switch v := v.(*GRPCHealthCheck); i { case 0: return &v.state @@ -199309,7 +199309,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[330].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[330].Exporter = func(v any, i int) any { switch v := v.(*GetAcceleratorTypeRequest); i { case 0: return &v.state @@ -199321,7 +199321,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[331].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[331].Exporter = func(v any, 
i int) any { switch v := v.(*GetAddressRequest); i { case 0: return &v.state @@ -199333,7 +199333,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[332].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[332].Exporter = func(v any, i int) any { switch v := v.(*GetAssociationFirewallPolicyRequest); i { case 0: return &v.state @@ -199345,7 +199345,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[333].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[333].Exporter = func(v any, i int) any { switch v := v.(*GetAssociationNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ -199357,7 +199357,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[334].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[334].Exporter = func(v any, i int) any { switch v := v.(*GetAssociationRegionNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ -199369,7 +199369,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[335].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[335].Exporter = func(v any, i int) any { switch v := v.(*GetAutoscalerRequest); i { case 0: return &v.state @@ -199381,7 +199381,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[336].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[336].Exporter = func(v any, i int) any { switch v := v.(*GetBackendBucketRequest); i { case 0: return &v.state @@ -199393,7 +199393,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[337].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[337].Exporter = func(v any, i int) any { switch v := v.(*GetBackendServiceRequest); i { case 0: return &v.state @@ -199405,7 +199405,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[338].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[338].Exporter = func(v any, i int) any { switch v := v.(*GetDiagnosticsInterconnectRequest); i { case 0: return &v.state @@ -199417,7 +199417,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[339].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[339].Exporter = func(v any, i int) any { switch v := v.(*GetDiskRequest); i { case 0: return &v.state @@ -199429,7 +199429,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[340].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[340].Exporter = func(v any, i int) any { switch v := v.(*GetDiskTypeRequest); i { case 0: return &v.state @@ -199441,7 +199441,7 @@ func 
file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[341].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[341].Exporter = func(v any, i int) any { switch v := v.(*GetEffectiveFirewallsInstanceRequest); i { case 0: return &v.state @@ -199453,7 +199453,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[342].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[342].Exporter = func(v any, i int) any { switch v := v.(*GetEffectiveFirewallsNetworkRequest); i { case 0: return &v.state @@ -199465,7 +199465,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[343].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[343].Exporter = func(v any, i int) any { switch v := v.(*GetEffectiveFirewallsRegionNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ -199477,7 +199477,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[344].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[344].Exporter = func(v any, i int) any { switch v := v.(*GetExternalVpnGatewayRequest); i { case 0: return &v.state @@ -199489,7 +199489,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[345].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[345].Exporter = func(v any, i int) any { switch v := v.(*GetFirewallPolicyRequest); i { case 0: return &v.state @@ -199501,7 +199501,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[346].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[346].Exporter = func(v any, i int) any { switch v := v.(*GetFirewallRequest); i { case 0: return &v.state @@ -199513,7 +199513,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[347].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[347].Exporter = func(v any, i int) any { switch v := v.(*GetForwardingRuleRequest); i { case 0: return &v.state @@ -199525,7 +199525,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[348].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[348].Exporter = func(v any, i int) any { switch v := v.(*GetFromFamilyImageRequest); i { case 0: return &v.state @@ -199537,7 +199537,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[349].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[349].Exporter = func(v any, i int) any { switch v := v.(*GetGlobalAddressRequest); i { case 0: return &v.state @@ -199549,7 +199549,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - 
file_google_cloud_compute_v1_compute_proto_msgTypes[350].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[350].Exporter = func(v any, i int) any { switch v := v.(*GetGlobalForwardingRuleRequest); i { case 0: return &v.state @@ -199561,7 +199561,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[351].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[351].Exporter = func(v any, i int) any { switch v := v.(*GetGlobalNetworkEndpointGroupRequest); i { case 0: return &v.state @@ -199573,7 +199573,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[352].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[352].Exporter = func(v any, i int) any { switch v := v.(*GetGlobalOperationRequest); i { case 0: return &v.state @@ -199585,7 +199585,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[353].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[353].Exporter = func(v any, i int) any { switch v := v.(*GetGlobalOrganizationOperationRequest); i { case 0: return &v.state @@ -199597,7 +199597,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[354].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[354].Exporter = func(v any, i int) any { switch v := v.(*GetGlobalPublicDelegatedPrefixeRequest); i { case 0: return &v.state @@ -199609,7 +199609,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[355].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[355].Exporter = func(v any, i int) any { switch v := v.(*GetGuestAttributesInstanceRequest); i { case 0: return &v.state @@ -199621,7 +199621,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[356].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[356].Exporter = func(v any, i int) any { switch v := v.(*GetHealthBackendServiceRequest); i { case 0: return &v.state @@ -199633,7 +199633,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[357].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[357].Exporter = func(v any, i int) any { switch v := v.(*GetHealthCheckRequest); i { case 0: return &v.state @@ -199645,7 +199645,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[358].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[358].Exporter = func(v any, i int) any { switch v := v.(*GetHealthRegionBackendServiceRequest); i { case 0: return &v.state @@ -199657,7 +199657,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[359].Exporter = func(v interface{}, i int) interface{} { 
+ file_google_cloud_compute_v1_compute_proto_msgTypes[359].Exporter = func(v any, i int) any { switch v := v.(*GetHealthTargetPoolRequest); i { case 0: return &v.state @@ -199669,7 +199669,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[360].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[360].Exporter = func(v any, i int) any { switch v := v.(*GetIamPolicyBackendBucketRequest); i { case 0: return &v.state @@ -199681,7 +199681,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[361].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[361].Exporter = func(v any, i int) any { switch v := v.(*GetIamPolicyBackendServiceRequest); i { case 0: return &v.state @@ -199693,7 +199693,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[362].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[362].Exporter = func(v any, i int) any { switch v := v.(*GetIamPolicyDiskRequest); i { case 0: return &v.state @@ -199705,7 +199705,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[363].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[363].Exporter = func(v any, i int) any { switch v := v.(*GetIamPolicyFirewallPolicyRequest); i { case 0: return &v.state @@ -199717,7 +199717,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[364].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[364].Exporter = func(v any, i int) any { switch v := v.(*GetIamPolicyImageRequest); i { case 0: return &v.state @@ -199729,7 +199729,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[365].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[365].Exporter = func(v any, i int) any { switch v := v.(*GetIamPolicyInstanceRequest); i { case 0: return &v.state @@ -199741,7 +199741,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[366].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[366].Exporter = func(v any, i int) any { switch v := v.(*GetIamPolicyInstanceTemplateRequest); i { case 0: return &v.state @@ -199753,7 +199753,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[367].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[367].Exporter = func(v any, i int) any { switch v := v.(*GetIamPolicyInstantSnapshotRequest); i { case 0: return &v.state @@ -199765,7 +199765,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[368].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[368].Exporter = func(v any, i int) any { switch v := 
v.(*GetIamPolicyLicenseRequest); i { case 0: return &v.state @@ -199777,7 +199777,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[369].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[369].Exporter = func(v any, i int) any { switch v := v.(*GetIamPolicyMachineImageRequest); i { case 0: return &v.state @@ -199789,7 +199789,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[370].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[370].Exporter = func(v any, i int) any { switch v := v.(*GetIamPolicyNetworkAttachmentRequest); i { case 0: return &v.state @@ -199801,7 +199801,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[371].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[371].Exporter = func(v any, i int) any { switch v := v.(*GetIamPolicyNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ -199813,7 +199813,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[372].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[372].Exporter = func(v any, i int) any { switch v := v.(*GetIamPolicyNodeGroupRequest); i { case 0: return &v.state @@ -199825,7 +199825,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[373].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[373].Exporter = func(v any, i int) any { switch v := v.(*GetIamPolicyNodeTemplateRequest); i { case 0: return &v.state @@ -199837,7 +199837,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[374].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[374].Exporter = func(v any, i int) any { switch v := v.(*GetIamPolicyRegionBackendServiceRequest); i { case 0: return &v.state @@ -199849,7 +199849,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[375].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[375].Exporter = func(v any, i int) any { switch v := v.(*GetIamPolicyRegionDiskRequest); i { case 0: return &v.state @@ -199861,7 +199861,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[376].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[376].Exporter = func(v any, i int) any { switch v := v.(*GetIamPolicyRegionInstantSnapshotRequest); i { case 0: return &v.state @@ -199873,7 +199873,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[377].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[377].Exporter = func(v any, i int) any { switch v := v.(*GetIamPolicyRegionNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ -199885,7 
+199885,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[378].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[378].Exporter = func(v any, i int) any { switch v := v.(*GetIamPolicyReservationRequest); i { case 0: return &v.state @@ -199897,7 +199897,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[379].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[379].Exporter = func(v any, i int) any { switch v := v.(*GetIamPolicyResourcePolicyRequest); i { case 0: return &v.state @@ -199909,7 +199909,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[380].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[380].Exporter = func(v any, i int) any { switch v := v.(*GetIamPolicyServiceAttachmentRequest); i { case 0: return &v.state @@ -199921,7 +199921,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[381].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[381].Exporter = func(v any, i int) any { switch v := v.(*GetIamPolicySnapshotRequest); i { case 0: return &v.state @@ -199933,7 +199933,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[382].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[382].Exporter = func(v any, i int) any { switch v := v.(*GetIamPolicyStoragePoolRequest); i { case 0: return &v.state @@ -199945,7 +199945,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[383].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[383].Exporter = func(v any, i int) any { switch v := v.(*GetIamPolicySubnetworkRequest); i { case 0: return &v.state @@ -199957,7 +199957,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[384].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[384].Exporter = func(v any, i int) any { switch v := v.(*GetImageFamilyViewRequest); i { case 0: return &v.state @@ -199969,7 +199969,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[385].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[385].Exporter = func(v any, i int) any { switch v := v.(*GetImageRequest); i { case 0: return &v.state @@ -199981,7 +199981,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[386].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[386].Exporter = func(v any, i int) any { switch v := v.(*GetInstanceGroupManagerRequest); i { case 0: return &v.state @@ -199993,7 +199993,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - 
file_google_cloud_compute_v1_compute_proto_msgTypes[387].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[387].Exporter = func(v any, i int) any { switch v := v.(*GetInstanceGroupManagerResizeRequestRequest); i { case 0: return &v.state @@ -200005,7 +200005,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[388].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[388].Exporter = func(v any, i int) any { switch v := v.(*GetInstanceGroupRequest); i { case 0: return &v.state @@ -200017,7 +200017,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[389].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[389].Exporter = func(v any, i int) any { switch v := v.(*GetInstanceRequest); i { case 0: return &v.state @@ -200029,7 +200029,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[390].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[390].Exporter = func(v any, i int) any { switch v := v.(*GetInstanceSettingRequest); i { case 0: return &v.state @@ -200041,7 +200041,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[391].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[391].Exporter = func(v any, i int) any { switch v := v.(*GetInstanceTemplateRequest); i { case 0: return &v.state @@ -200053,7 +200053,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[392].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[392].Exporter = func(v any, i int) any { switch v := v.(*GetInstantSnapshotRequest); i { case 0: return &v.state @@ -200065,7 +200065,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[393].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[393].Exporter = func(v any, i int) any { switch v := v.(*GetInterconnectAttachmentRequest); i { case 0: return &v.state @@ -200077,7 +200077,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[394].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[394].Exporter = func(v any, i int) any { switch v := v.(*GetInterconnectLocationRequest); i { case 0: return &v.state @@ -200089,7 +200089,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[395].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[395].Exporter = func(v any, i int) any { switch v := v.(*GetInterconnectRemoteLocationRequest); i { case 0: return &v.state @@ -200101,7 +200101,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[396].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[396].Exporter = func(v any, i int) any { switch v := v.(*GetInterconnectRequest); i { case 0: return &v.state @@ -200113,7 +200113,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[397].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[397].Exporter = func(v any, i int) any { switch v := v.(*GetLicenseCodeRequest); i { case 0: return &v.state @@ -200125,7 +200125,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[398].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[398].Exporter = func(v any, i int) any { switch v := v.(*GetLicenseRequest); i { case 0: return &v.state @@ -200137,7 +200137,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[399].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[399].Exporter = func(v any, i int) any { switch v := v.(*GetMachineImageRequest); i { case 0: return &v.state @@ -200149,7 +200149,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[400].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[400].Exporter = func(v any, i int) any { switch v := v.(*GetMachineTypeRequest); i { case 0: return &v.state @@ -200161,7 +200161,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[401].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[401].Exporter = func(v any, i int) any { switch v := v.(*GetMacsecConfigInterconnectRequest); i { case 0: return &v.state @@ -200173,7 +200173,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[402].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[402].Exporter = func(v any, i int) any { switch v := v.(*GetNatIpInfoRouterRequest); i { case 0: return &v.state @@ -200185,7 +200185,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[403].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[403].Exporter = func(v any, i int) any { switch v := v.(*GetNatMappingInfoRoutersRequest); i { case 0: return &v.state @@ -200197,7 +200197,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[404].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[404].Exporter = func(v any, i int) any { switch v := v.(*GetNetworkAttachmentRequest); i { case 0: return &v.state @@ -200209,7 +200209,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[405].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[405].Exporter = func(v any, i int) any { switch v := v.(*GetNetworkEdgeSecurityServiceRequest); i { case 0: return &v.state 
@@ -200221,7 +200221,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[406].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[406].Exporter = func(v any, i int) any { switch v := v.(*GetNetworkEndpointGroupRequest); i { case 0: return &v.state @@ -200233,7 +200233,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[407].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[407].Exporter = func(v any, i int) any { switch v := v.(*GetNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ -200245,7 +200245,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[408].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[408].Exporter = func(v any, i int) any { switch v := v.(*GetNetworkRequest); i { case 0: return &v.state @@ -200257,7 +200257,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[409].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[409].Exporter = func(v any, i int) any { switch v := v.(*GetNodeGroupRequest); i { case 0: return &v.state @@ -200269,7 +200269,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[410].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[410].Exporter = func(v any, i int) any { switch v := v.(*GetNodeTemplateRequest); i { case 0: return &v.state @@ -200281,7 +200281,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[411].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[411].Exporter = func(v any, i int) any { switch v := v.(*GetNodeTypeRequest); i { case 0: return &v.state @@ -200293,7 +200293,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[412].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[412].Exporter = func(v any, i int) any { switch v := v.(*GetPacketMirroringRequest); i { case 0: return &v.state @@ -200305,7 +200305,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[413].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[413].Exporter = func(v any, i int) any { switch v := v.(*GetProjectRequest); i { case 0: return &v.state @@ -200317,7 +200317,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[414].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[414].Exporter = func(v any, i int) any { switch v := v.(*GetPublicAdvertisedPrefixeRequest); i { case 0: return &v.state @@ -200329,7 +200329,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[415].Exporter = func(v 
interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[415].Exporter = func(v any, i int) any { switch v := v.(*GetPublicDelegatedPrefixeRequest); i { case 0: return &v.state @@ -200341,7 +200341,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[416].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[416].Exporter = func(v any, i int) any { switch v := v.(*GetRegionAutoscalerRequest); i { case 0: return &v.state @@ -200353,7 +200353,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[417].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[417].Exporter = func(v any, i int) any { switch v := v.(*GetRegionBackendServiceRequest); i { case 0: return &v.state @@ -200365,7 +200365,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[418].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[418].Exporter = func(v any, i int) any { switch v := v.(*GetRegionCommitmentRequest); i { case 0: return &v.state @@ -200377,7 +200377,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[419].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[419].Exporter = func(v any, i int) any { switch v := v.(*GetRegionDiskRequest); i { case 0: return &v.state @@ -200389,7 +200389,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[420].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[420].Exporter = func(v any, i int) any { switch v := v.(*GetRegionDiskTypeRequest); i { case 0: return &v.state @@ -200401,7 +200401,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[421].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[421].Exporter = func(v any, i int) any { switch v := v.(*GetRegionHealthCheckRequest); i { case 0: return &v.state @@ -200413,7 +200413,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[422].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[422].Exporter = func(v any, i int) any { switch v := v.(*GetRegionHealthCheckServiceRequest); i { case 0: return &v.state @@ -200425,7 +200425,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[423].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[423].Exporter = func(v any, i int) any { switch v := v.(*GetRegionInstanceGroupManagerRequest); i { case 0: return &v.state @@ -200437,7 +200437,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[424].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[424].Exporter = func(v any, i int) any { switch v := 
v.(*GetRegionInstanceGroupRequest); i { case 0: return &v.state @@ -200449,7 +200449,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[425].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[425].Exporter = func(v any, i int) any { switch v := v.(*GetRegionInstanceTemplateRequest); i { case 0: return &v.state @@ -200461,7 +200461,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[426].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[426].Exporter = func(v any, i int) any { switch v := v.(*GetRegionInstantSnapshotRequest); i { case 0: return &v.state @@ -200473,7 +200473,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[427].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[427].Exporter = func(v any, i int) any { switch v := v.(*GetRegionNetworkEndpointGroupRequest); i { case 0: return &v.state @@ -200485,7 +200485,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[428].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[428].Exporter = func(v any, i int) any { switch v := v.(*GetRegionNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ -200497,7 +200497,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[429].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[429].Exporter = func(v any, i int) any { switch v := v.(*GetRegionNotificationEndpointRequest); i { case 0: return &v.state @@ -200509,7 +200509,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[430].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[430].Exporter = func(v any, i int) any { switch v := v.(*GetRegionOperationRequest); i { case 0: return &v.state @@ -200521,7 +200521,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[431].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[431].Exporter = func(v any, i int) any { switch v := v.(*GetRegionRequest); i { case 0: return &v.state @@ -200533,7 +200533,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[432].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[432].Exporter = func(v any, i int) any { switch v := v.(*GetRegionSecurityPolicyRequest); i { case 0: return &v.state @@ -200545,7 +200545,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[433].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[433].Exporter = func(v any, i int) any { switch v := v.(*GetRegionSslCertificateRequest); i { case 0: return &v.state @@ -200557,7 +200557,7 @@ func 
file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[434].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[434].Exporter = func(v any, i int) any { switch v := v.(*GetRegionSslPolicyRequest); i { case 0: return &v.state @@ -200569,7 +200569,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[435].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[435].Exporter = func(v any, i int) any { switch v := v.(*GetRegionTargetHttpProxyRequest); i { case 0: return &v.state @@ -200581,7 +200581,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[436].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[436].Exporter = func(v any, i int) any { switch v := v.(*GetRegionTargetHttpsProxyRequest); i { case 0: return &v.state @@ -200593,7 +200593,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[437].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[437].Exporter = func(v any, i int) any { switch v := v.(*GetRegionTargetTcpProxyRequest); i { case 0: return &v.state @@ -200605,7 +200605,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[438].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[438].Exporter = func(v any, i int) any { switch v := v.(*GetRegionUrlMapRequest); i { case 0: return &v.state @@ -200617,7 +200617,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[439].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[439].Exporter = func(v any, i int) any { switch v := v.(*GetReservationRequest); i { case 0: return &v.state @@ -200629,7 +200629,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[440].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[440].Exporter = func(v any, i int) any { switch v := v.(*GetResourcePolicyRequest); i { case 0: return &v.state @@ -200641,7 +200641,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[441].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[441].Exporter = func(v any, i int) any { switch v := v.(*GetRouteRequest); i { case 0: return &v.state @@ -200653,7 +200653,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[442].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[442].Exporter = func(v any, i int) any { switch v := v.(*GetRouterRequest); i { case 0: return &v.state @@ -200665,7 +200665,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[443].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[443].Exporter = func(v any, i int) any { switch v := v.(*GetRouterStatusRouterRequest); i { case 0: return &v.state @@ -200677,7 +200677,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[444].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[444].Exporter = func(v any, i int) any { switch v := v.(*GetRuleFirewallPolicyRequest); i { case 0: return &v.state @@ -200689,7 +200689,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[445].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[445].Exporter = func(v any, i int) any { switch v := v.(*GetRuleNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ -200701,7 +200701,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[446].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[446].Exporter = func(v any, i int) any { switch v := v.(*GetRuleRegionNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ -200713,7 +200713,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[447].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[447].Exporter = func(v any, i int) any { switch v := v.(*GetRuleRegionSecurityPolicyRequest); i { case 0: return &v.state @@ -200725,7 +200725,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[448].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[448].Exporter = func(v any, i int) any { switch v := v.(*GetRuleSecurityPolicyRequest); i { case 0: return &v.state @@ -200737,7 +200737,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[449].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[449].Exporter = func(v any, i int) any { switch v := v.(*GetScreenshotInstanceRequest); i { case 0: return &v.state @@ -200749,7 +200749,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[450].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[450].Exporter = func(v any, i int) any { switch v := v.(*GetSecurityPolicyRequest); i { case 0: return &v.state @@ -200761,7 +200761,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[451].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[451].Exporter = func(v any, i int) any { switch v := v.(*GetSerialPortOutputInstanceRequest); i { case 0: return &v.state @@ -200773,7 +200773,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[452].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[452].Exporter = func(v any, i int) any { switch v := 
v.(*GetServiceAttachmentRequest); i { case 0: return &v.state @@ -200785,7 +200785,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[453].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[453].Exporter = func(v any, i int) any { switch v := v.(*GetShieldedInstanceIdentityInstanceRequest); i { case 0: return &v.state @@ -200797,7 +200797,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[454].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[454].Exporter = func(v any, i int) any { switch v := v.(*GetSnapshotRequest); i { case 0: return &v.state @@ -200809,7 +200809,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[455].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[455].Exporter = func(v any, i int) any { switch v := v.(*GetSnapshotSettingRequest); i { case 0: return &v.state @@ -200821,7 +200821,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[456].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[456].Exporter = func(v any, i int) any { switch v := v.(*GetSslCertificateRequest); i { case 0: return &v.state @@ -200833,7 +200833,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[457].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[457].Exporter = func(v any, i int) any { switch v := v.(*GetSslPolicyRequest); i { case 0: return &v.state @@ -200845,7 +200845,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[458].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[458].Exporter = func(v any, i int) any { switch v := v.(*GetStatusVpnGatewayRequest); i { case 0: return &v.state @@ -200857,7 +200857,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[459].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[459].Exporter = func(v any, i int) any { switch v := v.(*GetStoragePoolRequest); i { case 0: return &v.state @@ -200869,7 +200869,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[460].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[460].Exporter = func(v any, i int) any { switch v := v.(*GetStoragePoolTypeRequest); i { case 0: return &v.state @@ -200881,7 +200881,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[461].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[461].Exporter = func(v any, i int) any { switch v := v.(*GetSubnetworkRequest); i { case 0: return &v.state @@ -200893,7 +200893,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - 
file_google_cloud_compute_v1_compute_proto_msgTypes[462].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[462].Exporter = func(v any, i int) any { switch v := v.(*GetTargetGrpcProxyRequest); i { case 0: return &v.state @@ -200905,7 +200905,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[463].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[463].Exporter = func(v any, i int) any { switch v := v.(*GetTargetHttpProxyRequest); i { case 0: return &v.state @@ -200917,7 +200917,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[464].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[464].Exporter = func(v any, i int) any { switch v := v.(*GetTargetHttpsProxyRequest); i { case 0: return &v.state @@ -200929,7 +200929,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[465].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[465].Exporter = func(v any, i int) any { switch v := v.(*GetTargetInstanceRequest); i { case 0: return &v.state @@ -200941,7 +200941,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[466].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[466].Exporter = func(v any, i int) any { switch v := v.(*GetTargetPoolRequest); i { case 0: return &v.state @@ -200953,7 +200953,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[467].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[467].Exporter = func(v any, i int) any { switch v := v.(*GetTargetSslProxyRequest); i { case 0: return &v.state @@ -200965,7 +200965,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[468].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[468].Exporter = func(v any, i int) any { switch v := v.(*GetTargetTcpProxyRequest); i { case 0: return &v.state @@ -200977,7 +200977,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[469].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[469].Exporter = func(v any, i int) any { switch v := v.(*GetTargetVpnGatewayRequest); i { case 0: return &v.state @@ -200989,7 +200989,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[470].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[470].Exporter = func(v any, i int) any { switch v := v.(*GetUrlMapRequest); i { case 0: return &v.state @@ -201001,7 +201001,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[471].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[471].Exporter = func(v 
any, i int) any { switch v := v.(*GetVpnGatewayRequest); i { case 0: return &v.state @@ -201013,7 +201013,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[472].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[472].Exporter = func(v any, i int) any { switch v := v.(*GetVpnTunnelRequest); i { case 0: return &v.state @@ -201025,7 +201025,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[473].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[473].Exporter = func(v any, i int) any { switch v := v.(*GetXpnHostProjectRequest); i { case 0: return &v.state @@ -201037,7 +201037,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[474].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[474].Exporter = func(v any, i int) any { switch v := v.(*GetXpnResourcesProjectsRequest); i { case 0: return &v.state @@ -201049,7 +201049,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[475].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[475].Exporter = func(v any, i int) any { switch v := v.(*GetZoneOperationRequest); i { case 0: return &v.state @@ -201061,7 +201061,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[476].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[476].Exporter = func(v any, i int) any { switch v := v.(*GetZoneRequest); i { case 0: return &v.state @@ -201073,7 +201073,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[477].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[477].Exporter = func(v any, i int) any { switch v := v.(*GlobalAddressesMoveRequest); i { case 0: return &v.state @@ -201085,7 +201085,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[478].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[478].Exporter = func(v any, i int) any { switch v := v.(*GlobalNetworkEndpointGroupsAttachEndpointsRequest); i { case 0: return &v.state @@ -201097,7 +201097,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[479].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[479].Exporter = func(v any, i int) any { switch v := v.(*GlobalNetworkEndpointGroupsDetachEndpointsRequest); i { case 0: return &v.state @@ -201109,7 +201109,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[480].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[480].Exporter = func(v any, i int) any { switch v := v.(*GlobalOrganizationSetPolicyRequest); i { case 0: return &v.state @@ -201121,7 +201121,7 @@ func 
file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[481].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[481].Exporter = func(v any, i int) any { switch v := v.(*GlobalSetLabelsRequest); i { case 0: return &v.state @@ -201133,7 +201133,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[482].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[482].Exporter = func(v any, i int) any { switch v := v.(*GlobalSetPolicyRequest); i { case 0: return &v.state @@ -201145,7 +201145,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[483].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[483].Exporter = func(v any, i int) any { switch v := v.(*GuestAttributes); i { case 0: return &v.state @@ -201157,7 +201157,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[484].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[484].Exporter = func(v any, i int) any { switch v := v.(*GuestAttributesEntry); i { case 0: return &v.state @@ -201169,7 +201169,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[485].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[485].Exporter = func(v any, i int) any { switch v := v.(*GuestAttributesValue); i { case 0: return &v.state @@ -201181,7 +201181,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[486].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[486].Exporter = func(v any, i int) any { switch v := v.(*GuestOsFeature); i { case 0: return &v.state @@ -201193,7 +201193,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[487].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[487].Exporter = func(v any, i int) any { switch v := v.(*HTTP2HealthCheck); i { case 0: return &v.state @@ -201205,7 +201205,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[488].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[488].Exporter = func(v any, i int) any { switch v := v.(*HTTPHealthCheck); i { case 0: return &v.state @@ -201217,7 +201217,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[489].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[489].Exporter = func(v any, i int) any { switch v := v.(*HTTPSHealthCheck); i { case 0: return &v.state @@ -201229,7 +201229,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[490].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[490].Exporter = func(v any, i int) any { switch v := v.(*HealthCheck); i { case 0: return &v.state @@ -201241,7 +201241,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[491].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[491].Exporter = func(v any, i int) any { switch v := v.(*HealthCheckList); i { case 0: return &v.state @@ -201253,7 +201253,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[492].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[492].Exporter = func(v any, i int) any { switch v := v.(*HealthCheckLogConfig); i { case 0: return &v.state @@ -201265,7 +201265,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[493].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[493].Exporter = func(v any, i int) any { switch v := v.(*HealthCheckReference); i { case 0: return &v.state @@ -201277,7 +201277,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[494].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[494].Exporter = func(v any, i int) any { switch v := v.(*HealthCheckService); i { case 0: return &v.state @@ -201289,7 +201289,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[495].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[495].Exporter = func(v any, i int) any { switch v := v.(*HealthCheckServiceReference); i { case 0: return &v.state @@ -201301,7 +201301,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[496].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[496].Exporter = func(v any, i int) any { switch v := v.(*HealthCheckServicesList); i { case 0: return &v.state @@ -201313,7 +201313,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[497].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[497].Exporter = func(v any, i int) any { switch v := v.(*HealthChecksAggregatedList); i { case 0: return &v.state @@ -201325,7 +201325,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[498].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[498].Exporter = func(v any, i int) any { switch v := v.(*HealthChecksScopedList); i { case 0: return &v.state @@ -201337,7 +201337,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[499].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[499].Exporter = func(v any, i int) any { switch v := v.(*HealthStatus); i { case 0: return &v.state @@ -201349,7 +201349,7 @@ func 
file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[500].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[500].Exporter = func(v any, i int) any { switch v := v.(*HealthStatusForNetworkEndpoint); i { case 0: return &v.state @@ -201361,7 +201361,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[501].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[501].Exporter = func(v any, i int) any { switch v := v.(*Help); i { case 0: return &v.state @@ -201373,7 +201373,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[502].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[502].Exporter = func(v any, i int) any { switch v := v.(*HelpLink); i { case 0: return &v.state @@ -201385,7 +201385,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[503].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[503].Exporter = func(v any, i int) any { switch v := v.(*HostRule); i { case 0: return &v.state @@ -201397,7 +201397,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[504].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[504].Exporter = func(v any, i int) any { switch v := v.(*HttpFaultAbort); i { case 0: return &v.state @@ -201409,7 +201409,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[505].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[505].Exporter = func(v any, i int) any { switch v := v.(*HttpFaultDelay); i { case 0: return &v.state @@ -201421,7 +201421,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[506].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[506].Exporter = func(v any, i int) any { switch v := v.(*HttpFaultInjection); i { case 0: return &v.state @@ -201433,7 +201433,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[507].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[507].Exporter = func(v any, i int) any { switch v := v.(*HttpHeaderAction); i { case 0: return &v.state @@ -201445,7 +201445,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[508].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[508].Exporter = func(v any, i int) any { switch v := v.(*HttpHeaderMatch); i { case 0: return &v.state @@ -201457,7 +201457,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[509].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[509].Exporter = func(v any, i int) 
any { switch v := v.(*HttpHeaderOption); i { case 0: return &v.state @@ -201469,7 +201469,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[510].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[510].Exporter = func(v any, i int) any { switch v := v.(*HttpQueryParameterMatch); i { case 0: return &v.state @@ -201481,7 +201481,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[511].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[511].Exporter = func(v any, i int) any { switch v := v.(*HttpRedirectAction); i { case 0: return &v.state @@ -201493,7 +201493,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[512].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[512].Exporter = func(v any, i int) any { switch v := v.(*HttpRetryPolicy); i { case 0: return &v.state @@ -201505,7 +201505,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[513].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[513].Exporter = func(v any, i int) any { switch v := v.(*HttpRouteAction); i { case 0: return &v.state @@ -201517,7 +201517,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[514].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[514].Exporter = func(v any, i int) any { switch v := v.(*HttpRouteRule); i { case 0: return &v.state @@ -201529,7 +201529,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[515].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[515].Exporter = func(v any, i int) any { switch v := v.(*HttpRouteRuleMatch); i { case 0: return &v.state @@ -201541,7 +201541,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[516].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[516].Exporter = func(v any, i int) any { switch v := v.(*Image); i { case 0: return &v.state @@ -201553,7 +201553,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[517].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[517].Exporter = func(v any, i int) any { switch v := v.(*ImageFamilyView); i { case 0: return &v.state @@ -201565,7 +201565,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[518].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[518].Exporter = func(v any, i int) any { switch v := v.(*ImageList); i { case 0: return &v.state @@ -201577,7 +201577,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[519].Exporter = func(v interface{}, i int) 
interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[519].Exporter = func(v any, i int) any { switch v := v.(*InitialStateConfig); i { case 0: return &v.state @@ -201589,7 +201589,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[520].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[520].Exporter = func(v any, i int) any { switch v := v.(*InsertAddressRequest); i { case 0: return &v.state @@ -201601,7 +201601,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[521].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[521].Exporter = func(v any, i int) any { switch v := v.(*InsertAutoscalerRequest); i { case 0: return &v.state @@ -201613,7 +201613,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[522].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[522].Exporter = func(v any, i int) any { switch v := v.(*InsertBackendBucketRequest); i { case 0: return &v.state @@ -201625,7 +201625,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[523].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[523].Exporter = func(v any, i int) any { switch v := v.(*InsertBackendServiceRequest); i { case 0: return &v.state @@ -201637,7 +201637,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[524].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[524].Exporter = func(v any, i int) any { switch v := v.(*InsertDiskRequest); i { case 0: return &v.state @@ -201649,7 +201649,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[525].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[525].Exporter = func(v any, i int) any { switch v := v.(*InsertExternalVpnGatewayRequest); i { case 0: return &v.state @@ -201661,7 +201661,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[526].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[526].Exporter = func(v any, i int) any { switch v := v.(*InsertFirewallPolicyRequest); i { case 0: return &v.state @@ -201673,7 +201673,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[527].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[527].Exporter = func(v any, i int) any { switch v := v.(*InsertFirewallRequest); i { case 0: return &v.state @@ -201685,7 +201685,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[528].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[528].Exporter = func(v any, i int) any { switch v := v.(*InsertForwardingRuleRequest); i { case 0: return &v.state @@ 
-201697,7 +201697,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[529].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[529].Exporter = func(v any, i int) any { switch v := v.(*InsertGlobalAddressRequest); i { case 0: return &v.state @@ -201709,7 +201709,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[530].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[530].Exporter = func(v any, i int) any { switch v := v.(*InsertGlobalForwardingRuleRequest); i { case 0: return &v.state @@ -201721,7 +201721,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[531].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[531].Exporter = func(v any, i int) any { switch v := v.(*InsertGlobalNetworkEndpointGroupRequest); i { case 0: return &v.state @@ -201733,7 +201733,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[532].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[532].Exporter = func(v any, i int) any { switch v := v.(*InsertGlobalPublicDelegatedPrefixeRequest); i { case 0: return &v.state @@ -201745,7 +201745,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[533].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[533].Exporter = func(v any, i int) any { switch v := v.(*InsertHealthCheckRequest); i { case 0: return &v.state @@ -201757,7 +201757,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[534].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[534].Exporter = func(v any, i int) any { switch v := v.(*InsertImageRequest); i { case 0: return &v.state @@ -201769,7 +201769,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[535].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[535].Exporter = func(v any, i int) any { switch v := v.(*InsertInstanceGroupManagerRequest); i { case 0: return &v.state @@ -201781,7 +201781,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[536].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[536].Exporter = func(v any, i int) any { switch v := v.(*InsertInstanceGroupManagerResizeRequestRequest); i { case 0: return &v.state @@ -201793,7 +201793,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[537].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[537].Exporter = func(v any, i int) any { switch v := v.(*InsertInstanceGroupRequest); i { case 0: return &v.state @@ -201805,7 +201805,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - 
file_google_cloud_compute_v1_compute_proto_msgTypes[538].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[538].Exporter = func(v any, i int) any { switch v := v.(*InsertInstanceRequest); i { case 0: return &v.state @@ -201817,7 +201817,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[539].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[539].Exporter = func(v any, i int) any { switch v := v.(*InsertInstanceTemplateRequest); i { case 0: return &v.state @@ -201829,7 +201829,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[540].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[540].Exporter = func(v any, i int) any { switch v := v.(*InsertInstantSnapshotRequest); i { case 0: return &v.state @@ -201841,7 +201841,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[541].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[541].Exporter = func(v any, i int) any { switch v := v.(*InsertInterconnectAttachmentRequest); i { case 0: return &v.state @@ -201853,7 +201853,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[542].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[542].Exporter = func(v any, i int) any { switch v := v.(*InsertInterconnectRequest); i { case 0: return &v.state @@ -201865,7 +201865,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[543].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[543].Exporter = func(v any, i int) any { switch v := v.(*InsertLicenseRequest); i { case 0: return &v.state @@ -201877,7 +201877,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[544].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[544].Exporter = func(v any, i int) any { switch v := v.(*InsertMachineImageRequest); i { case 0: return &v.state @@ -201889,7 +201889,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[545].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[545].Exporter = func(v any, i int) any { switch v := v.(*InsertNetworkAttachmentRequest); i { case 0: return &v.state @@ -201901,7 +201901,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[546].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[546].Exporter = func(v any, i int) any { switch v := v.(*InsertNetworkEdgeSecurityServiceRequest); i { case 0: return &v.state @@ -201913,7 +201913,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[547].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[547].Exporter = func(v any, i int) any { switch v := v.(*InsertNetworkEndpointGroupRequest); i { case 0: return &v.state @@ -201925,7 +201925,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[548].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[548].Exporter = func(v any, i int) any { switch v := v.(*InsertNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ -201937,7 +201937,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[549].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[549].Exporter = func(v any, i int) any { switch v := v.(*InsertNetworkRequest); i { case 0: return &v.state @@ -201949,7 +201949,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[550].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[550].Exporter = func(v any, i int) any { switch v := v.(*InsertNodeGroupRequest); i { case 0: return &v.state @@ -201961,7 +201961,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[551].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[551].Exporter = func(v any, i int) any { switch v := v.(*InsertNodeTemplateRequest); i { case 0: return &v.state @@ -201973,7 +201973,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[552].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[552].Exporter = func(v any, i int) any { switch v := v.(*InsertPacketMirroringRequest); i { case 0: return &v.state @@ -201985,7 +201985,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[553].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[553].Exporter = func(v any, i int) any { switch v := v.(*InsertPublicAdvertisedPrefixeRequest); i { case 0: return &v.state @@ -201997,7 +201997,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[554].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[554].Exporter = func(v any, i int) any { switch v := v.(*InsertPublicDelegatedPrefixeRequest); i { case 0: return &v.state @@ -202009,7 +202009,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[555].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[555].Exporter = func(v any, i int) any { switch v := v.(*InsertRegionAutoscalerRequest); i { case 0: return &v.state @@ -202021,7 +202021,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[556].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[556].Exporter = func(v any, i int) any { switch v := 
v.(*InsertRegionBackendServiceRequest); i { case 0: return &v.state @@ -202033,7 +202033,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[557].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[557].Exporter = func(v any, i int) any { switch v := v.(*InsertRegionCommitmentRequest); i { case 0: return &v.state @@ -202045,7 +202045,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[558].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[558].Exporter = func(v any, i int) any { switch v := v.(*InsertRegionDiskRequest); i { case 0: return &v.state @@ -202057,7 +202057,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[559].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[559].Exporter = func(v any, i int) any { switch v := v.(*InsertRegionHealthCheckRequest); i { case 0: return &v.state @@ -202069,7 +202069,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[560].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[560].Exporter = func(v any, i int) any { switch v := v.(*InsertRegionHealthCheckServiceRequest); i { case 0: return &v.state @@ -202081,7 +202081,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[561].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[561].Exporter = func(v any, i int) any { switch v := v.(*InsertRegionInstanceGroupManagerRequest); i { case 0: return &v.state @@ -202093,7 +202093,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[562].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[562].Exporter = func(v any, i int) any { switch v := v.(*InsertRegionInstanceTemplateRequest); i { case 0: return &v.state @@ -202105,7 +202105,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[563].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[563].Exporter = func(v any, i int) any { switch v := v.(*InsertRegionInstantSnapshotRequest); i { case 0: return &v.state @@ -202117,7 +202117,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[564].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[564].Exporter = func(v any, i int) any { switch v := v.(*InsertRegionNetworkEndpointGroupRequest); i { case 0: return &v.state @@ -202129,7 +202129,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[565].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[565].Exporter = func(v any, i int) any { switch v := v.(*InsertRegionNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ -202141,7 
+202141,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[566].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[566].Exporter = func(v any, i int) any { switch v := v.(*InsertRegionNotificationEndpointRequest); i { case 0: return &v.state @@ -202153,7 +202153,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[567].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[567].Exporter = func(v any, i int) any { switch v := v.(*InsertRegionSecurityPolicyRequest); i { case 0: return &v.state @@ -202165,7 +202165,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[568].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[568].Exporter = func(v any, i int) any { switch v := v.(*InsertRegionSslCertificateRequest); i { case 0: return &v.state @@ -202177,7 +202177,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[569].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[569].Exporter = func(v any, i int) any { switch v := v.(*InsertRegionSslPolicyRequest); i { case 0: return &v.state @@ -202189,7 +202189,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[570].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[570].Exporter = func(v any, i int) any { switch v := v.(*InsertRegionTargetHttpProxyRequest); i { case 0: return &v.state @@ -202201,7 +202201,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[571].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[571].Exporter = func(v any, i int) any { switch v := v.(*InsertRegionTargetHttpsProxyRequest); i { case 0: return &v.state @@ -202213,7 +202213,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[572].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[572].Exporter = func(v any, i int) any { switch v := v.(*InsertRegionTargetTcpProxyRequest); i { case 0: return &v.state @@ -202225,7 +202225,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[573].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[573].Exporter = func(v any, i int) any { switch v := v.(*InsertRegionUrlMapRequest); i { case 0: return &v.state @@ -202237,7 +202237,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[574].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[574].Exporter = func(v any, i int) any { switch v := v.(*InsertReservationRequest); i { case 0: return &v.state @@ -202249,7 +202249,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - 
file_google_cloud_compute_v1_compute_proto_msgTypes[575].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[575].Exporter = func(v any, i int) any { switch v := v.(*InsertResourcePolicyRequest); i { case 0: return &v.state @@ -202261,7 +202261,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[576].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[576].Exporter = func(v any, i int) any { switch v := v.(*InsertRouteRequest); i { case 0: return &v.state @@ -202273,7 +202273,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[577].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[577].Exporter = func(v any, i int) any { switch v := v.(*InsertRouterRequest); i { case 0: return &v.state @@ -202285,7 +202285,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[578].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[578].Exporter = func(v any, i int) any { switch v := v.(*InsertSecurityPolicyRequest); i { case 0: return &v.state @@ -202297,7 +202297,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[579].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[579].Exporter = func(v any, i int) any { switch v := v.(*InsertServiceAttachmentRequest); i { case 0: return &v.state @@ -202309,7 +202309,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[580].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[580].Exporter = func(v any, i int) any { switch v := v.(*InsertSnapshotRequest); i { case 0: return &v.state @@ -202321,7 +202321,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[581].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[581].Exporter = func(v any, i int) any { switch v := v.(*InsertSslCertificateRequest); i { case 0: return &v.state @@ -202333,7 +202333,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[582].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[582].Exporter = func(v any, i int) any { switch v := v.(*InsertSslPolicyRequest); i { case 0: return &v.state @@ -202345,7 +202345,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[583].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[583].Exporter = func(v any, i int) any { switch v := v.(*InsertStoragePoolRequest); i { case 0: return &v.state @@ -202357,7 +202357,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[584].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[584].Exporter = 
func(v any, i int) any { switch v := v.(*InsertSubnetworkRequest); i { case 0: return &v.state @@ -202369,7 +202369,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[585].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[585].Exporter = func(v any, i int) any { switch v := v.(*InsertTargetGrpcProxyRequest); i { case 0: return &v.state @@ -202381,7 +202381,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[586].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[586].Exporter = func(v any, i int) any { switch v := v.(*InsertTargetHttpProxyRequest); i { case 0: return &v.state @@ -202393,7 +202393,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[587].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[587].Exporter = func(v any, i int) any { switch v := v.(*InsertTargetHttpsProxyRequest); i { case 0: return &v.state @@ -202405,7 +202405,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[588].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[588].Exporter = func(v any, i int) any { switch v := v.(*InsertTargetInstanceRequest); i { case 0: return &v.state @@ -202417,7 +202417,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[589].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[589].Exporter = func(v any, i int) any { switch v := v.(*InsertTargetPoolRequest); i { case 0: return &v.state @@ -202429,7 +202429,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[590].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[590].Exporter = func(v any, i int) any { switch v := v.(*InsertTargetSslProxyRequest); i { case 0: return &v.state @@ -202441,7 +202441,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[591].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[591].Exporter = func(v any, i int) any { switch v := v.(*InsertTargetTcpProxyRequest); i { case 0: return &v.state @@ -202453,7 +202453,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[592].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[592].Exporter = func(v any, i int) any { switch v := v.(*InsertTargetVpnGatewayRequest); i { case 0: return &v.state @@ -202465,7 +202465,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[593].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[593].Exporter = func(v any, i int) any { switch v := v.(*InsertUrlMapRequest); i { case 0: return &v.state @@ -202477,7 +202477,7 @@ func 
file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[594].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[594].Exporter = func(v any, i int) any { switch v := v.(*InsertVpnGatewayRequest); i { case 0: return &v.state @@ -202489,7 +202489,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[595].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[595].Exporter = func(v any, i int) any { switch v := v.(*InsertVpnTunnelRequest); i { case 0: return &v.state @@ -202501,7 +202501,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[596].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[596].Exporter = func(v any, i int) any { switch v := v.(*Instance); i { case 0: return &v.state @@ -202513,7 +202513,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[597].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[597].Exporter = func(v any, i int) any { switch v := v.(*InstanceAggregatedList); i { case 0: return &v.state @@ -202525,7 +202525,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[598].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[598].Exporter = func(v any, i int) any { switch v := v.(*InstanceConsumptionData); i { case 0: return &v.state @@ -202537,7 +202537,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[599].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[599].Exporter = func(v any, i int) any { switch v := v.(*InstanceConsumptionInfo); i { case 0: return &v.state @@ -202549,7 +202549,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[600].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[600].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroup); i { case 0: return &v.state @@ -202561,7 +202561,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[601].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[601].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupAggregatedList); i { case 0: return &v.state @@ -202573,7 +202573,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[602].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[602].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupList); i { case 0: return &v.state @@ -202585,7 +202585,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[603].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[603].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManager); i { case 0: return &v.state @@ -202597,7 +202597,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[604].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[604].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManagerActionsSummary); i { case 0: return &v.state @@ -202609,7 +202609,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[605].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[605].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManagerAggregatedList); i { case 0: return &v.state @@ -202621,7 +202621,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[606].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[606].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManagerAllInstancesConfig); i { case 0: return &v.state @@ -202633,7 +202633,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[607].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[607].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManagerAutoHealingPolicy); i { case 0: return &v.state @@ -202645,7 +202645,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[608].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[608].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManagerInstanceLifecyclePolicy); i { case 0: return &v.state @@ -202657,7 +202657,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[609].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[609].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManagerList); i { case 0: return &v.state @@ -202669,7 +202669,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[610].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[610].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManagerResizeRequest); i { case 0: return &v.state @@ -202681,7 +202681,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[611].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[611].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManagerResizeRequestStatus); i { case 0: return &v.state @@ -202693,7 +202693,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[612].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[612].Exporter = func(v any, i int) any { 
switch v := v.(*InstanceGroupManagerResizeRequestStatusLastAttempt); i { case 0: return &v.state @@ -202705,7 +202705,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[613].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[613].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManagerResizeRequestsListResponse); i { case 0: return &v.state @@ -202717,7 +202717,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[614].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[614].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManagerStatus); i { case 0: return &v.state @@ -202729,7 +202729,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[615].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[615].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManagerStatusAllInstancesConfig); i { case 0: return &v.state @@ -202741,7 +202741,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[616].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[616].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManagerStatusStateful); i { case 0: return &v.state @@ -202753,7 +202753,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[617].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[617].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManagerStatusStatefulPerInstanceConfigs); i { case 0: return &v.state @@ -202765,7 +202765,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[618].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[618].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManagerStatusVersionTarget); i { case 0: return &v.state @@ -202777,7 +202777,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[619].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[619].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManagerUpdatePolicy); i { case 0: return &v.state @@ -202789,7 +202789,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[620].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[620].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManagerVersion); i { case 0: return &v.state @@ -202801,7 +202801,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[621].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[621].Exporter = func(v any, i int) any { switch v := 
v.(*InstanceGroupManagersAbandonInstancesRequest); i { case 0: return &v.state @@ -202813,7 +202813,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[622].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[622].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManagersApplyUpdatesRequest); i { case 0: return &v.state @@ -202825,7 +202825,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[623].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[623].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManagersCreateInstancesRequest); i { case 0: return &v.state @@ -202837,7 +202837,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[624].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[624].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManagersDeleteInstancesRequest); i { case 0: return &v.state @@ -202849,7 +202849,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[625].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[625].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManagersDeletePerInstanceConfigsReq); i { case 0: return &v.state @@ -202861,7 +202861,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[626].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[626].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManagersListErrorsResponse); i { case 0: return &v.state @@ -202873,7 +202873,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[627].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[627].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManagersListManagedInstancesResponse); i { case 0: return &v.state @@ -202885,7 +202885,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[628].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[628].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManagersListPerInstanceConfigsResp); i { case 0: return &v.state @@ -202897,7 +202897,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[629].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[629].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManagersPatchPerInstanceConfigsReq); i { case 0: return &v.state @@ -202909,7 +202909,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[630].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[630].Exporter = func(v any, i int) any { switch 
v := v.(*InstanceGroupManagersRecreateInstancesRequest); i { case 0: return &v.state @@ -202921,7 +202921,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[631].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[631].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManagersScopedList); i { case 0: return &v.state @@ -202933,7 +202933,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[632].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[632].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManagersSetInstanceTemplateRequest); i { case 0: return &v.state @@ -202945,7 +202945,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[633].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[633].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManagersSetTargetPoolsRequest); i { case 0: return &v.state @@ -202957,7 +202957,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[634].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[634].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupManagersUpdatePerInstanceConfigsReq); i { case 0: return &v.state @@ -202969,7 +202969,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[635].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[635].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupsAddInstancesRequest); i { case 0: return &v.state @@ -202981,7 +202981,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[636].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[636].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupsListInstances); i { case 0: return &v.state @@ -202993,7 +202993,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[637].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[637].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupsListInstancesRequest); i { case 0: return &v.state @@ -203005,7 +203005,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[638].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[638].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupsRemoveInstancesRequest); i { case 0: return &v.state @@ -203017,7 +203017,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[639].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[639].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupsScopedList); i { case 0: 
return &v.state @@ -203029,7 +203029,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[640].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[640].Exporter = func(v any, i int) any { switch v := v.(*InstanceGroupsSetNamedPortsRequest); i { case 0: return &v.state @@ -203041,7 +203041,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[641].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[641].Exporter = func(v any, i int) any { switch v := v.(*InstanceList); i { case 0: return &v.state @@ -203053,7 +203053,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[642].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[642].Exporter = func(v any, i int) any { switch v := v.(*InstanceListReferrers); i { case 0: return &v.state @@ -203065,7 +203065,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[643].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[643].Exporter = func(v any, i int) any { switch v := v.(*InstanceManagedByIgmError); i { case 0: return &v.state @@ -203077,7 +203077,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[644].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[644].Exporter = func(v any, i int) any { switch v := v.(*InstanceManagedByIgmErrorInstanceActionDetails); i { case 0: return &v.state @@ -203089,7 +203089,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[645].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[645].Exporter = func(v any, i int) any { switch v := v.(*InstanceManagedByIgmErrorManagedInstanceError); i { case 0: return &v.state @@ -203101,7 +203101,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[646].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[646].Exporter = func(v any, i int) any { switch v := v.(*InstanceMoveRequest); i { case 0: return &v.state @@ -203113,7 +203113,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[647].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[647].Exporter = func(v any, i int) any { switch v := v.(*InstanceParams); i { case 0: return &v.state @@ -203125,7 +203125,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[648].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[648].Exporter = func(v any, i int) any { switch v := v.(*InstanceProperties); i { case 0: return &v.state @@ -203137,7 +203137,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - 
file_google_cloud_compute_v1_compute_proto_msgTypes[649].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[649].Exporter = func(v any, i int) any { switch v := v.(*InstancePropertiesPatch); i { case 0: return &v.state @@ -203149,7 +203149,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[650].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[650].Exporter = func(v any, i int) any { switch v := v.(*InstanceReference); i { case 0: return &v.state @@ -203161,7 +203161,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[651].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[651].Exporter = func(v any, i int) any { switch v := v.(*InstanceSettings); i { case 0: return &v.state @@ -203173,7 +203173,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[652].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[652].Exporter = func(v any, i int) any { switch v := v.(*InstanceSettingsMetadata); i { case 0: return &v.state @@ -203185,7 +203185,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[653].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[653].Exporter = func(v any, i int) any { switch v := v.(*InstanceTemplate); i { case 0: return &v.state @@ -203197,7 +203197,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[654].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[654].Exporter = func(v any, i int) any { switch v := v.(*InstanceTemplateAggregatedList); i { case 0: return &v.state @@ -203209,7 +203209,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[655].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[655].Exporter = func(v any, i int) any { switch v := v.(*InstanceTemplateList); i { case 0: return &v.state @@ -203221,7 +203221,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[656].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[656].Exporter = func(v any, i int) any { switch v := v.(*InstanceTemplatesScopedList); i { case 0: return &v.state @@ -203233,7 +203233,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[657].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[657].Exporter = func(v any, i int) any { switch v := v.(*InstanceWithNamedPorts); i { case 0: return &v.state @@ -203245,7 +203245,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[658].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[658].Exporter = func(v any, i int) 
any { switch v := v.(*InstancesAddResourcePoliciesRequest); i { case 0: return &v.state @@ -203257,7 +203257,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[659].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[659].Exporter = func(v any, i int) any { switch v := v.(*InstancesBulkInsertOperationMetadata); i { case 0: return &v.state @@ -203269,7 +203269,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[660].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[660].Exporter = func(v any, i int) any { switch v := v.(*InstancesGetEffectiveFirewallsResponse); i { case 0: return &v.state @@ -203281,7 +203281,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[661].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[661].Exporter = func(v any, i int) any { switch v := v.(*InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy); i { case 0: return &v.state @@ -203293,7 +203293,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[662].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[662].Exporter = func(v any, i int) any { switch v := v.(*InstancesRemoveResourcePoliciesRequest); i { case 0: return &v.state @@ -203305,7 +203305,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[663].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[663].Exporter = func(v any, i int) any { switch v := v.(*InstancesScopedList); i { case 0: return &v.state @@ -203317,7 +203317,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[664].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[664].Exporter = func(v any, i int) any { switch v := v.(*InstancesSetLabelsRequest); i { case 0: return &v.state @@ -203329,7 +203329,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[665].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[665].Exporter = func(v any, i int) any { switch v := v.(*InstancesSetMachineResourcesRequest); i { case 0: return &v.state @@ -203341,7 +203341,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[666].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[666].Exporter = func(v any, i int) any { switch v := v.(*InstancesSetMachineTypeRequest); i { case 0: return &v.state @@ -203353,7 +203353,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[667].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[667].Exporter = func(v any, i int) any { switch v := v.(*InstancesSetMinCpuPlatformRequest); i { case 0: return 
&v.state @@ -203365,7 +203365,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[668].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[668].Exporter = func(v any, i int) any { switch v := v.(*InstancesSetNameRequest); i { case 0: return &v.state @@ -203377,7 +203377,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[669].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[669].Exporter = func(v any, i int) any { switch v := v.(*InstancesSetSecurityPolicyRequest); i { case 0: return &v.state @@ -203389,7 +203389,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[670].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[670].Exporter = func(v any, i int) any { switch v := v.(*InstancesSetServiceAccountRequest); i { case 0: return &v.state @@ -203401,7 +203401,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[671].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[671].Exporter = func(v any, i int) any { switch v := v.(*InstancesStartWithEncryptionKeyRequest); i { case 0: return &v.state @@ -203413,7 +203413,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[672].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[672].Exporter = func(v any, i int) any { switch v := v.(*InstantSnapshot); i { case 0: return &v.state @@ -203425,7 +203425,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[673].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[673].Exporter = func(v any, i int) any { switch v := v.(*InstantSnapshotAggregatedList); i { case 0: return &v.state @@ -203437,7 +203437,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[674].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[674].Exporter = func(v any, i int) any { switch v := v.(*InstantSnapshotList); i { case 0: return &v.state @@ -203449,7 +203449,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[675].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[675].Exporter = func(v any, i int) any { switch v := v.(*InstantSnapshotResourceStatus); i { case 0: return &v.state @@ -203461,7 +203461,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[676].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[676].Exporter = func(v any, i int) any { switch v := v.(*InstantSnapshotsScopedList); i { case 0: return &v.state @@ -203473,7 +203473,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - 
file_google_cloud_compute_v1_compute_proto_msgTypes[677].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[677].Exporter = func(v any, i int) any { switch v := v.(*Int64RangeMatch); i { case 0: return &v.state @@ -203485,7 +203485,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[678].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[678].Exporter = func(v any, i int) any { switch v := v.(*Interconnect); i { case 0: return &v.state @@ -203497,7 +203497,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[679].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[679].Exporter = func(v any, i int) any { switch v := v.(*InterconnectAttachment); i { case 0: return &v.state @@ -203509,7 +203509,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[680].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[680].Exporter = func(v any, i int) any { switch v := v.(*InterconnectAttachmentAggregatedList); i { case 0: return &v.state @@ -203521,7 +203521,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[681].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[681].Exporter = func(v any, i int) any { switch v := v.(*InterconnectAttachmentConfigurationConstraints); i { case 0: return &v.state @@ -203533,7 +203533,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[682].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[682].Exporter = func(v any, i int) any { switch v := v.(*InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange); i { case 0: return &v.state @@ -203545,7 +203545,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[683].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[683].Exporter = func(v any, i int) any { switch v := v.(*InterconnectAttachmentList); i { case 0: return &v.state @@ -203557,7 +203557,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[684].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[684].Exporter = func(v any, i int) any { switch v := v.(*InterconnectAttachmentPartnerMetadata); i { case 0: return &v.state @@ -203569,7 +203569,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[685].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[685].Exporter = func(v any, i int) any { switch v := v.(*InterconnectAttachmentPrivateInfo); i { case 0: return &v.state @@ -203581,7 +203581,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[686].Exporter = func(v interface{}, i int) interface{} 
{ + file_google_cloud_compute_v1_compute_proto_msgTypes[686].Exporter = func(v any, i int) any { switch v := v.(*InterconnectAttachmentsScopedList); i { case 0: return &v.state @@ -203593,7 +203593,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[687].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[687].Exporter = func(v any, i int) any { switch v := v.(*InterconnectCircuitInfo); i { case 0: return &v.state @@ -203605,7 +203605,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[688].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[688].Exporter = func(v any, i int) any { switch v := v.(*InterconnectDiagnostics); i { case 0: return &v.state @@ -203617,7 +203617,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[689].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[689].Exporter = func(v any, i int) any { switch v := v.(*InterconnectDiagnosticsARPEntry); i { case 0: return &v.state @@ -203629,7 +203629,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[690].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[690].Exporter = func(v any, i int) any { switch v := v.(*InterconnectDiagnosticsLinkLACPStatus); i { case 0: return &v.state @@ -203641,7 +203641,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[691].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[691].Exporter = func(v any, i int) any { switch v := v.(*InterconnectDiagnosticsLinkOpticalPower); i { case 0: return &v.state @@ -203653,7 +203653,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[692].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[692].Exporter = func(v any, i int) any { switch v := v.(*InterconnectDiagnosticsLinkStatus); i { case 0: return &v.state @@ -203665,7 +203665,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[693].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[693].Exporter = func(v any, i int) any { switch v := v.(*InterconnectDiagnosticsMacsecStatus); i { case 0: return &v.state @@ -203677,7 +203677,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[694].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[694].Exporter = func(v any, i int) any { switch v := v.(*InterconnectList); i { case 0: return &v.state @@ -203689,7 +203689,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[695].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[695].Exporter = func(v any, i int) any { switch v := 
v.(*InterconnectLocation); i { case 0: return &v.state @@ -203701,7 +203701,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[696].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[696].Exporter = func(v any, i int) any { switch v := v.(*InterconnectLocationList); i { case 0: return &v.state @@ -203713,7 +203713,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[697].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[697].Exporter = func(v any, i int) any { switch v := v.(*InterconnectLocationRegionInfo); i { case 0: return &v.state @@ -203725,7 +203725,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[698].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[698].Exporter = func(v any, i int) any { switch v := v.(*InterconnectMacsec); i { case 0: return &v.state @@ -203737,7 +203737,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[699].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[699].Exporter = func(v any, i int) any { switch v := v.(*InterconnectMacsecConfig); i { case 0: return &v.state @@ -203749,7 +203749,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[700].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[700].Exporter = func(v any, i int) any { switch v := v.(*InterconnectMacsecConfigPreSharedKey); i { case 0: return &v.state @@ -203761,7 +203761,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[701].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[701].Exporter = func(v any, i int) any { switch v := v.(*InterconnectMacsecPreSharedKey); i { case 0: return &v.state @@ -203773,7 +203773,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[702].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[702].Exporter = func(v any, i int) any { switch v := v.(*InterconnectOutageNotification); i { case 0: return &v.state @@ -203785,7 +203785,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[703].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[703].Exporter = func(v any, i int) any { switch v := v.(*InterconnectRemoteLocation); i { case 0: return &v.state @@ -203797,7 +203797,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[704].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[704].Exporter = func(v any, i int) any { switch v := v.(*InterconnectRemoteLocationConstraints); i { case 0: return &v.state @@ -203809,7 +203809,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { 
return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[705].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[705].Exporter = func(v any, i int) any { switch v := v.(*InterconnectRemoteLocationConstraintsSubnetLengthRange); i { case 0: return &v.state @@ -203821,7 +203821,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[706].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[706].Exporter = func(v any, i int) any { switch v := v.(*InterconnectRemoteLocationList); i { case 0: return &v.state @@ -203833,7 +203833,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[707].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[707].Exporter = func(v any, i int) any { switch v := v.(*InterconnectRemoteLocationPermittedConnections); i { case 0: return &v.state @@ -203845,7 +203845,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[708].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[708].Exporter = func(v any, i int) any { switch v := v.(*InterconnectsGetDiagnosticsResponse); i { case 0: return &v.state @@ -203857,7 +203857,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[709].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[709].Exporter = func(v any, i int) any { switch v := v.(*InterconnectsGetMacsecConfigResponse); i { case 0: return &v.state @@ -203869,7 +203869,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[710].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[710].Exporter = func(v any, i int) any { switch v := v.(*InvalidateCacheUrlMapRequest); i { case 0: return &v.state @@ -203881,7 +203881,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[711].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[711].Exporter = func(v any, i int) any { switch v := v.(*Items); i { case 0: return &v.state @@ -203893,7 +203893,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[712].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[712].Exporter = func(v any, i int) any { switch v := v.(*License); i { case 0: return &v.state @@ -203905,7 +203905,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[713].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[713].Exporter = func(v any, i int) any { switch v := v.(*LicenseCode); i { case 0: return &v.state @@ -203917,7 +203917,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[714].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[714].Exporter = func(v any, i int) any { switch v := v.(*LicenseCodeLicenseAlias); i { case 0: return &v.state @@ -203929,7 +203929,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[715].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[715].Exporter = func(v any, i int) any { switch v := v.(*LicenseResourceCommitment); i { case 0: return &v.state @@ -203941,7 +203941,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[716].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[716].Exporter = func(v any, i int) any { switch v := v.(*LicenseResourceRequirements); i { case 0: return &v.state @@ -203953,7 +203953,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[717].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[717].Exporter = func(v any, i int) any { switch v := v.(*LicensesListResponse); i { case 0: return &v.state @@ -203965,7 +203965,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[718].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[718].Exporter = func(v any, i int) any { switch v := v.(*ListAcceleratorTypesRequest); i { case 0: return &v.state @@ -203977,7 +203977,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[719].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[719].Exporter = func(v any, i int) any { switch v := v.(*ListAddressesRequest); i { case 0: return &v.state @@ -203989,7 +203989,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[720].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[720].Exporter = func(v any, i int) any { switch v := v.(*ListAssociationsFirewallPolicyRequest); i { case 0: return &v.state @@ -204001,7 +204001,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[721].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[721].Exporter = func(v any, i int) any { switch v := v.(*ListAutoscalersRequest); i { case 0: return &v.state @@ -204013,7 +204013,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[722].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[722].Exporter = func(v any, i int) any { switch v := v.(*ListAvailableFeaturesRegionSslPoliciesRequest); i { case 0: return &v.state @@ -204025,7 +204025,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[723].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[723].Exporter = func(v any, i int) any { switch v := 
v.(*ListAvailableFeaturesSslPoliciesRequest); i { case 0: return &v.state @@ -204037,7 +204037,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[724].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[724].Exporter = func(v any, i int) any { switch v := v.(*ListBackendBucketsRequest); i { case 0: return &v.state @@ -204049,7 +204049,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[725].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[725].Exporter = func(v any, i int) any { switch v := v.(*ListBackendServicesRequest); i { case 0: return &v.state @@ -204061,7 +204061,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[726].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[726].Exporter = func(v any, i int) any { switch v := v.(*ListDiskTypesRequest); i { case 0: return &v.state @@ -204073,7 +204073,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[727].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[727].Exporter = func(v any, i int) any { switch v := v.(*ListDisksRequest); i { case 0: return &v.state @@ -204085,7 +204085,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[728].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[728].Exporter = func(v any, i int) any { switch v := v.(*ListDisksStoragePoolsRequest); i { case 0: return &v.state @@ -204097,7 +204097,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[729].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[729].Exporter = func(v any, i int) any { switch v := v.(*ListErrorsInstanceGroupManagersRequest); i { case 0: return &v.state @@ -204109,7 +204109,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[730].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[730].Exporter = func(v any, i int) any { switch v := v.(*ListErrorsRegionInstanceGroupManagersRequest); i { case 0: return &v.state @@ -204121,7 +204121,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[731].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[731].Exporter = func(v any, i int) any { switch v := v.(*ListExternalVpnGatewaysRequest); i { case 0: return &v.state @@ -204133,7 +204133,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[732].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[732].Exporter = func(v any, i int) any { switch v := v.(*ListFirewallPoliciesRequest); i { case 0: return &v.state @@ -204145,7 +204145,7 @@ func 
file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[733].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[733].Exporter = func(v any, i int) any { switch v := v.(*ListFirewallsRequest); i { case 0: return &v.state @@ -204157,7 +204157,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[734].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[734].Exporter = func(v any, i int) any { switch v := v.(*ListForwardingRulesRequest); i { case 0: return &v.state @@ -204169,7 +204169,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[735].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[735].Exporter = func(v any, i int) any { switch v := v.(*ListGlobalAddressesRequest); i { case 0: return &v.state @@ -204181,7 +204181,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[736].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[736].Exporter = func(v any, i int) any { switch v := v.(*ListGlobalForwardingRulesRequest); i { case 0: return &v.state @@ -204193,7 +204193,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[737].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[737].Exporter = func(v any, i int) any { switch v := v.(*ListGlobalNetworkEndpointGroupsRequest); i { case 0: return &v.state @@ -204205,7 +204205,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[738].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[738].Exporter = func(v any, i int) any { switch v := v.(*ListGlobalOperationsRequest); i { case 0: return &v.state @@ -204217,7 +204217,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[739].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[739].Exporter = func(v any, i int) any { switch v := v.(*ListGlobalOrganizationOperationsRequest); i { case 0: return &v.state @@ -204229,7 +204229,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[740].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[740].Exporter = func(v any, i int) any { switch v := v.(*ListGlobalPublicDelegatedPrefixesRequest); i { case 0: return &v.state @@ -204241,7 +204241,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[741].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[741].Exporter = func(v any, i int) any { switch v := v.(*ListHealthChecksRequest); i { case 0: return &v.state @@ -204253,7 +204253,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - 
file_google_cloud_compute_v1_compute_proto_msgTypes[742].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[742].Exporter = func(v any, i int) any { switch v := v.(*ListImagesRequest); i { case 0: return &v.state @@ -204265,7 +204265,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[743].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[743].Exporter = func(v any, i int) any { switch v := v.(*ListInstanceGroupManagerResizeRequestsRequest); i { case 0: return &v.state @@ -204277,7 +204277,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[744].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[744].Exporter = func(v any, i int) any { switch v := v.(*ListInstanceGroupManagersRequest); i { case 0: return &v.state @@ -204289,7 +204289,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[745].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[745].Exporter = func(v any, i int) any { switch v := v.(*ListInstanceGroupsRequest); i { case 0: return &v.state @@ -204301,7 +204301,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[746].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[746].Exporter = func(v any, i int) any { switch v := v.(*ListInstanceTemplatesRequest); i { case 0: return &v.state @@ -204313,7 +204313,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[747].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[747].Exporter = func(v any, i int) any { switch v := v.(*ListInstancesInstanceGroupsRequest); i { case 0: return &v.state @@ -204325,7 +204325,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[748].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[748].Exporter = func(v any, i int) any { switch v := v.(*ListInstancesRegionInstanceGroupsRequest); i { case 0: return &v.state @@ -204337,7 +204337,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[749].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[749].Exporter = func(v any, i int) any { switch v := v.(*ListInstancesRequest); i { case 0: return &v.state @@ -204349,7 +204349,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[750].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[750].Exporter = func(v any, i int) any { switch v := v.(*ListInstantSnapshotsRequest); i { case 0: return &v.state @@ -204361,7 +204361,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[751].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[751].Exporter = func(v any, i int) any { switch v := v.(*ListInterconnectAttachmentsRequest); i { case 0: return &v.state @@ -204373,7 +204373,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[752].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[752].Exporter = func(v any, i int) any { switch v := v.(*ListInterconnectLocationsRequest); i { case 0: return &v.state @@ -204385,7 +204385,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[753].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[753].Exporter = func(v any, i int) any { switch v := v.(*ListInterconnectRemoteLocationsRequest); i { case 0: return &v.state @@ -204397,7 +204397,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[754].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[754].Exporter = func(v any, i int) any { switch v := v.(*ListInterconnectsRequest); i { case 0: return &v.state @@ -204409,7 +204409,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[755].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[755].Exporter = func(v any, i int) any { switch v := v.(*ListLicensesRequest); i { case 0: return &v.state @@ -204421,7 +204421,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[756].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[756].Exporter = func(v any, i int) any { switch v := v.(*ListMachineImagesRequest); i { case 0: return &v.state @@ -204433,7 +204433,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[757].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[757].Exporter = func(v any, i int) any { switch v := v.(*ListMachineTypesRequest); i { case 0: return &v.state @@ -204445,7 +204445,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[758].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[758].Exporter = func(v any, i int) any { switch v := v.(*ListManagedInstancesInstanceGroupManagersRequest); i { case 0: return &v.state @@ -204457,7 +204457,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[759].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[759].Exporter = func(v any, i int) any { switch v := v.(*ListManagedInstancesRegionInstanceGroupManagersRequest); i { case 0: return &v.state @@ -204469,7 +204469,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[760].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[760].Exporter = func(v any, i int) any { switch 
v := v.(*ListNetworkAttachmentsRequest); i { case 0: return &v.state @@ -204481,7 +204481,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[761].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[761].Exporter = func(v any, i int) any { switch v := v.(*ListNetworkEndpointGroupsRequest); i { case 0: return &v.state @@ -204493,7 +204493,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[762].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[762].Exporter = func(v any, i int) any { switch v := v.(*ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest); i { case 0: return &v.state @@ -204505,7 +204505,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[763].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[763].Exporter = func(v any, i int) any { switch v := v.(*ListNetworkEndpointsNetworkEndpointGroupsRequest); i { case 0: return &v.state @@ -204517,7 +204517,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[764].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[764].Exporter = func(v any, i int) any { switch v := v.(*ListNetworkEndpointsRegionNetworkEndpointGroupsRequest); i { case 0: return &v.state @@ -204529,7 +204529,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[765].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[765].Exporter = func(v any, i int) any { switch v := v.(*ListNetworkFirewallPoliciesRequest); i { case 0: return &v.state @@ -204541,7 +204541,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[766].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[766].Exporter = func(v any, i int) any { switch v := v.(*ListNetworksRequest); i { case 0: return &v.state @@ -204553,7 +204553,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[767].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[767].Exporter = func(v any, i int) any { switch v := v.(*ListNodeGroupsRequest); i { case 0: return &v.state @@ -204565,7 +204565,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[768].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[768].Exporter = func(v any, i int) any { switch v := v.(*ListNodeTemplatesRequest); i { case 0: return &v.state @@ -204577,7 +204577,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[769].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[769].Exporter = func(v any, i int) any { switch v := v.(*ListNodeTypesRequest); i { case 0: return &v.state @@ -204589,7 
+204589,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[770].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[770].Exporter = func(v any, i int) any { switch v := v.(*ListNodesNodeGroupsRequest); i { case 0: return &v.state @@ -204601,7 +204601,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[771].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[771].Exporter = func(v any, i int) any { switch v := v.(*ListPacketMirroringsRequest); i { case 0: return &v.state @@ -204613,7 +204613,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[772].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[772].Exporter = func(v any, i int) any { switch v := v.(*ListPeeringRoutesNetworksRequest); i { case 0: return &v.state @@ -204625,7 +204625,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[773].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[773].Exporter = func(v any, i int) any { switch v := v.(*ListPerInstanceConfigsInstanceGroupManagersRequest); i { case 0: return &v.state @@ -204637,7 +204637,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[774].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[774].Exporter = func(v any, i int) any { switch v := v.(*ListPerInstanceConfigsRegionInstanceGroupManagersRequest); i { case 0: return &v.state @@ -204649,7 +204649,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[775].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[775].Exporter = func(v any, i int) any { switch v := v.(*ListPreconfiguredExpressionSetsSecurityPoliciesRequest); i { case 0: return &v.state @@ -204661,7 +204661,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[776].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[776].Exporter = func(v any, i int) any { switch v := v.(*ListPublicAdvertisedPrefixesRequest); i { case 0: return &v.state @@ -204673,7 +204673,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[777].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[777].Exporter = func(v any, i int) any { switch v := v.(*ListPublicDelegatedPrefixesRequest); i { case 0: return &v.state @@ -204685,7 +204685,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[778].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[778].Exporter = func(v any, i int) any { switch v := v.(*ListReferrersInstancesRequest); i { case 0: return &v.state @@ -204697,7 +204697,7 @@ func 
file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[779].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[779].Exporter = func(v any, i int) any { switch v := v.(*ListRegionAutoscalersRequest); i { case 0: return &v.state @@ -204709,7 +204709,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[780].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[780].Exporter = func(v any, i int) any { switch v := v.(*ListRegionBackendServicesRequest); i { case 0: return &v.state @@ -204721,7 +204721,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[781].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[781].Exporter = func(v any, i int) any { switch v := v.(*ListRegionCommitmentsRequest); i { case 0: return &v.state @@ -204733,7 +204733,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[782].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[782].Exporter = func(v any, i int) any { switch v := v.(*ListRegionDiskTypesRequest); i { case 0: return &v.state @@ -204745,7 +204745,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[783].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[783].Exporter = func(v any, i int) any { switch v := v.(*ListRegionDisksRequest); i { case 0: return &v.state @@ -204757,7 +204757,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[784].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[784].Exporter = func(v any, i int) any { switch v := v.(*ListRegionHealthCheckServicesRequest); i { case 0: return &v.state @@ -204769,7 +204769,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[785].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[785].Exporter = func(v any, i int) any { switch v := v.(*ListRegionHealthChecksRequest); i { case 0: return &v.state @@ -204781,7 +204781,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[786].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[786].Exporter = func(v any, i int) any { switch v := v.(*ListRegionInstanceGroupManagersRequest); i { case 0: return &v.state @@ -204793,7 +204793,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[787].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[787].Exporter = func(v any, i int) any { switch v := v.(*ListRegionInstanceGroupsRequest); i { case 0: return &v.state @@ -204805,7 +204805,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - 
file_google_cloud_compute_v1_compute_proto_msgTypes[788].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[788].Exporter = func(v any, i int) any { switch v := v.(*ListRegionInstanceTemplatesRequest); i { case 0: return &v.state @@ -204817,7 +204817,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[789].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[789].Exporter = func(v any, i int) any { switch v := v.(*ListRegionInstantSnapshotsRequest); i { case 0: return &v.state @@ -204829,7 +204829,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[790].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[790].Exporter = func(v any, i int) any { switch v := v.(*ListRegionNetworkEndpointGroupsRequest); i { case 0: return &v.state @@ -204841,7 +204841,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[791].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[791].Exporter = func(v any, i int) any { switch v := v.(*ListRegionNetworkFirewallPoliciesRequest); i { case 0: return &v.state @@ -204853,7 +204853,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[792].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[792].Exporter = func(v any, i int) any { switch v := v.(*ListRegionNotificationEndpointsRequest); i { case 0: return &v.state @@ -204865,7 +204865,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[793].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[793].Exporter = func(v any, i int) any { switch v := v.(*ListRegionOperationsRequest); i { case 0: return &v.state @@ -204877,7 +204877,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[794].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[794].Exporter = func(v any, i int) any { switch v := v.(*ListRegionSecurityPoliciesRequest); i { case 0: return &v.state @@ -204889,7 +204889,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[795].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[795].Exporter = func(v any, i int) any { switch v := v.(*ListRegionSslCertificatesRequest); i { case 0: return &v.state @@ -204901,7 +204901,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[796].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[796].Exporter = func(v any, i int) any { switch v := v.(*ListRegionSslPoliciesRequest); i { case 0: return &v.state @@ -204913,7 +204913,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[797].Exporter = func(v interface{}, i 
int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[797].Exporter = func(v any, i int) any { switch v := v.(*ListRegionTargetHttpProxiesRequest); i { case 0: return &v.state @@ -204925,7 +204925,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[798].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[798].Exporter = func(v any, i int) any { switch v := v.(*ListRegionTargetHttpsProxiesRequest); i { case 0: return &v.state @@ -204937,7 +204937,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[799].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[799].Exporter = func(v any, i int) any { switch v := v.(*ListRegionTargetTcpProxiesRequest); i { case 0: return &v.state @@ -204949,7 +204949,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[800].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[800].Exporter = func(v any, i int) any { switch v := v.(*ListRegionUrlMapsRequest); i { case 0: return &v.state @@ -204961,7 +204961,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[801].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[801].Exporter = func(v any, i int) any { switch v := v.(*ListRegionZonesRequest); i { case 0: return &v.state @@ -204973,7 +204973,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[802].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[802].Exporter = func(v any, i int) any { switch v := v.(*ListRegionsRequest); i { case 0: return &v.state @@ -204985,7 +204985,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[803].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[803].Exporter = func(v any, i int) any { switch v := v.(*ListReservationsRequest); i { case 0: return &v.state @@ -204997,7 +204997,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[804].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[804].Exporter = func(v any, i int) any { switch v := v.(*ListResourcePoliciesRequest); i { case 0: return &v.state @@ -205009,7 +205009,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[805].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[805].Exporter = func(v any, i int) any { switch v := v.(*ListRoutersRequest); i { case 0: return &v.state @@ -205021,7 +205021,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[806].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[806].Exporter = func(v any, i int) any { switch v := v.(*ListRoutesRequest); i { case 0: 
return &v.state @@ -205033,7 +205033,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[807].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[807].Exporter = func(v any, i int) any { switch v := v.(*ListSecurityPoliciesRequest); i { case 0: return &v.state @@ -205045,7 +205045,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[808].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[808].Exporter = func(v any, i int) any { switch v := v.(*ListServiceAttachmentsRequest); i { case 0: return &v.state @@ -205057,7 +205057,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[809].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[809].Exporter = func(v any, i int) any { switch v := v.(*ListSnapshotsRequest); i { case 0: return &v.state @@ -205069,7 +205069,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[810].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[810].Exporter = func(v any, i int) any { switch v := v.(*ListSslCertificatesRequest); i { case 0: return &v.state @@ -205081,7 +205081,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[811].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[811].Exporter = func(v any, i int) any { switch v := v.(*ListSslPoliciesRequest); i { case 0: return &v.state @@ -205093,7 +205093,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[812].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[812].Exporter = func(v any, i int) any { switch v := v.(*ListStoragePoolTypesRequest); i { case 0: return &v.state @@ -205105,7 +205105,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[813].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[813].Exporter = func(v any, i int) any { switch v := v.(*ListStoragePoolsRequest); i { case 0: return &v.state @@ -205117,7 +205117,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[814].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[814].Exporter = func(v any, i int) any { switch v := v.(*ListSubnetworksRequest); i { case 0: return &v.state @@ -205129,7 +205129,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[815].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[815].Exporter = func(v any, i int) any { switch v := v.(*ListTargetGrpcProxiesRequest); i { case 0: return &v.state @@ -205141,7 +205141,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - 
file_google_cloud_compute_v1_compute_proto_msgTypes[816].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[816].Exporter = func(v any, i int) any { switch v := v.(*ListTargetHttpProxiesRequest); i { case 0: return &v.state @@ -205153,7 +205153,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[817].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[817].Exporter = func(v any, i int) any { switch v := v.(*ListTargetHttpsProxiesRequest); i { case 0: return &v.state @@ -205165,7 +205165,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[818].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[818].Exporter = func(v any, i int) any { switch v := v.(*ListTargetInstancesRequest); i { case 0: return &v.state @@ -205177,7 +205177,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[819].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[819].Exporter = func(v any, i int) any { switch v := v.(*ListTargetPoolsRequest); i { case 0: return &v.state @@ -205189,7 +205189,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[820].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[820].Exporter = func(v any, i int) any { switch v := v.(*ListTargetSslProxiesRequest); i { case 0: return &v.state @@ -205201,7 +205201,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[821].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[821].Exporter = func(v any, i int) any { switch v := v.(*ListTargetTcpProxiesRequest); i { case 0: return &v.state @@ -205213,7 +205213,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[822].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[822].Exporter = func(v any, i int) any { switch v := v.(*ListTargetVpnGatewaysRequest); i { case 0: return &v.state @@ -205225,7 +205225,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[823].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[823].Exporter = func(v any, i int) any { switch v := v.(*ListUrlMapsRequest); i { case 0: return &v.state @@ -205237,7 +205237,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[824].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[824].Exporter = func(v any, i int) any { switch v := v.(*ListUsableBackendServicesRequest); i { case 0: return &v.state @@ -205249,7 +205249,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[825].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[825].Exporter = func(v any, i int) any { switch v := v.(*ListUsableRegionBackendServicesRequest); i { case 0: return &v.state @@ -205261,7 +205261,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[826].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[826].Exporter = func(v any, i int) any { switch v := v.(*ListUsableSubnetworksRequest); i { case 0: return &v.state @@ -205273,7 +205273,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[827].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[827].Exporter = func(v any, i int) any { switch v := v.(*ListVpnGatewaysRequest); i { case 0: return &v.state @@ -205285,7 +205285,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[828].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[828].Exporter = func(v any, i int) any { switch v := v.(*ListVpnTunnelsRequest); i { case 0: return &v.state @@ -205297,7 +205297,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[829].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[829].Exporter = func(v any, i int) any { switch v := v.(*ListXpnHostsProjectsRequest); i { case 0: return &v.state @@ -205309,7 +205309,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[830].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[830].Exporter = func(v any, i int) any { switch v := v.(*ListZoneOperationsRequest); i { case 0: return &v.state @@ -205321,7 +205321,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[831].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[831].Exporter = func(v any, i int) any { switch v := v.(*ListZonesRequest); i { case 0: return &v.state @@ -205333,7 +205333,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[832].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[832].Exporter = func(v any, i int) any { switch v := v.(*LocalDisk); i { case 0: return &v.state @@ -205345,7 +205345,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[833].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[833].Exporter = func(v any, i int) any { switch v := v.(*LocalizedMessage); i { case 0: return &v.state @@ -205357,7 +205357,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[834].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[834].Exporter = func(v any, i int) any { switch v := v.(*LocationPolicy); i { case 0: return &v.state @@ -205369,7 +205369,7 @@ func 
file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[835].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[835].Exporter = func(v any, i int) any { switch v := v.(*LocationPolicyLocation); i { case 0: return &v.state @@ -205381,7 +205381,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[836].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[836].Exporter = func(v any, i int) any { switch v := v.(*LocationPolicyLocationConstraints); i { case 0: return &v.state @@ -205393,7 +205393,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[837].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[837].Exporter = func(v any, i int) any { switch v := v.(*LogConfig); i { case 0: return &v.state @@ -205405,7 +205405,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[838].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[838].Exporter = func(v any, i int) any { switch v := v.(*LogConfigCloudAuditOptions); i { case 0: return &v.state @@ -205417,7 +205417,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[839].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[839].Exporter = func(v any, i int) any { switch v := v.(*LogConfigCounterOptions); i { case 0: return &v.state @@ -205429,7 +205429,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[840].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[840].Exporter = func(v any, i int) any { switch v := v.(*LogConfigCounterOptionsCustomField); i { case 0: return &v.state @@ -205441,7 +205441,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[841].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[841].Exporter = func(v any, i int) any { switch v := v.(*LogConfigDataAccessOptions); i { case 0: return &v.state @@ -205453,7 +205453,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[842].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[842].Exporter = func(v any, i int) any { switch v := v.(*MachineImage); i { case 0: return &v.state @@ -205465,7 +205465,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[843].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[843].Exporter = func(v any, i int) any { switch v := v.(*MachineImageList); i { case 0: return &v.state @@ -205477,7 +205477,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[844].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[844].Exporter = func(v any, i int) any { switch v := v.(*MachineType); i { case 0: return &v.state @@ -205489,7 +205489,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[845].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[845].Exporter = func(v any, i int) any { switch v := v.(*MachineTypeAggregatedList); i { case 0: return &v.state @@ -205501,7 +205501,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[846].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[846].Exporter = func(v any, i int) any { switch v := v.(*MachineTypeList); i { case 0: return &v.state @@ -205513,7 +205513,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[847].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[847].Exporter = func(v any, i int) any { switch v := v.(*MachineTypesScopedList); i { case 0: return &v.state @@ -205525,7 +205525,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[848].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[848].Exporter = func(v any, i int) any { switch v := v.(*ManagedInstance); i { case 0: return &v.state @@ -205537,7 +205537,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[849].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[849].Exporter = func(v any, i int) any { switch v := v.(*ManagedInstanceInstanceHealth); i { case 0: return &v.state @@ -205549,7 +205549,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[850].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[850].Exporter = func(v any, i int) any { switch v := v.(*ManagedInstanceLastAttempt); i { case 0: return &v.state @@ -205561,7 +205561,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[851].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[851].Exporter = func(v any, i int) any { switch v := v.(*ManagedInstanceVersion); i { case 0: return &v.state @@ -205573,7 +205573,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[852].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[852].Exporter = func(v any, i int) any { switch v := v.(*Metadata); i { case 0: return &v.state @@ -205585,7 +205585,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[853].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[853].Exporter = func(v any, i int) any { switch v := v.(*MetadataFilter); i { case 0: return &v.state @@ -205597,7 +205597,7 @@ func 
file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[854].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[854].Exporter = func(v any, i int) any { switch v := v.(*MetadataFilterLabelMatch); i { case 0: return &v.state @@ -205609,7 +205609,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[855].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[855].Exporter = func(v any, i int) any { switch v := v.(*MoveAddressRequest); i { case 0: return &v.state @@ -205621,7 +205621,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[856].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[856].Exporter = func(v any, i int) any { switch v := v.(*MoveDiskProjectRequest); i { case 0: return &v.state @@ -205633,7 +205633,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[857].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[857].Exporter = func(v any, i int) any { switch v := v.(*MoveFirewallPolicyRequest); i { case 0: return &v.state @@ -205645,7 +205645,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[858].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[858].Exporter = func(v any, i int) any { switch v := v.(*MoveGlobalAddressRequest); i { case 0: return &v.state @@ -205657,7 +205657,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[859].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[859].Exporter = func(v any, i int) any { switch v := v.(*MoveInstanceProjectRequest); i { case 0: return &v.state @@ -205669,7 +205669,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[860].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[860].Exporter = func(v any, i int) any { switch v := v.(*NamedPort); i { case 0: return &v.state @@ -205681,7 +205681,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[861].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[861].Exporter = func(v any, i int) any { switch v := v.(*NatIpInfo); i { case 0: return &v.state @@ -205693,7 +205693,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[862].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[862].Exporter = func(v any, i int) any { switch v := v.(*NatIpInfoNatIpInfoMapping); i { case 0: return &v.state @@ -205705,7 +205705,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[863].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[863].Exporter = func(v any, i int) any { switch v := v.(*NatIpInfoResponse); i { case 0: return &v.state @@ -205717,7 +205717,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[864].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[864].Exporter = func(v any, i int) any { switch v := v.(*Network); i { case 0: return &v.state @@ -205729,7 +205729,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[865].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[865].Exporter = func(v any, i int) any { switch v := v.(*NetworkAttachment); i { case 0: return &v.state @@ -205741,7 +205741,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[866].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[866].Exporter = func(v any, i int) any { switch v := v.(*NetworkAttachmentAggregatedList); i { case 0: return &v.state @@ -205753,7 +205753,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[867].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[867].Exporter = func(v any, i int) any { switch v := v.(*NetworkAttachmentConnectedEndpoint); i { case 0: return &v.state @@ -205765,7 +205765,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[868].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[868].Exporter = func(v any, i int) any { switch v := v.(*NetworkAttachmentList); i { case 0: return &v.state @@ -205777,7 +205777,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[869].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[869].Exporter = func(v any, i int) any { switch v := v.(*NetworkAttachmentsScopedList); i { case 0: return &v.state @@ -205789,7 +205789,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[870].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[870].Exporter = func(v any, i int) any { switch v := v.(*NetworkEdgeSecurityService); i { case 0: return &v.state @@ -205801,7 +205801,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[871].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[871].Exporter = func(v any, i int) any { switch v := v.(*NetworkEdgeSecurityServiceAggregatedList); i { case 0: return &v.state @@ -205813,7 +205813,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[872].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[872].Exporter = func(v any, i int) any { switch v := v.(*NetworkEdgeSecurityServicesScopedList); i { case 0: return 
&v.state @@ -205825,7 +205825,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[873].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[873].Exporter = func(v any, i int) any { switch v := v.(*NetworkEndpoint); i { case 0: return &v.state @@ -205837,7 +205837,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[874].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[874].Exporter = func(v any, i int) any { switch v := v.(*NetworkEndpointGroup); i { case 0: return &v.state @@ -205849,7 +205849,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[875].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[875].Exporter = func(v any, i int) any { switch v := v.(*NetworkEndpointGroupAggregatedList); i { case 0: return &v.state @@ -205861,7 +205861,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[876].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[876].Exporter = func(v any, i int) any { switch v := v.(*NetworkEndpointGroupAppEngine); i { case 0: return &v.state @@ -205873,7 +205873,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[877].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[877].Exporter = func(v any, i int) any { switch v := v.(*NetworkEndpointGroupCloudFunction); i { case 0: return &v.state @@ -205885,7 +205885,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[878].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[878].Exporter = func(v any, i int) any { switch v := v.(*NetworkEndpointGroupCloudRun); i { case 0: return &v.state @@ -205897,7 +205897,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[879].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[879].Exporter = func(v any, i int) any { switch v := v.(*NetworkEndpointGroupList); i { case 0: return &v.state @@ -205909,7 +205909,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[880].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[880].Exporter = func(v any, i int) any { switch v := v.(*NetworkEndpointGroupPscData); i { case 0: return &v.state @@ -205921,7 +205921,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[881].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[881].Exporter = func(v any, i int) any { switch v := v.(*NetworkEndpointGroupsAttachEndpointsRequest); i { case 0: return &v.state @@ -205933,7 +205933,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - 
file_google_cloud_compute_v1_compute_proto_msgTypes[882].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[882].Exporter = func(v any, i int) any { switch v := v.(*NetworkEndpointGroupsDetachEndpointsRequest); i { case 0: return &v.state @@ -205945,7 +205945,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[883].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[883].Exporter = func(v any, i int) any { switch v := v.(*NetworkEndpointGroupsListEndpointsRequest); i { case 0: return &v.state @@ -205957,7 +205957,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[884].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[884].Exporter = func(v any, i int) any { switch v := v.(*NetworkEndpointGroupsListNetworkEndpoints); i { case 0: return &v.state @@ -205969,7 +205969,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[885].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[885].Exporter = func(v any, i int) any { switch v := v.(*NetworkEndpointGroupsScopedList); i { case 0: return &v.state @@ -205981,7 +205981,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[886].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[886].Exporter = func(v any, i int) any { switch v := v.(*NetworkEndpointWithHealthStatus); i { case 0: return &v.state @@ -205993,7 +205993,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[887].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[887].Exporter = func(v any, i int) any { switch v := v.(*NetworkInterface); i { case 0: return &v.state @@ -206005,7 +206005,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[888].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[888].Exporter = func(v any, i int) any { switch v := v.(*NetworkList); i { case 0: return &v.state @@ -206017,7 +206017,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[889].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[889].Exporter = func(v any, i int) any { switch v := v.(*NetworkPeering); i { case 0: return &v.state @@ -206029,7 +206029,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[890].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[890].Exporter = func(v any, i int) any { switch v := v.(*NetworkPerformanceConfig); i { case 0: return &v.state @@ -206041,7 +206041,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[891].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[891].Exporter = func(v any, i int) any { switch v := v.(*NetworkRoutingConfig); i { case 0: return &v.state @@ -206053,7 +206053,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[892].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[892].Exporter = func(v any, i int) any { switch v := v.(*NetworksAddPeeringRequest); i { case 0: return &v.state @@ -206065,7 +206065,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[893].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[893].Exporter = func(v any, i int) any { switch v := v.(*NetworksGetEffectiveFirewallsResponse); i { case 0: return &v.state @@ -206077,7 +206077,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[894].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[894].Exporter = func(v any, i int) any { switch v := v.(*NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy); i { case 0: return &v.state @@ -206089,7 +206089,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[895].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[895].Exporter = func(v any, i int) any { switch v := v.(*NetworksRemovePeeringRequest); i { case 0: return &v.state @@ -206101,7 +206101,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[896].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[896].Exporter = func(v any, i int) any { switch v := v.(*NetworksUpdatePeeringRequest); i { case 0: return &v.state @@ -206113,7 +206113,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[897].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[897].Exporter = func(v any, i int) any { switch v := v.(*NodeGroup); i { case 0: return &v.state @@ -206125,7 +206125,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[898].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[898].Exporter = func(v any, i int) any { switch v := v.(*NodeGroupAggregatedList); i { case 0: return &v.state @@ -206137,7 +206137,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[899].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[899].Exporter = func(v any, i int) any { switch v := v.(*NodeGroupAutoscalingPolicy); i { case 0: return &v.state @@ -206149,7 +206149,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[900].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[900].Exporter = func(v any, i int) any { switch v := v.(*NodeGroupList); i { case 0: 
return &v.state @@ -206161,7 +206161,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[901].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[901].Exporter = func(v any, i int) any { switch v := v.(*NodeGroupMaintenanceWindow); i { case 0: return &v.state @@ -206173,7 +206173,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[902].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[902].Exporter = func(v any, i int) any { switch v := v.(*NodeGroupNode); i { case 0: return &v.state @@ -206185,7 +206185,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[903].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[903].Exporter = func(v any, i int) any { switch v := v.(*NodeGroupsAddNodesRequest); i { case 0: return &v.state @@ -206197,7 +206197,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[904].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[904].Exporter = func(v any, i int) any { switch v := v.(*NodeGroupsDeleteNodesRequest); i { case 0: return &v.state @@ -206209,7 +206209,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[905].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[905].Exporter = func(v any, i int) any { switch v := v.(*NodeGroupsListNodes); i { case 0: return &v.state @@ -206221,7 +206221,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[906].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[906].Exporter = func(v any, i int) any { switch v := v.(*NodeGroupsPerformMaintenanceRequest); i { case 0: return &v.state @@ -206233,7 +206233,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[907].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[907].Exporter = func(v any, i int) any { switch v := v.(*NodeGroupsScopedList); i { case 0: return &v.state @@ -206245,7 +206245,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[908].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[908].Exporter = func(v any, i int) any { switch v := v.(*NodeGroupsSetNodeTemplateRequest); i { case 0: return &v.state @@ -206257,7 +206257,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[909].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[909].Exporter = func(v any, i int) any { switch v := v.(*NodeGroupsSimulateMaintenanceEventRequest); i { case 0: return &v.state @@ -206269,7 +206269,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - 
file_google_cloud_compute_v1_compute_proto_msgTypes[910].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[910].Exporter = func(v any, i int) any { switch v := v.(*NodeTemplate); i { case 0: return &v.state @@ -206281,7 +206281,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[911].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[911].Exporter = func(v any, i int) any { switch v := v.(*NodeTemplateAggregatedList); i { case 0: return &v.state @@ -206293,7 +206293,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[912].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[912].Exporter = func(v any, i int) any { switch v := v.(*NodeTemplateList); i { case 0: return &v.state @@ -206305,7 +206305,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[913].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[913].Exporter = func(v any, i int) any { switch v := v.(*NodeTemplateNodeTypeFlexibility); i { case 0: return &v.state @@ -206317,7 +206317,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[914].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[914].Exporter = func(v any, i int) any { switch v := v.(*NodeTemplatesScopedList); i { case 0: return &v.state @@ -206329,7 +206329,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[915].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[915].Exporter = func(v any, i int) any { switch v := v.(*NodeType); i { case 0: return &v.state @@ -206341,7 +206341,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[916].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[916].Exporter = func(v any, i int) any { switch v := v.(*NodeTypeAggregatedList); i { case 0: return &v.state @@ -206353,7 +206353,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[917].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[917].Exporter = func(v any, i int) any { switch v := v.(*NodeTypeList); i { case 0: return &v.state @@ -206365,7 +206365,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[918].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[918].Exporter = func(v any, i int) any { switch v := v.(*NodeTypesScopedList); i { case 0: return &v.state @@ -206377,7 +206377,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[919].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[919].Exporter = func(v any, i int) any { switch v := 
v.(*NotificationEndpoint); i { case 0: return &v.state @@ -206389,7 +206389,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[920].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[920].Exporter = func(v any, i int) any { switch v := v.(*NotificationEndpointGrpcSettings); i { case 0: return &v.state @@ -206401,7 +206401,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[921].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[921].Exporter = func(v any, i int) any { switch v := v.(*NotificationEndpointList); i { case 0: return &v.state @@ -206413,7 +206413,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[922].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[922].Exporter = func(v any, i int) any { switch v := v.(*Operation); i { case 0: return &v.state @@ -206425,7 +206425,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[923].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[923].Exporter = func(v any, i int) any { switch v := v.(*OperationAggregatedList); i { case 0: return &v.state @@ -206437,7 +206437,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[924].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[924].Exporter = func(v any, i int) any { switch v := v.(*OperationList); i { case 0: return &v.state @@ -206449,7 +206449,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[925].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[925].Exporter = func(v any, i int) any { switch v := v.(*OperationsScopedList); i { case 0: return &v.state @@ -206461,7 +206461,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[926].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[926].Exporter = func(v any, i int) any { switch v := v.(*OutlierDetection); i { case 0: return &v.state @@ -206473,7 +206473,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[927].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[927].Exporter = func(v any, i int) any { switch v := v.(*PacketIntervals); i { case 0: return &v.state @@ -206485,7 +206485,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[928].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[928].Exporter = func(v any, i int) any { switch v := v.(*PacketMirroring); i { case 0: return &v.state @@ -206497,7 +206497,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[929].Exporter = 
func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[929].Exporter = func(v any, i int) any { switch v := v.(*PacketMirroringAggregatedList); i { case 0: return &v.state @@ -206509,7 +206509,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[930].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[930].Exporter = func(v any, i int) any { switch v := v.(*PacketMirroringFilter); i { case 0: return &v.state @@ -206521,7 +206521,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[931].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[931].Exporter = func(v any, i int) any { switch v := v.(*PacketMirroringForwardingRuleInfo); i { case 0: return &v.state @@ -206533,7 +206533,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[932].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[932].Exporter = func(v any, i int) any { switch v := v.(*PacketMirroringList); i { case 0: return &v.state @@ -206545,7 +206545,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[933].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[933].Exporter = func(v any, i int) any { switch v := v.(*PacketMirroringMirroredResourceInfo); i { case 0: return &v.state @@ -206557,7 +206557,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[934].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[934].Exporter = func(v any, i int) any { switch v := v.(*PacketMirroringMirroredResourceInfoInstanceInfo); i { case 0: return &v.state @@ -206569,7 +206569,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[935].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[935].Exporter = func(v any, i int) any { switch v := v.(*PacketMirroringMirroredResourceInfoSubnetInfo); i { case 0: return &v.state @@ -206581,7 +206581,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[936].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[936].Exporter = func(v any, i int) any { switch v := v.(*PacketMirroringNetworkInfo); i { case 0: return &v.state @@ -206593,7 +206593,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[937].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[937].Exporter = func(v any, i int) any { switch v := v.(*PacketMirroringsScopedList); i { case 0: return &v.state @@ -206605,7 +206605,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[938].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[938].Exporter = 
func(v any, i int) any { switch v := v.(*PatchAutoscalerRequest); i { case 0: return &v.state @@ -206617,7 +206617,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[939].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[939].Exporter = func(v any, i int) any { switch v := v.(*PatchBackendBucketRequest); i { case 0: return &v.state @@ -206629,7 +206629,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[940].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[940].Exporter = func(v any, i int) any { switch v := v.(*PatchBackendServiceRequest); i { case 0: return &v.state @@ -206641,7 +206641,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[941].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[941].Exporter = func(v any, i int) any { switch v := v.(*PatchFirewallPolicyRequest); i { case 0: return &v.state @@ -206653,7 +206653,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[942].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[942].Exporter = func(v any, i int) any { switch v := v.(*PatchFirewallRequest); i { case 0: return &v.state @@ -206665,7 +206665,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[943].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[943].Exporter = func(v any, i int) any { switch v := v.(*PatchForwardingRuleRequest); i { case 0: return &v.state @@ -206677,7 +206677,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[944].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[944].Exporter = func(v any, i int) any { switch v := v.(*PatchGlobalForwardingRuleRequest); i { case 0: return &v.state @@ -206689,7 +206689,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[945].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[945].Exporter = func(v any, i int) any { switch v := v.(*PatchGlobalPublicDelegatedPrefixeRequest); i { case 0: return &v.state @@ -206701,7 +206701,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[946].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[946].Exporter = func(v any, i int) any { switch v := v.(*PatchHealthCheckRequest); i { case 0: return &v.state @@ -206713,7 +206713,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[947].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[947].Exporter = func(v any, i int) any { switch v := v.(*PatchImageRequest); i { case 0: return &v.state @@ -206725,7 +206725,7 @@ func 
file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[948].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[948].Exporter = func(v any, i int) any { switch v := v.(*PatchInstanceGroupManagerRequest); i { case 0: return &v.state @@ -206737,7 +206737,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[949].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[949].Exporter = func(v any, i int) any { switch v := v.(*PatchInstanceSettingRequest); i { case 0: return &v.state @@ -206749,7 +206749,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[950].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[950].Exporter = func(v any, i int) any { switch v := v.(*PatchInterconnectAttachmentRequest); i { case 0: return &v.state @@ -206761,7 +206761,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[951].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[951].Exporter = func(v any, i int) any { switch v := v.(*PatchInterconnectRequest); i { case 0: return &v.state @@ -206773,7 +206773,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[952].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[952].Exporter = func(v any, i int) any { switch v := v.(*PatchNetworkAttachmentRequest); i { case 0: return &v.state @@ -206785,7 +206785,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[953].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[953].Exporter = func(v any, i int) any { switch v := v.(*PatchNetworkEdgeSecurityServiceRequest); i { case 0: return &v.state @@ -206797,7 +206797,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[954].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[954].Exporter = func(v any, i int) any { switch v := v.(*PatchNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ -206809,7 +206809,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[955].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[955].Exporter = func(v any, i int) any { switch v := v.(*PatchNetworkRequest); i { case 0: return &v.state @@ -206821,7 +206821,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[956].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[956].Exporter = func(v any, i int) any { switch v := v.(*PatchNodeGroupRequest); i { case 0: return &v.state @@ -206833,7 +206833,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[957].Exporter = 
func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[957].Exporter = func(v any, i int) any { switch v := v.(*PatchPacketMirroringRequest); i { case 0: return &v.state @@ -206845,7 +206845,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[958].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[958].Exporter = func(v any, i int) any { switch v := v.(*PatchPerInstanceConfigsInstanceGroupManagerRequest); i { case 0: return &v.state @@ -206857,7 +206857,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[959].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[959].Exporter = func(v any, i int) any { switch v := v.(*PatchPerInstanceConfigsRegionInstanceGroupManagerRequest); i { case 0: return &v.state @@ -206869,7 +206869,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[960].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[960].Exporter = func(v any, i int) any { switch v := v.(*PatchPublicAdvertisedPrefixeRequest); i { case 0: return &v.state @@ -206881,7 +206881,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[961].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[961].Exporter = func(v any, i int) any { switch v := v.(*PatchPublicDelegatedPrefixeRequest); i { case 0: return &v.state @@ -206893,7 +206893,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[962].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[962].Exporter = func(v any, i int) any { switch v := v.(*PatchRegionAutoscalerRequest); i { case 0: return &v.state @@ -206905,7 +206905,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[963].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[963].Exporter = func(v any, i int) any { switch v := v.(*PatchRegionBackendServiceRequest); i { case 0: return &v.state @@ -206917,7 +206917,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[964].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[964].Exporter = func(v any, i int) any { switch v := v.(*PatchRegionHealthCheckRequest); i { case 0: return &v.state @@ -206929,7 +206929,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[965].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[965].Exporter = func(v any, i int) any { switch v := v.(*PatchRegionHealthCheckServiceRequest); i { case 0: return &v.state @@ -206941,7 +206941,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[966].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[966].Exporter = func(v any, i int) any { switch v := v.(*PatchRegionInstanceGroupManagerRequest); i { case 0: return &v.state @@ -206953,7 +206953,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[967].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[967].Exporter = func(v any, i int) any { switch v := v.(*PatchRegionNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ -206965,7 +206965,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[968].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[968].Exporter = func(v any, i int) any { switch v := v.(*PatchRegionSecurityPolicyRequest); i { case 0: return &v.state @@ -206977,7 +206977,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[969].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[969].Exporter = func(v any, i int) any { switch v := v.(*PatchRegionSslPolicyRequest); i { case 0: return &v.state @@ -206989,7 +206989,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[970].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[970].Exporter = func(v any, i int) any { switch v := v.(*PatchRegionTargetHttpsProxyRequest); i { case 0: return &v.state @@ -207001,7 +207001,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[971].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[971].Exporter = func(v any, i int) any { switch v := v.(*PatchRegionUrlMapRequest); i { case 0: return &v.state @@ -207013,7 +207013,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[972].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[972].Exporter = func(v any, i int) any { switch v := v.(*PatchResourcePolicyRequest); i { case 0: return &v.state @@ -207025,7 +207025,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[973].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[973].Exporter = func(v any, i int) any { switch v := v.(*PatchRouterRequest); i { case 0: return &v.state @@ -207037,7 +207037,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[974].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[974].Exporter = func(v any, i int) any { switch v := v.(*PatchRuleFirewallPolicyRequest); i { case 0: return &v.state @@ -207049,7 +207049,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[975].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[975].Exporter = func(v any, i int) any { switch v := 
v.(*PatchRuleNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ -207061,7 +207061,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[976].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[976].Exporter = func(v any, i int) any { switch v := v.(*PatchRuleRegionNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ -207073,7 +207073,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[977].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[977].Exporter = func(v any, i int) any { switch v := v.(*PatchRuleRegionSecurityPolicyRequest); i { case 0: return &v.state @@ -207085,7 +207085,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[978].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[978].Exporter = func(v any, i int) any { switch v := v.(*PatchRuleSecurityPolicyRequest); i { case 0: return &v.state @@ -207097,7 +207097,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[979].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[979].Exporter = func(v any, i int) any { switch v := v.(*PatchSecurityPolicyRequest); i { case 0: return &v.state @@ -207109,7 +207109,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[980].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[980].Exporter = func(v any, i int) any { switch v := v.(*PatchServiceAttachmentRequest); i { case 0: return &v.state @@ -207121,7 +207121,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[981].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[981].Exporter = func(v any, i int) any { switch v := v.(*PatchSnapshotSettingRequest); i { case 0: return &v.state @@ -207133,7 +207133,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[982].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[982].Exporter = func(v any, i int) any { switch v := v.(*PatchSslPolicyRequest); i { case 0: return &v.state @@ -207145,7 +207145,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[983].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[983].Exporter = func(v any, i int) any { switch v := v.(*PatchSubnetworkRequest); i { case 0: return &v.state @@ -207157,7 +207157,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[984].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[984].Exporter = func(v any, i int) any { switch v := v.(*PatchTargetGrpcProxyRequest); i { case 0: return &v.state @@ -207169,7 +207169,7 @@ func 
file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[985].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[985].Exporter = func(v any, i int) any { switch v := v.(*PatchTargetHttpProxyRequest); i { case 0: return &v.state @@ -207181,7 +207181,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[986].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[986].Exporter = func(v any, i int) any { switch v := v.(*PatchTargetHttpsProxyRequest); i { case 0: return &v.state @@ -207193,7 +207193,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[987].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[987].Exporter = func(v any, i int) any { switch v := v.(*PatchUrlMapRequest); i { case 0: return &v.state @@ -207205,7 +207205,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[988].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[988].Exporter = func(v any, i int) any { switch v := v.(*PathMatcher); i { case 0: return &v.state @@ -207217,7 +207217,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[989].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[989].Exporter = func(v any, i int) any { switch v := v.(*PathRule); i { case 0: return &v.state @@ -207229,7 +207229,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[990].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[990].Exporter = func(v any, i int) any { switch v := v.(*PerInstanceConfig); i { case 0: return &v.state @@ -207241,7 +207241,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[991].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[991].Exporter = func(v any, i int) any { switch v := v.(*PerformMaintenanceInstanceRequest); i { case 0: return &v.state @@ -207253,7 +207253,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[992].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[992].Exporter = func(v any, i int) any { switch v := v.(*PerformMaintenanceNodeGroupRequest); i { case 0: return &v.state @@ -207265,7 +207265,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[993].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[993].Exporter = func(v any, i int) any { switch v := v.(*Policy); i { case 0: return &v.state @@ -207277,7 +207277,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[994].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[994].Exporter = func(v any, i int) any { switch v := v.(*PreconfiguredWafSet); i { case 0: return &v.state @@ -207289,7 +207289,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[995].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[995].Exporter = func(v any, i int) any { switch v := v.(*PreservedState); i { case 0: return &v.state @@ -207301,7 +207301,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[996].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[996].Exporter = func(v any, i int) any { switch v := v.(*PreservedStatePreservedDisk); i { case 0: return &v.state @@ -207313,7 +207313,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[997].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[997].Exporter = func(v any, i int) any { switch v := v.(*PreservedStatePreservedNetworkIp); i { case 0: return &v.state @@ -207325,7 +207325,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[998].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[998].Exporter = func(v any, i int) any { switch v := v.(*PreservedStatePreservedNetworkIpIpAddress); i { case 0: return &v.state @@ -207337,7 +207337,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[999].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[999].Exporter = func(v any, i int) any { switch v := v.(*PreviewRouterRequest); i { case 0: return &v.state @@ -207349,7 +207349,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1000].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1000].Exporter = func(v any, i int) any { switch v := v.(*Project); i { case 0: return &v.state @@ -207361,7 +207361,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1001].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1001].Exporter = func(v any, i int) any { switch v := v.(*ProjectsDisableXpnResourceRequest); i { case 0: return &v.state @@ -207373,7 +207373,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1002].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1002].Exporter = func(v any, i int) any { switch v := v.(*ProjectsEnableXpnResourceRequest); i { case 0: return &v.state @@ -207385,7 +207385,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1003].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1003].Exporter = func(v any, i int) any { switch v := v.(*ProjectsGetXpnResources); i { case 0: return &v.state 
@@ -207397,7 +207397,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1004].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1004].Exporter = func(v any, i int) any { switch v := v.(*ProjectsListXpnHostsRequest); i { case 0: return &v.state @@ -207409,7 +207409,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1005].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1005].Exporter = func(v any, i int) any { switch v := v.(*ProjectsSetCloudArmorTierRequest); i { case 0: return &v.state @@ -207421,7 +207421,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1006].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1006].Exporter = func(v any, i int) any { switch v := v.(*ProjectsSetDefaultNetworkTierRequest); i { case 0: return &v.state @@ -207433,7 +207433,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1007].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1007].Exporter = func(v any, i int) any { switch v := v.(*PublicAdvertisedPrefix); i { case 0: return &v.state @@ -207445,7 +207445,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1008].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1008].Exporter = func(v any, i int) any { switch v := v.(*PublicAdvertisedPrefixList); i { case 0: return &v.state @@ -207457,7 +207457,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1009].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1009].Exporter = func(v any, i int) any { switch v := v.(*PublicAdvertisedPrefixPublicDelegatedPrefix); i { case 0: return &v.state @@ -207469,7 +207469,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1010].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1010].Exporter = func(v any, i int) any { switch v := v.(*PublicDelegatedPrefix); i { case 0: return &v.state @@ -207481,7 +207481,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1011].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1011].Exporter = func(v any, i int) any { switch v := v.(*PublicDelegatedPrefixAggregatedList); i { case 0: return &v.state @@ -207493,7 +207493,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1012].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1012].Exporter = func(v any, i int) any { switch v := v.(*PublicDelegatedPrefixList); i { case 0: return &v.state @@ -207505,7 +207505,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - 
file_google_cloud_compute_v1_compute_proto_msgTypes[1013].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1013].Exporter = func(v any, i int) any { switch v := v.(*PublicDelegatedPrefixPublicDelegatedSubPrefix); i { case 0: return &v.state @@ -207517,7 +207517,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1014].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1014].Exporter = func(v any, i int) any { switch v := v.(*PublicDelegatedPrefixesScopedList); i { case 0: return &v.state @@ -207529,7 +207529,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1015].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1015].Exporter = func(v any, i int) any { switch v := v.(*Quota); i { case 0: return &v.state @@ -207541,7 +207541,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1016].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1016].Exporter = func(v any, i int) any { switch v := v.(*QuotaExceededInfo); i { case 0: return &v.state @@ -207553,7 +207553,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1017].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1017].Exporter = func(v any, i int) any { switch v := v.(*QuotaStatusWarning); i { case 0: return &v.state @@ -207565,7 +207565,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1018].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1018].Exporter = func(v any, i int) any { switch v := v.(*RawDisk); i { case 0: return &v.state @@ -207577,7 +207577,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1019].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1019].Exporter = func(v any, i int) any { switch v := v.(*RecreateInstancesInstanceGroupManagerRequest); i { case 0: return &v.state @@ -207589,7 +207589,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1020].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1020].Exporter = func(v any, i int) any { switch v := v.(*RecreateInstancesRegionInstanceGroupManagerRequest); i { case 0: return &v.state @@ -207601,7 +207601,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1021].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1021].Exporter = func(v any, i int) any { switch v := v.(*Reference); i { case 0: return &v.state @@ -207613,7 +207613,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1022].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[1022].Exporter = func(v any, i int) any { switch v := v.(*Region); i { case 0: return &v.state @@ -207625,7 +207625,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1023].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1023].Exporter = func(v any, i int) any { switch v := v.(*RegionAddressesMoveRequest); i { case 0: return &v.state @@ -207637,7 +207637,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1024].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1024].Exporter = func(v any, i int) any { switch v := v.(*RegionAutoscalerList); i { case 0: return &v.state @@ -207649,7 +207649,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1025].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1025].Exporter = func(v any, i int) any { switch v := v.(*RegionDiskTypeList); i { case 0: return &v.state @@ -207661,7 +207661,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1026].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1026].Exporter = func(v any, i int) any { switch v := v.(*RegionDisksAddResourcePoliciesRequest); i { case 0: return &v.state @@ -207673,7 +207673,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1027].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1027].Exporter = func(v any, i int) any { switch v := v.(*RegionDisksRemoveResourcePoliciesRequest); i { case 0: return &v.state @@ -207685,7 +207685,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1028].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1028].Exporter = func(v any, i int) any { switch v := v.(*RegionDisksResizeRequest); i { case 0: return &v.state @@ -207697,7 +207697,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1029].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1029].Exporter = func(v any, i int) any { switch v := v.(*RegionDisksStartAsyncReplicationRequest); i { case 0: return &v.state @@ -207709,7 +207709,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1030].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1030].Exporter = func(v any, i int) any { switch v := v.(*RegionInstanceGroupList); i { case 0: return &v.state @@ -207721,7 +207721,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1031].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1031].Exporter = func(v any, i int) any { switch v := 
v.(*RegionInstanceGroupManagerDeleteInstanceConfigReq); i { case 0: return &v.state @@ -207733,7 +207733,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1032].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1032].Exporter = func(v any, i int) any { switch v := v.(*RegionInstanceGroupManagerList); i { case 0: return &v.state @@ -207745,7 +207745,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1033].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1033].Exporter = func(v any, i int) any { switch v := v.(*RegionInstanceGroupManagerPatchInstanceConfigReq); i { case 0: return &v.state @@ -207757,7 +207757,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1034].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1034].Exporter = func(v any, i int) any { switch v := v.(*RegionInstanceGroupManagerUpdateInstanceConfigReq); i { case 0: return &v.state @@ -207769,7 +207769,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1035].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1035].Exporter = func(v any, i int) any { switch v := v.(*RegionInstanceGroupManagersAbandonInstancesRequest); i { case 0: return &v.state @@ -207781,7 +207781,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1036].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1036].Exporter = func(v any, i int) any { switch v := v.(*RegionInstanceGroupManagersApplyUpdatesRequest); i { case 0: return &v.state @@ -207793,7 +207793,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1037].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1037].Exporter = func(v any, i int) any { switch v := v.(*RegionInstanceGroupManagersCreateInstancesRequest); i { case 0: return &v.state @@ -207805,7 +207805,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1038].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1038].Exporter = func(v any, i int) any { switch v := v.(*RegionInstanceGroupManagersDeleteInstancesRequest); i { case 0: return &v.state @@ -207817,7 +207817,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1039].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1039].Exporter = func(v any, i int) any { switch v := v.(*RegionInstanceGroupManagersListErrorsResponse); i { case 0: return &v.state @@ -207829,7 +207829,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1040].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1040].Exporter = 
func(v any, i int) any { switch v := v.(*RegionInstanceGroupManagersListInstanceConfigsResp); i { case 0: return &v.state @@ -207841,7 +207841,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1041].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1041].Exporter = func(v any, i int) any { switch v := v.(*RegionInstanceGroupManagersListInstancesResponse); i { case 0: return &v.state @@ -207853,7 +207853,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1042].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1042].Exporter = func(v any, i int) any { switch v := v.(*RegionInstanceGroupManagersRecreateRequest); i { case 0: return &v.state @@ -207865,7 +207865,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1043].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1043].Exporter = func(v any, i int) any { switch v := v.(*RegionInstanceGroupManagersSetTargetPoolsRequest); i { case 0: return &v.state @@ -207877,7 +207877,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1044].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1044].Exporter = func(v any, i int) any { switch v := v.(*RegionInstanceGroupManagersSetTemplateRequest); i { case 0: return &v.state @@ -207889,7 +207889,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1045].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1045].Exporter = func(v any, i int) any { switch v := v.(*RegionInstanceGroupsListInstances); i { case 0: return &v.state @@ -207901,7 +207901,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1046].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1046].Exporter = func(v any, i int) any { switch v := v.(*RegionInstanceGroupsListInstancesRequest); i { case 0: return &v.state @@ -207913,7 +207913,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1047].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1047].Exporter = func(v any, i int) any { switch v := v.(*RegionInstanceGroupsSetNamedPortsRequest); i { case 0: return &v.state @@ -207925,7 +207925,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1048].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1048].Exporter = func(v any, i int) any { switch v := v.(*RegionList); i { case 0: return &v.state @@ -207937,7 +207937,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1049].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1049].Exporter = func(v any, i int) 
any { switch v := v.(*RegionNetworkEndpointGroupsAttachEndpointsRequest); i { case 0: return &v.state @@ -207949,7 +207949,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1050].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1050].Exporter = func(v any, i int) any { switch v := v.(*RegionNetworkEndpointGroupsDetachEndpointsRequest); i { case 0: return &v.state @@ -207961,7 +207961,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1051].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1051].Exporter = func(v any, i int) any { switch v := v.(*RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponse); i { case 0: return &v.state @@ -207973,7 +207973,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1052].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1052].Exporter = func(v any, i int) any { switch v := v.(*RegionNetworkFirewallPoliciesGetEffectiveFirewallsResponseEffectiveFirewallPolicy); i { case 0: return &v.state @@ -207985,7 +207985,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1053].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1053].Exporter = func(v any, i int) any { switch v := v.(*RegionSetLabelsRequest); i { case 0: return &v.state @@ -207997,7 +207997,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1054].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1054].Exporter = func(v any, i int) any { switch v := v.(*RegionSetPolicyRequest); i { case 0: return &v.state @@ -208009,7 +208009,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1055].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1055].Exporter = func(v any, i int) any { switch v := v.(*RegionTargetHttpsProxiesSetSslCertificatesRequest); i { case 0: return &v.state @@ -208021,7 +208021,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1056].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1056].Exporter = func(v any, i int) any { switch v := v.(*RegionUrlMapsValidateRequest); i { case 0: return &v.state @@ -208033,7 +208033,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1057].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1057].Exporter = func(v any, i int) any { switch v := v.(*RemoveAssociationFirewallPolicyRequest); i { case 0: return &v.state @@ -208045,7 +208045,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1058].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1058].Exporter = 
func(v any, i int) any { switch v := v.(*RemoveAssociationNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ -208057,7 +208057,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1059].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1059].Exporter = func(v any, i int) any { switch v := v.(*RemoveAssociationRegionNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ -208069,7 +208069,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1060].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1060].Exporter = func(v any, i int) any { switch v := v.(*RemoveHealthCheckTargetPoolRequest); i { case 0: return &v.state @@ -208081,7 +208081,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1061].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1061].Exporter = func(v any, i int) any { switch v := v.(*RemoveInstanceTargetPoolRequest); i { case 0: return &v.state @@ -208093,7 +208093,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1062].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1062].Exporter = func(v any, i int) any { switch v := v.(*RemoveInstancesInstanceGroupRequest); i { case 0: return &v.state @@ -208105,7 +208105,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1063].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1063].Exporter = func(v any, i int) any { switch v := v.(*RemovePeeringNetworkRequest); i { case 0: return &v.state @@ -208117,7 +208117,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1064].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1064].Exporter = func(v any, i int) any { switch v := v.(*RemoveResourcePoliciesDiskRequest); i { case 0: return &v.state @@ -208129,7 +208129,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1065].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1065].Exporter = func(v any, i int) any { switch v := v.(*RemoveResourcePoliciesInstanceRequest); i { case 0: return &v.state @@ -208141,7 +208141,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1066].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1066].Exporter = func(v any, i int) any { switch v := v.(*RemoveResourcePoliciesRegionDiskRequest); i { case 0: return &v.state @@ -208153,7 +208153,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1067].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1067].Exporter = func(v any, i int) any { switch v := 
v.(*RemoveRuleFirewallPolicyRequest); i { case 0: return &v.state @@ -208165,7 +208165,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1068].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1068].Exporter = func(v any, i int) any { switch v := v.(*RemoveRuleNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ -208177,7 +208177,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1069].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1069].Exporter = func(v any, i int) any { switch v := v.(*RemoveRuleRegionNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ -208189,7 +208189,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1070].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1070].Exporter = func(v any, i int) any { switch v := v.(*RemoveRuleRegionSecurityPolicyRequest); i { case 0: return &v.state @@ -208201,7 +208201,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1071].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1071].Exporter = func(v any, i int) any { switch v := v.(*RemoveRuleSecurityPolicyRequest); i { case 0: return &v.state @@ -208213,7 +208213,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1072].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1072].Exporter = func(v any, i int) any { switch v := v.(*RequestMirrorPolicy); i { case 0: return &v.state @@ -208225,7 +208225,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1073].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1073].Exporter = func(v any, i int) any { switch v := v.(*Reservation); i { case 0: return &v.state @@ -208237,7 +208237,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1074].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1074].Exporter = func(v any, i int) any { switch v := v.(*ReservationAffinity); i { case 0: return &v.state @@ -208249,7 +208249,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1075].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1075].Exporter = func(v any, i int) any { switch v := v.(*ReservationAggregatedList); i { case 0: return &v.state @@ -208261,7 +208261,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1076].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1076].Exporter = func(v any, i int) any { switch v := v.(*ReservationList); i { case 0: return &v.state @@ -208273,7 +208273,7 @@ func 
file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1077].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1077].Exporter = func(v any, i int) any { switch v := v.(*ReservationsResizeRequest); i { case 0: return &v.state @@ -208285,7 +208285,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1078].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1078].Exporter = func(v any, i int) any { switch v := v.(*ReservationsScopedList); i { case 0: return &v.state @@ -208297,7 +208297,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1079].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1079].Exporter = func(v any, i int) any { switch v := v.(*ResetInstanceRequest); i { case 0: return &v.state @@ -208309,7 +208309,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1080].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1080].Exporter = func(v any, i int) any { switch v := v.(*ResizeDiskRequest); i { case 0: return &v.state @@ -208321,7 +208321,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1081].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1081].Exporter = func(v any, i int) any { switch v := v.(*ResizeInstanceGroupManagerRequest); i { case 0: return &v.state @@ -208333,7 +208333,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1082].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1082].Exporter = func(v any, i int) any { switch v := v.(*ResizeRegionDiskRequest); i { case 0: return &v.state @@ -208345,7 +208345,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1083].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1083].Exporter = func(v any, i int) any { switch v := v.(*ResizeRegionInstanceGroupManagerRequest); i { case 0: return &v.state @@ -208357,7 +208357,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1084].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1084].Exporter = func(v any, i int) any { switch v := v.(*ResizeReservationRequest); i { case 0: return &v.state @@ -208369,7 +208369,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1085].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1085].Exporter = func(v any, i int) any { switch v := v.(*ResourceCommitment); i { case 0: return &v.state @@ -208381,7 +208381,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1086].Exporter = func(v 
interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1086].Exporter = func(v any, i int) any { switch v := v.(*ResourceGroupReference); i { case 0: return &v.state @@ -208393,7 +208393,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1087].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1087].Exporter = func(v any, i int) any { switch v := v.(*ResourcePoliciesScopedList); i { case 0: return &v.state @@ -208405,7 +208405,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1088].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1088].Exporter = func(v any, i int) any { switch v := v.(*ResourcePolicy); i { case 0: return &v.state @@ -208417,7 +208417,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1089].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1089].Exporter = func(v any, i int) any { switch v := v.(*ResourcePolicyAggregatedList); i { case 0: return &v.state @@ -208429,7 +208429,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1090].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1090].Exporter = func(v any, i int) any { switch v := v.(*ResourcePolicyDailyCycle); i { case 0: return &v.state @@ -208441,7 +208441,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1091].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1091].Exporter = func(v any, i int) any { switch v := v.(*ResourcePolicyDiskConsistencyGroupPolicy); i { case 0: return &v.state @@ -208453,7 +208453,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1092].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1092].Exporter = func(v any, i int) any { switch v := v.(*ResourcePolicyGroupPlacementPolicy); i { case 0: return &v.state @@ -208465,7 +208465,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1093].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1093].Exporter = func(v any, i int) any { switch v := v.(*ResourcePolicyHourlyCycle); i { case 0: return &v.state @@ -208477,7 +208477,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1094].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1094].Exporter = func(v any, i int) any { switch v := v.(*ResourcePolicyInstanceSchedulePolicy); i { case 0: return &v.state @@ -208489,7 +208489,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1095].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1095].Exporter = func(v any, i int) any 
{ switch v := v.(*ResourcePolicyInstanceSchedulePolicySchedule); i { case 0: return &v.state @@ -208501,7 +208501,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1096].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1096].Exporter = func(v any, i int) any { switch v := v.(*ResourcePolicyList); i { case 0: return &v.state @@ -208513,7 +208513,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1097].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1097].Exporter = func(v any, i int) any { switch v := v.(*ResourcePolicyResourceStatus); i { case 0: return &v.state @@ -208525,7 +208525,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1098].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1098].Exporter = func(v any, i int) any { switch v := v.(*ResourcePolicyResourceStatusInstanceSchedulePolicyStatus); i { case 0: return &v.state @@ -208537,7 +208537,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1099].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1099].Exporter = func(v any, i int) any { switch v := v.(*ResourcePolicySnapshotSchedulePolicy); i { case 0: return &v.state @@ -208549,7 +208549,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1100].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1100].Exporter = func(v any, i int) any { switch v := v.(*ResourcePolicySnapshotSchedulePolicyRetentionPolicy); i { case 0: return &v.state @@ -208561,7 +208561,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1101].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1101].Exporter = func(v any, i int) any { switch v := v.(*ResourcePolicySnapshotSchedulePolicySchedule); i { case 0: return &v.state @@ -208573,7 +208573,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1102].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1102].Exporter = func(v any, i int) any { switch v := v.(*ResourcePolicySnapshotSchedulePolicySnapshotProperties); i { case 0: return &v.state @@ -208585,7 +208585,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1103].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1103].Exporter = func(v any, i int) any { switch v := v.(*ResourcePolicyWeeklyCycle); i { case 0: return &v.state @@ -208597,7 +208597,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1104].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1104].Exporter = func(v any, i int) any { switch v := 
v.(*ResourcePolicyWeeklyCycleDayOfWeek); i { case 0: return &v.state @@ -208609,7 +208609,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1105].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1105].Exporter = func(v any, i int) any { switch v := v.(*ResourceStatus); i { case 0: return &v.state @@ -208621,7 +208621,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1106].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1106].Exporter = func(v any, i int) any { switch v := v.(*ResumeInstanceRequest); i { case 0: return &v.state @@ -208633,7 +208633,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1107].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1107].Exporter = func(v any, i int) any { switch v := v.(*Route); i { case 0: return &v.state @@ -208645,7 +208645,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1108].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1108].Exporter = func(v any, i int) any { switch v := v.(*RouteAsPath); i { case 0: return &v.state @@ -208657,7 +208657,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1109].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1109].Exporter = func(v any, i int) any { switch v := v.(*RouteList); i { case 0: return &v.state @@ -208669,7 +208669,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1110].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1110].Exporter = func(v any, i int) any { switch v := v.(*Router); i { case 0: return &v.state @@ -208681,7 +208681,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1111].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1111].Exporter = func(v any, i int) any { switch v := v.(*RouterAdvertisedIpRange); i { case 0: return &v.state @@ -208693,7 +208693,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1112].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1112].Exporter = func(v any, i int) any { switch v := v.(*RouterAggregatedList); i { case 0: return &v.state @@ -208705,7 +208705,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1113].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1113].Exporter = func(v any, i int) any { switch v := v.(*RouterBgp); i { case 0: return &v.state @@ -208717,7 +208717,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1114].Exporter = func(v interface{}, i 
int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1114].Exporter = func(v any, i int) any { switch v := v.(*RouterBgpPeer); i { case 0: return &v.state @@ -208729,7 +208729,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1115].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1115].Exporter = func(v any, i int) any { switch v := v.(*RouterBgpPeerBfd); i { case 0: return &v.state @@ -208741,7 +208741,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1116].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1116].Exporter = func(v any, i int) any { switch v := v.(*RouterBgpPeerCustomLearnedIpRange); i { case 0: return &v.state @@ -208753,7 +208753,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1117].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1117].Exporter = func(v any, i int) any { switch v := v.(*RouterInterface); i { case 0: return &v.state @@ -208765,7 +208765,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1118].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1118].Exporter = func(v any, i int) any { switch v := v.(*RouterList); i { case 0: return &v.state @@ -208777,7 +208777,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1119].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1119].Exporter = func(v any, i int) any { switch v := v.(*RouterMd5AuthenticationKey); i { case 0: return &v.state @@ -208789,7 +208789,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1120].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1120].Exporter = func(v any, i int) any { switch v := v.(*RouterNat); i { case 0: return &v.state @@ -208801,7 +208801,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1121].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1121].Exporter = func(v any, i int) any { switch v := v.(*RouterNatLogConfig); i { case 0: return &v.state @@ -208813,7 +208813,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1122].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1122].Exporter = func(v any, i int) any { switch v := v.(*RouterNatRule); i { case 0: return &v.state @@ -208825,7 +208825,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1123].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1123].Exporter = func(v any, i int) any { switch v := v.(*RouterNatRuleAction); i { case 0: return &v.state @@ -208837,7 +208837,7 @@ func 
file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1124].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1124].Exporter = func(v any, i int) any { switch v := v.(*RouterNatSubnetworkToNat); i { case 0: return &v.state @@ -208849,7 +208849,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1125].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1125].Exporter = func(v any, i int) any { switch v := v.(*RouterStatus); i { case 0: return &v.state @@ -208861,7 +208861,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1126].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1126].Exporter = func(v any, i int) any { switch v := v.(*RouterStatusBgpPeerStatus); i { case 0: return &v.state @@ -208873,7 +208873,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1127].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1127].Exporter = func(v any, i int) any { switch v := v.(*RouterStatusNatStatus); i { case 0: return &v.state @@ -208885,7 +208885,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1128].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1128].Exporter = func(v any, i int) any { switch v := v.(*RouterStatusNatStatusNatRuleStatus); i { case 0: return &v.state @@ -208897,7 +208897,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1129].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1129].Exporter = func(v any, i int) any { switch v := v.(*RouterStatusResponse); i { case 0: return &v.state @@ -208909,7 +208909,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1130].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1130].Exporter = func(v any, i int) any { switch v := v.(*RoutersPreviewResponse); i { case 0: return &v.state @@ -208921,7 +208921,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1131].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1131].Exporter = func(v any, i int) any { switch v := v.(*RoutersScopedList); i { case 0: return &v.state @@ -208933,7 +208933,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1132].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1132].Exporter = func(v any, i int) any { switch v := v.(*Rule); i { case 0: return &v.state @@ -208945,7 +208945,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1133].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[1133].Exporter = func(v any, i int) any { switch v := v.(*SSLHealthCheck); i { case 0: return &v.state @@ -208957,7 +208957,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1134].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1134].Exporter = func(v any, i int) any { switch v := v.(*SavedAttachedDisk); i { case 0: return &v.state @@ -208969,7 +208969,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1135].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1135].Exporter = func(v any, i int) any { switch v := v.(*SavedDisk); i { case 0: return &v.state @@ -208981,7 +208981,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1136].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1136].Exporter = func(v any, i int) any { switch v := v.(*ScalingScheduleStatus); i { case 0: return &v.state @@ -208993,7 +208993,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1137].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1137].Exporter = func(v any, i int) any { switch v := v.(*Scheduling); i { case 0: return &v.state @@ -209005,7 +209005,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1138].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1138].Exporter = func(v any, i int) any { switch v := v.(*SchedulingNodeAffinity); i { case 0: return &v.state @@ -209017,7 +209017,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1139].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1139].Exporter = func(v any, i int) any { switch v := v.(*ScratchDisks); i { case 0: return &v.state @@ -209029,7 +209029,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1140].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1140].Exporter = func(v any, i int) any { switch v := v.(*Screenshot); i { case 0: return &v.state @@ -209041,7 +209041,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1141].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1141].Exporter = func(v any, i int) any { switch v := v.(*SecurityPoliciesAggregatedList); i { case 0: return &v.state @@ -209053,7 +209053,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1142].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1142].Exporter = func(v any, i int) any { switch v := v.(*SecurityPoliciesListPreconfiguredExpressionSetsResponse); i { case 0: return &v.state @@ -209065,7 +209065,7 @@ func 
file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1143].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1143].Exporter = func(v any, i int) any { switch v := v.(*SecurityPoliciesScopedList); i { case 0: return &v.state @@ -209077,7 +209077,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1144].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1144].Exporter = func(v any, i int) any { switch v := v.(*SecurityPoliciesWafConfig); i { case 0: return &v.state @@ -209089,7 +209089,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1145].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1145].Exporter = func(v any, i int) any { switch v := v.(*SecurityPolicy); i { case 0: return &v.state @@ -209101,7 +209101,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1146].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1146].Exporter = func(v any, i int) any { switch v := v.(*SecurityPolicyAdaptiveProtectionConfig); i { case 0: return &v.state @@ -209113,7 +209113,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1147].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1147].Exporter = func(v any, i int) any { switch v := v.(*SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig); i { case 0: return &v.state @@ -209125,7 +209125,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1148].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1148].Exporter = func(v any, i int) any { switch v := v.(*SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig); i { case 0: return &v.state @@ -209137,7 +209137,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1149].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1149].Exporter = func(v any, i int) any { switch v := v.(*SecurityPolicyAdvancedOptionsConfig); i { case 0: return &v.state @@ -209149,7 +209149,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1150].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1150].Exporter = func(v any, i int) any { switch v := v.(*SecurityPolicyAdvancedOptionsConfigJsonCustomConfig); i { case 0: return &v.state @@ -209161,7 +209161,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1151].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1151].Exporter = func(v any, i int) any { switch v := v.(*SecurityPolicyDdosProtectionConfig); i { case 0: return &v.state @@ -209173,7 +209173,7 @@ func 
file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1152].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1152].Exporter = func(v any, i int) any { switch v := v.(*SecurityPolicyList); i { case 0: return &v.state @@ -209185,7 +209185,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1153].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1153].Exporter = func(v any, i int) any { switch v := v.(*SecurityPolicyRecaptchaOptionsConfig); i { case 0: return &v.state @@ -209197,7 +209197,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1154].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1154].Exporter = func(v any, i int) any { switch v := v.(*SecurityPolicyReference); i { case 0: return &v.state @@ -209209,7 +209209,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1155].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1155].Exporter = func(v any, i int) any { switch v := v.(*SecurityPolicyRule); i { case 0: return &v.state @@ -209221,7 +209221,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1156].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1156].Exporter = func(v any, i int) any { switch v := v.(*SecurityPolicyRuleHttpHeaderAction); i { case 0: return &v.state @@ -209233,7 +209233,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1157].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1157].Exporter = func(v any, i int) any { switch v := v.(*SecurityPolicyRuleHttpHeaderActionHttpHeaderOption); i { case 0: return &v.state @@ -209245,7 +209245,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1158].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1158].Exporter = func(v any, i int) any { switch v := v.(*SecurityPolicyRuleMatcher); i { case 0: return &v.state @@ -209257,7 +209257,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1159].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1159].Exporter = func(v any, i int) any { switch v := v.(*SecurityPolicyRuleMatcherConfig); i { case 0: return &v.state @@ -209269,7 +209269,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1160].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1160].Exporter = func(v any, i int) any { switch v := v.(*SecurityPolicyRuleMatcherExprOptions); i { case 0: return &v.state @@ -209281,7 +209281,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - 
file_google_cloud_compute_v1_compute_proto_msgTypes[1161].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1161].Exporter = func(v any, i int) any { switch v := v.(*SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions); i { case 0: return &v.state @@ -209293,7 +209293,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1162].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1162].Exporter = func(v any, i int) any { switch v := v.(*SecurityPolicyRuleNetworkMatcher); i { case 0: return &v.state @@ -209305,7 +209305,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1163].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1163].Exporter = func(v any, i int) any { switch v := v.(*SecurityPolicyRuleNetworkMatcherUserDefinedFieldMatch); i { case 0: return &v.state @@ -209317,7 +209317,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1164].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1164].Exporter = func(v any, i int) any { switch v := v.(*SecurityPolicyRulePreconfiguredWafConfig); i { case 0: return &v.state @@ -209329,7 +209329,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1165].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1165].Exporter = func(v any, i int) any { switch v := v.(*SecurityPolicyRulePreconfiguredWafConfigExclusion); i { case 0: return &v.state @@ -209341,7 +209341,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1166].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1166].Exporter = func(v any, i int) any { switch v := v.(*SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams); i { case 0: return &v.state @@ -209353,7 +209353,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1167].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1167].Exporter = func(v any, i int) any { switch v := v.(*SecurityPolicyRuleRateLimitOptions); i { case 0: return &v.state @@ -209365,7 +209365,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1168].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1168].Exporter = func(v any, i int) any { switch v := v.(*SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig); i { case 0: return &v.state @@ -209377,7 +209377,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1169].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1169].Exporter = func(v any, i int) any { switch v := v.(*SecurityPolicyRuleRateLimitOptionsThreshold); i { case 0: return &v.state @@ -209389,7 +209389,7 @@ func 
file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1170].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1170].Exporter = func(v any, i int) any { switch v := v.(*SecurityPolicyRuleRedirectOptions); i { case 0: return &v.state @@ -209401,7 +209401,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1171].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1171].Exporter = func(v any, i int) any { switch v := v.(*SecurityPolicyUserDefinedField); i { case 0: return &v.state @@ -209413,7 +209413,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1172].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1172].Exporter = func(v any, i int) any { switch v := v.(*SecuritySettings); i { case 0: return &v.state @@ -209425,7 +209425,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1173].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1173].Exporter = func(v any, i int) any { switch v := v.(*SendDiagnosticInterruptInstanceRequest); i { case 0: return &v.state @@ -209437,7 +209437,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1174].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1174].Exporter = func(v any, i int) any { switch v := v.(*SendDiagnosticInterruptInstanceResponse); i { case 0: return &v.state @@ -209449,7 +209449,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1175].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1175].Exporter = func(v any, i int) any { switch v := v.(*SerialPortOutput); i { case 0: return &v.state @@ -209461,7 +209461,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1176].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1176].Exporter = func(v any, i int) any { switch v := v.(*ServerBinding); i { case 0: return &v.state @@ -209473,7 +209473,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1177].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1177].Exporter = func(v any, i int) any { switch v := v.(*ServiceAccount); i { case 0: return &v.state @@ -209485,7 +209485,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1178].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1178].Exporter = func(v any, i int) any { switch v := v.(*ServiceAttachment); i { case 0: return &v.state @@ -209497,7 +209497,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1179].Exporter = func(v interface{}, i 
int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1179].Exporter = func(v any, i int) any { switch v := v.(*ServiceAttachmentAggregatedList); i { case 0: return &v.state @@ -209509,7 +209509,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1180].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1180].Exporter = func(v any, i int) any { switch v := v.(*ServiceAttachmentConnectedEndpoint); i { case 0: return &v.state @@ -209521,7 +209521,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1181].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1181].Exporter = func(v any, i int) any { switch v := v.(*ServiceAttachmentConsumerProjectLimit); i { case 0: return &v.state @@ -209533,7 +209533,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1182].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1182].Exporter = func(v any, i int) any { switch v := v.(*ServiceAttachmentList); i { case 0: return &v.state @@ -209545,7 +209545,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1183].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1183].Exporter = func(v any, i int) any { switch v := v.(*ServiceAttachmentsScopedList); i { case 0: return &v.state @@ -209557,7 +209557,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1184].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1184].Exporter = func(v any, i int) any { switch v := v.(*SetBackendServiceTargetSslProxyRequest); i { case 0: return &v.state @@ -209569,7 +209569,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1185].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1185].Exporter = func(v any, i int) any { switch v := v.(*SetBackendServiceTargetTcpProxyRequest); i { case 0: return &v.state @@ -209581,7 +209581,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1186].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1186].Exporter = func(v any, i int) any { switch v := v.(*SetBackupTargetPoolRequest); i { case 0: return &v.state @@ -209593,7 +209593,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1187].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1187].Exporter = func(v any, i int) any { switch v := v.(*SetCertificateMapTargetHttpsProxyRequest); i { case 0: return &v.state @@ -209605,7 +209605,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1188].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[1188].Exporter = func(v any, i int) any { switch v := v.(*SetCertificateMapTargetSslProxyRequest); i { case 0: return &v.state @@ -209617,7 +209617,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1189].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1189].Exporter = func(v any, i int) any { switch v := v.(*SetCloudArmorTierProjectRequest); i { case 0: return &v.state @@ -209629,7 +209629,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1190].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1190].Exporter = func(v any, i int) any { switch v := v.(*SetCommonInstanceMetadataOperationMetadata); i { case 0: return &v.state @@ -209641,7 +209641,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1191].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1191].Exporter = func(v any, i int) any { switch v := v.(*SetCommonInstanceMetadataOperationMetadataPerLocationOperationInfo); i { case 0: return &v.state @@ -209653,7 +209653,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1192].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1192].Exporter = func(v any, i int) any { switch v := v.(*SetCommonInstanceMetadataProjectRequest); i { case 0: return &v.state @@ -209665,7 +209665,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1193].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1193].Exporter = func(v any, i int) any { switch v := v.(*SetDefaultNetworkTierProjectRequest); i { case 0: return &v.state @@ -209677,7 +209677,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1194].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1194].Exporter = func(v any, i int) any { switch v := v.(*SetDeletionProtectionInstanceRequest); i { case 0: return &v.state @@ -209689,7 +209689,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1195].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1195].Exporter = func(v any, i int) any { switch v := v.(*SetDiskAutoDeleteInstanceRequest); i { case 0: return &v.state @@ -209701,7 +209701,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1196].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1196].Exporter = func(v any, i int) any { switch v := v.(*SetEdgeSecurityPolicyBackendBucketRequest); i { case 0: return &v.state @@ -209713,7 +209713,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1197].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[1197].Exporter = func(v any, i int) any { switch v := v.(*SetEdgeSecurityPolicyBackendServiceRequest); i { case 0: return &v.state @@ -209725,7 +209725,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1198].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1198].Exporter = func(v any, i int) any { switch v := v.(*SetIamPolicyBackendBucketRequest); i { case 0: return &v.state @@ -209737,7 +209737,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1199].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1199].Exporter = func(v any, i int) any { switch v := v.(*SetIamPolicyBackendServiceRequest); i { case 0: return &v.state @@ -209749,7 +209749,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1200].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1200].Exporter = func(v any, i int) any { switch v := v.(*SetIamPolicyDiskRequest); i { case 0: return &v.state @@ -209761,7 +209761,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1201].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1201].Exporter = func(v any, i int) any { switch v := v.(*SetIamPolicyFirewallPolicyRequest); i { case 0: return &v.state @@ -209773,7 +209773,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1202].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1202].Exporter = func(v any, i int) any { switch v := v.(*SetIamPolicyImageRequest); i { case 0: return &v.state @@ -209785,7 +209785,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1203].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1203].Exporter = func(v any, i int) any { switch v := v.(*SetIamPolicyInstanceRequest); i { case 0: return &v.state @@ -209797,7 +209797,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1204].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1204].Exporter = func(v any, i int) any { switch v := v.(*SetIamPolicyInstanceTemplateRequest); i { case 0: return &v.state @@ -209809,7 +209809,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1205].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1205].Exporter = func(v any, i int) any { switch v := v.(*SetIamPolicyInstantSnapshotRequest); i { case 0: return &v.state @@ -209821,7 +209821,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1206].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1206].Exporter = func(v any, i int) any { 
switch v := v.(*SetIamPolicyLicenseRequest); i { case 0: return &v.state @@ -209833,7 +209833,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1207].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1207].Exporter = func(v any, i int) any { switch v := v.(*SetIamPolicyMachineImageRequest); i { case 0: return &v.state @@ -209845,7 +209845,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1208].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1208].Exporter = func(v any, i int) any { switch v := v.(*SetIamPolicyNetworkAttachmentRequest); i { case 0: return &v.state @@ -209857,7 +209857,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1209].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1209].Exporter = func(v any, i int) any { switch v := v.(*SetIamPolicyNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ -209869,7 +209869,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1210].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1210].Exporter = func(v any, i int) any { switch v := v.(*SetIamPolicyNodeGroupRequest); i { case 0: return &v.state @@ -209881,7 +209881,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1211].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1211].Exporter = func(v any, i int) any { switch v := v.(*SetIamPolicyNodeTemplateRequest); i { case 0: return &v.state @@ -209893,7 +209893,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1212].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1212].Exporter = func(v any, i int) any { switch v := v.(*SetIamPolicyRegionBackendServiceRequest); i { case 0: return &v.state @@ -209905,7 +209905,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1213].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1213].Exporter = func(v any, i int) any { switch v := v.(*SetIamPolicyRegionDiskRequest); i { case 0: return &v.state @@ -209917,7 +209917,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1214].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1214].Exporter = func(v any, i int) any { switch v := v.(*SetIamPolicyRegionInstantSnapshotRequest); i { case 0: return &v.state @@ -209929,7 +209929,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1215].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1215].Exporter = func(v any, i int) any { switch v := v.(*SetIamPolicyRegionNetworkFirewallPolicyRequest); i { case 0: 
return &v.state @@ -209941,7 +209941,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1216].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1216].Exporter = func(v any, i int) any { switch v := v.(*SetIamPolicyReservationRequest); i { case 0: return &v.state @@ -209953,7 +209953,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1217].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1217].Exporter = func(v any, i int) any { switch v := v.(*SetIamPolicyResourcePolicyRequest); i { case 0: return &v.state @@ -209965,7 +209965,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1218].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1218].Exporter = func(v any, i int) any { switch v := v.(*SetIamPolicyServiceAttachmentRequest); i { case 0: return &v.state @@ -209977,7 +209977,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1219].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1219].Exporter = func(v any, i int) any { switch v := v.(*SetIamPolicySnapshotRequest); i { case 0: return &v.state @@ -209989,7 +209989,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1220].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1220].Exporter = func(v any, i int) any { switch v := v.(*SetIamPolicyStoragePoolRequest); i { case 0: return &v.state @@ -210001,7 +210001,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1221].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1221].Exporter = func(v any, i int) any { switch v := v.(*SetIamPolicySubnetworkRequest); i { case 0: return &v.state @@ -210013,7 +210013,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1222].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1222].Exporter = func(v any, i int) any { switch v := v.(*SetInstanceTemplateInstanceGroupManagerRequest); i { case 0: return &v.state @@ -210025,7 +210025,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1223].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1223].Exporter = func(v any, i int) any { switch v := v.(*SetInstanceTemplateRegionInstanceGroupManagerRequest); i { case 0: return &v.state @@ -210037,7 +210037,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1224].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1224].Exporter = func(v any, i int) any { switch v := v.(*SetLabelsAddressRequest); i { case 0: return &v.state @@ -210049,7 +210049,7 @@ func 
file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1225].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1225].Exporter = func(v any, i int) any { switch v := v.(*SetLabelsDiskRequest); i { case 0: return &v.state @@ -210061,7 +210061,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1226].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1226].Exporter = func(v any, i int) any { switch v := v.(*SetLabelsExternalVpnGatewayRequest); i { case 0: return &v.state @@ -210073,7 +210073,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1227].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1227].Exporter = func(v any, i int) any { switch v := v.(*SetLabelsForwardingRuleRequest); i { case 0: return &v.state @@ -210085,7 +210085,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1228].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1228].Exporter = func(v any, i int) any { switch v := v.(*SetLabelsGlobalAddressRequest); i { case 0: return &v.state @@ -210097,7 +210097,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1229].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1229].Exporter = func(v any, i int) any { switch v := v.(*SetLabelsGlobalForwardingRuleRequest); i { case 0: return &v.state @@ -210109,7 +210109,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1230].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1230].Exporter = func(v any, i int) any { switch v := v.(*SetLabelsImageRequest); i { case 0: return &v.state @@ -210121,7 +210121,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1231].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1231].Exporter = func(v any, i int) any { switch v := v.(*SetLabelsInstanceRequest); i { case 0: return &v.state @@ -210133,7 +210133,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1232].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1232].Exporter = func(v any, i int) any { switch v := v.(*SetLabelsInstantSnapshotRequest); i { case 0: return &v.state @@ -210145,7 +210145,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1233].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1233].Exporter = func(v any, i int) any { switch v := v.(*SetLabelsInterconnectAttachmentRequest); i { case 0: return &v.state @@ -210157,7 +210157,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - 
file_google_cloud_compute_v1_compute_proto_msgTypes[1234].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1234].Exporter = func(v any, i int) any { switch v := v.(*SetLabelsInterconnectRequest); i { case 0: return &v.state @@ -210169,7 +210169,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1235].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1235].Exporter = func(v any, i int) any { switch v := v.(*SetLabelsRegionDiskRequest); i { case 0: return &v.state @@ -210181,7 +210181,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1236].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1236].Exporter = func(v any, i int) any { switch v := v.(*SetLabelsRegionInstantSnapshotRequest); i { case 0: return &v.state @@ -210193,7 +210193,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1237].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1237].Exporter = func(v any, i int) any { switch v := v.(*SetLabelsSecurityPolicyRequest); i { case 0: return &v.state @@ -210205,7 +210205,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1238].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1238].Exporter = func(v any, i int) any { switch v := v.(*SetLabelsSnapshotRequest); i { case 0: return &v.state @@ -210217,7 +210217,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1239].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1239].Exporter = func(v any, i int) any { switch v := v.(*SetLabelsTargetVpnGatewayRequest); i { case 0: return &v.state @@ -210229,7 +210229,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1240].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1240].Exporter = func(v any, i int) any { switch v := v.(*SetLabelsVpnGatewayRequest); i { case 0: return &v.state @@ -210241,7 +210241,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1241].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1241].Exporter = func(v any, i int) any { switch v := v.(*SetLabelsVpnTunnelRequest); i { case 0: return &v.state @@ -210253,7 +210253,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1242].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1242].Exporter = func(v any, i int) any { switch v := v.(*SetMachineResourcesInstanceRequest); i { case 0: return &v.state @@ -210265,7 +210265,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1243].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[1243].Exporter = func(v any, i int) any { switch v := v.(*SetMachineTypeInstanceRequest); i { case 0: return &v.state @@ -210277,7 +210277,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1244].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1244].Exporter = func(v any, i int) any { switch v := v.(*SetMetadataInstanceRequest); i { case 0: return &v.state @@ -210289,7 +210289,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1245].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1245].Exporter = func(v any, i int) any { switch v := v.(*SetMinCpuPlatformInstanceRequest); i { case 0: return &v.state @@ -210301,7 +210301,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1246].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1246].Exporter = func(v any, i int) any { switch v := v.(*SetNameInstanceRequest); i { case 0: return &v.state @@ -210313,7 +210313,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1247].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1247].Exporter = func(v any, i int) any { switch v := v.(*SetNamedPortsInstanceGroupRequest); i { case 0: return &v.state @@ -210325,7 +210325,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1248].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1248].Exporter = func(v any, i int) any { switch v := v.(*SetNamedPortsRegionInstanceGroupRequest); i { case 0: return &v.state @@ -210337,7 +210337,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1249].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1249].Exporter = func(v any, i int) any { switch v := v.(*SetNodeTemplateNodeGroupRequest); i { case 0: return &v.state @@ -210349,7 +210349,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1250].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1250].Exporter = func(v any, i int) any { switch v := v.(*SetPrivateIpGoogleAccessSubnetworkRequest); i { case 0: return &v.state @@ -210361,7 +210361,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1251].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1251].Exporter = func(v any, i int) any { switch v := v.(*SetProxyHeaderTargetSslProxyRequest); i { case 0: return &v.state @@ -210373,7 +210373,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1252].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1252].Exporter = func(v any, i int) 
any { switch v := v.(*SetProxyHeaderTargetTcpProxyRequest); i { case 0: return &v.state @@ -210385,7 +210385,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1253].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1253].Exporter = func(v any, i int) any { switch v := v.(*SetQuicOverrideTargetHttpsProxyRequest); i { case 0: return &v.state @@ -210397,7 +210397,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1254].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1254].Exporter = func(v any, i int) any { switch v := v.(*SetSchedulingInstanceRequest); i { case 0: return &v.state @@ -210409,7 +210409,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1255].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1255].Exporter = func(v any, i int) any { switch v := v.(*SetSecurityPolicyBackendServiceRequest); i { case 0: return &v.state @@ -210421,7 +210421,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1256].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1256].Exporter = func(v any, i int) any { switch v := v.(*SetSecurityPolicyInstanceRequest); i { case 0: return &v.state @@ -210433,7 +210433,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1257].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1257].Exporter = func(v any, i int) any { switch v := v.(*SetSecurityPolicyRegionBackendServiceRequest); i { case 0: return &v.state @@ -210445,7 +210445,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1258].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1258].Exporter = func(v any, i int) any { switch v := v.(*SetSecurityPolicyTargetInstanceRequest); i { case 0: return &v.state @@ -210457,7 +210457,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1259].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1259].Exporter = func(v any, i int) any { switch v := v.(*SetSecurityPolicyTargetPoolRequest); i { case 0: return &v.state @@ -210469,7 +210469,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1260].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1260].Exporter = func(v any, i int) any { switch v := v.(*SetServiceAccountInstanceRequest); i { case 0: return &v.state @@ -210481,7 +210481,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1261].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1261].Exporter = func(v any, i int) any { switch v := 
v.(*SetShieldedInstanceIntegrityPolicyInstanceRequest); i { case 0: return &v.state @@ -210493,7 +210493,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1262].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1262].Exporter = func(v any, i int) any { switch v := v.(*SetSslCertificatesRegionTargetHttpsProxyRequest); i { case 0: return &v.state @@ -210505,7 +210505,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1263].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1263].Exporter = func(v any, i int) any { switch v := v.(*SetSslCertificatesTargetHttpsProxyRequest); i { case 0: return &v.state @@ -210517,7 +210517,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1264].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1264].Exporter = func(v any, i int) any { switch v := v.(*SetSslCertificatesTargetSslProxyRequest); i { case 0: return &v.state @@ -210529,7 +210529,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1265].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1265].Exporter = func(v any, i int) any { switch v := v.(*SetSslPolicyTargetHttpsProxyRequest); i { case 0: return &v.state @@ -210541,7 +210541,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1266].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1266].Exporter = func(v any, i int) any { switch v := v.(*SetSslPolicyTargetSslProxyRequest); i { case 0: return &v.state @@ -210553,7 +210553,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1267].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1267].Exporter = func(v any, i int) any { switch v := v.(*SetTagsInstanceRequest); i { case 0: return &v.state @@ -210565,7 +210565,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1268].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1268].Exporter = func(v any, i int) any { switch v := v.(*SetTargetForwardingRuleRequest); i { case 0: return &v.state @@ -210577,7 +210577,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1269].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1269].Exporter = func(v any, i int) any { switch v := v.(*SetTargetGlobalForwardingRuleRequest); i { case 0: return &v.state @@ -210589,7 +210589,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1270].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1270].Exporter = func(v any, i int) any { switch v := 
v.(*SetTargetPoolsInstanceGroupManagerRequest); i { case 0: return &v.state @@ -210601,7 +210601,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1271].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1271].Exporter = func(v any, i int) any { switch v := v.(*SetTargetPoolsRegionInstanceGroupManagerRequest); i { case 0: return &v.state @@ -210613,7 +210613,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1272].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1272].Exporter = func(v any, i int) any { switch v := v.(*SetUrlMapRegionTargetHttpProxyRequest); i { case 0: return &v.state @@ -210625,7 +210625,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1273].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1273].Exporter = func(v any, i int) any { switch v := v.(*SetUrlMapRegionTargetHttpsProxyRequest); i { case 0: return &v.state @@ -210637,7 +210637,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1274].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1274].Exporter = func(v any, i int) any { switch v := v.(*SetUrlMapTargetHttpProxyRequest); i { case 0: return &v.state @@ -210649,7 +210649,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1275].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1275].Exporter = func(v any, i int) any { switch v := v.(*SetUrlMapTargetHttpsProxyRequest); i { case 0: return &v.state @@ -210661,7 +210661,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1276].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1276].Exporter = func(v any, i int) any { switch v := v.(*SetUsageExportBucketProjectRequest); i { case 0: return &v.state @@ -210673,7 +210673,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1277].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1277].Exporter = func(v any, i int) any { switch v := v.(*ShareSettings); i { case 0: return &v.state @@ -210685,7 +210685,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1278].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1278].Exporter = func(v any, i int) any { switch v := v.(*ShareSettingsProjectConfig); i { case 0: return &v.state @@ -210697,7 +210697,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1279].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1279].Exporter = func(v any, i int) any { switch v := v.(*ShieldedInstanceConfig); i { case 0: return &v.state @@ -210709,7 
+210709,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1280].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1280].Exporter = func(v any, i int) any { switch v := v.(*ShieldedInstanceIdentity); i { case 0: return &v.state @@ -210721,7 +210721,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1281].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1281].Exporter = func(v any, i int) any { switch v := v.(*ShieldedInstanceIdentityEntry); i { case 0: return &v.state @@ -210733,7 +210733,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1282].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1282].Exporter = func(v any, i int) any { switch v := v.(*ShieldedInstanceIntegrityPolicy); i { case 0: return &v.state @@ -210745,7 +210745,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1283].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1283].Exporter = func(v any, i int) any { switch v := v.(*SignedUrlKey); i { case 0: return &v.state @@ -210757,7 +210757,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1284].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1284].Exporter = func(v any, i int) any { switch v := v.(*SimulateMaintenanceEventInstanceRequest); i { case 0: return &v.state @@ -210769,7 +210769,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1285].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1285].Exporter = func(v any, i int) any { switch v := v.(*SimulateMaintenanceEventNodeGroupRequest); i { case 0: return &v.state @@ -210781,7 +210781,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1286].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1286].Exporter = func(v any, i int) any { switch v := v.(*Snapshot); i { case 0: return &v.state @@ -210793,7 +210793,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1287].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1287].Exporter = func(v any, i int) any { switch v := v.(*SnapshotList); i { case 0: return &v.state @@ -210805,7 +210805,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1288].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1288].Exporter = func(v any, i int) any { switch v := v.(*SnapshotSettings); i { case 0: return &v.state @@ -210817,7 +210817,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1289].Exporter = func(v 
interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1289].Exporter = func(v any, i int) any { switch v := v.(*SnapshotSettingsStorageLocationSettings); i { case 0: return &v.state @@ -210829,7 +210829,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1290].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1290].Exporter = func(v any, i int) any { switch v := v.(*SnapshotSettingsStorageLocationSettingsStorageLocationPreference); i { case 0: return &v.state @@ -210841,7 +210841,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1291].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1291].Exporter = func(v any, i int) any { switch v := v.(*SourceDiskEncryptionKey); i { case 0: return &v.state @@ -210853,7 +210853,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1292].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1292].Exporter = func(v any, i int) any { switch v := v.(*SourceInstanceParams); i { case 0: return &v.state @@ -210865,7 +210865,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1293].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1293].Exporter = func(v any, i int) any { switch v := v.(*SourceInstanceProperties); i { case 0: return &v.state @@ -210877,7 +210877,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1294].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1294].Exporter = func(v any, i int) any { switch v := v.(*SslCertificate); i { case 0: return &v.state @@ -210889,7 +210889,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1295].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1295].Exporter = func(v any, i int) any { switch v := v.(*SslCertificateAggregatedList); i { case 0: return &v.state @@ -210901,7 +210901,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1296].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1296].Exporter = func(v any, i int) any { switch v := v.(*SslCertificateList); i { case 0: return &v.state @@ -210913,7 +210913,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1297].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1297].Exporter = func(v any, i int) any { switch v := v.(*SslCertificateManagedSslCertificate); i { case 0: return &v.state @@ -210925,7 +210925,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1298].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1298].Exporter = func(v 
any, i int) any { switch v := v.(*SslCertificateSelfManagedSslCertificate); i { case 0: return &v.state @@ -210937,7 +210937,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1299].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1299].Exporter = func(v any, i int) any { switch v := v.(*SslCertificatesScopedList); i { case 0: return &v.state @@ -210949,7 +210949,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1300].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1300].Exporter = func(v any, i int) any { switch v := v.(*SslPoliciesAggregatedList); i { case 0: return &v.state @@ -210961,7 +210961,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1301].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1301].Exporter = func(v any, i int) any { switch v := v.(*SslPoliciesList); i { case 0: return &v.state @@ -210973,7 +210973,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1302].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1302].Exporter = func(v any, i int) any { switch v := v.(*SslPoliciesListAvailableFeaturesResponse); i { case 0: return &v.state @@ -210985,7 +210985,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1303].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1303].Exporter = func(v any, i int) any { switch v := v.(*SslPoliciesScopedList); i { case 0: return &v.state @@ -210997,7 +210997,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1304].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1304].Exporter = func(v any, i int) any { switch v := v.(*SslPolicy); i { case 0: return &v.state @@ -211009,7 +211009,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1305].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1305].Exporter = func(v any, i int) any { switch v := v.(*SslPolicyReference); i { case 0: return &v.state @@ -211021,7 +211021,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1306].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1306].Exporter = func(v any, i int) any { switch v := v.(*StartAsyncReplicationDiskRequest); i { case 0: return &v.state @@ -211033,7 +211033,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1307].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1307].Exporter = func(v any, i int) any { switch v := v.(*StartAsyncReplicationRegionDiskRequest); i { case 0: return &v.state @@ -211045,7 +211045,7 @@ func 
file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1308].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1308].Exporter = func(v any, i int) any { switch v := v.(*StartInstanceRequest); i { case 0: return &v.state @@ -211057,7 +211057,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1309].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1309].Exporter = func(v any, i int) any { switch v := v.(*StartWithEncryptionKeyInstanceRequest); i { case 0: return &v.state @@ -211069,7 +211069,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1310].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1310].Exporter = func(v any, i int) any { switch v := v.(*StatefulPolicy); i { case 0: return &v.state @@ -211081,7 +211081,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1311].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1311].Exporter = func(v any, i int) any { switch v := v.(*StatefulPolicyPreservedState); i { case 0: return &v.state @@ -211093,7 +211093,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1312].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1312].Exporter = func(v any, i int) any { switch v := v.(*StatefulPolicyPreservedStateDiskDevice); i { case 0: return &v.state @@ -211105,7 +211105,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1313].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1313].Exporter = func(v any, i int) any { switch v := v.(*StatefulPolicyPreservedStateNetworkIp); i { case 0: return &v.state @@ -211117,7 +211117,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1314].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1314].Exporter = func(v any, i int) any { switch v := v.(*Status); i { case 0: return &v.state @@ -211129,7 +211129,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1315].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1315].Exporter = func(v any, i int) any { switch v := v.(*StopAsyncReplicationDiskRequest); i { case 0: return &v.state @@ -211141,7 +211141,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1316].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1316].Exporter = func(v any, i int) any { switch v := v.(*StopAsyncReplicationRegionDiskRequest); i { case 0: return &v.state @@ -211153,7 +211153,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - 
file_google_cloud_compute_v1_compute_proto_msgTypes[1317].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1317].Exporter = func(v any, i int) any { switch v := v.(*StopGroupAsyncReplicationDiskRequest); i { case 0: return &v.state @@ -211165,7 +211165,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1318].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1318].Exporter = func(v any, i int) any { switch v := v.(*StopGroupAsyncReplicationRegionDiskRequest); i { case 0: return &v.state @@ -211177,7 +211177,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1319].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1319].Exporter = func(v any, i int) any { switch v := v.(*StopInstanceRequest); i { case 0: return &v.state @@ -211189,7 +211189,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1320].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1320].Exporter = func(v any, i int) any { switch v := v.(*StoragePool); i { case 0: return &v.state @@ -211201,7 +211201,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1321].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1321].Exporter = func(v any, i int) any { switch v := v.(*StoragePoolAggregatedList); i { case 0: return &v.state @@ -211213,7 +211213,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1322].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1322].Exporter = func(v any, i int) any { switch v := v.(*StoragePoolDisk); i { case 0: return &v.state @@ -211225,7 +211225,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1323].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1323].Exporter = func(v any, i int) any { switch v := v.(*StoragePoolList); i { case 0: return &v.state @@ -211237,7 +211237,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1324].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1324].Exporter = func(v any, i int) any { switch v := v.(*StoragePoolListDisks); i { case 0: return &v.state @@ -211249,7 +211249,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1325].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1325].Exporter = func(v any, i int) any { switch v := v.(*StoragePoolResourceStatus); i { case 0: return &v.state @@ -211261,7 +211261,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1326].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[1326].Exporter = func(v any, i int) any { switch v := v.(*StoragePoolType); i { case 0: return &v.state @@ -211273,7 +211273,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1327].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1327].Exporter = func(v any, i int) any { switch v := v.(*StoragePoolTypeAggregatedList); i { case 0: return &v.state @@ -211285,7 +211285,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1328].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1328].Exporter = func(v any, i int) any { switch v := v.(*StoragePoolTypeList); i { case 0: return &v.state @@ -211297,7 +211297,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1329].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1329].Exporter = func(v any, i int) any { switch v := v.(*StoragePoolTypesScopedList); i { case 0: return &v.state @@ -211309,7 +211309,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1330].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1330].Exporter = func(v any, i int) any { switch v := v.(*StoragePoolsScopedList); i { case 0: return &v.state @@ -211321,7 +211321,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1331].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1331].Exporter = func(v any, i int) any { switch v := v.(*Subnetwork); i { case 0: return &v.state @@ -211333,7 +211333,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1332].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1332].Exporter = func(v any, i int) any { switch v := v.(*SubnetworkAggregatedList); i { case 0: return &v.state @@ -211345,7 +211345,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1333].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1333].Exporter = func(v any, i int) any { switch v := v.(*SubnetworkList); i { case 0: return &v.state @@ -211357,7 +211357,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1334].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1334].Exporter = func(v any, i int) any { switch v := v.(*SubnetworkLogConfig); i { case 0: return &v.state @@ -211369,7 +211369,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1335].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1335].Exporter = func(v any, i int) any { switch v := v.(*SubnetworkSecondaryRange); i { case 0: return &v.state @@ -211381,7 +211381,7 @@ func 
file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1336].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1336].Exporter = func(v any, i int) any { switch v := v.(*SubnetworksExpandIpCidrRangeRequest); i { case 0: return &v.state @@ -211393,7 +211393,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1337].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1337].Exporter = func(v any, i int) any { switch v := v.(*SubnetworksScopedList); i { case 0: return &v.state @@ -211405,7 +211405,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1338].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1338].Exporter = func(v any, i int) any { switch v := v.(*SubnetworksSetPrivateIpGoogleAccessRequest); i { case 0: return &v.state @@ -211417,7 +211417,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1339].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1339].Exporter = func(v any, i int) any { switch v := v.(*Subsetting); i { case 0: return &v.state @@ -211429,7 +211429,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1340].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1340].Exporter = func(v any, i int) any { switch v := v.(*SuspendInstanceRequest); i { case 0: return &v.state @@ -211441,7 +211441,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1341].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1341].Exporter = func(v any, i int) any { switch v := v.(*SwitchToCustomModeNetworkRequest); i { case 0: return &v.state @@ -211453,7 +211453,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1342].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1342].Exporter = func(v any, i int) any { switch v := v.(*TCPHealthCheck); i { case 0: return &v.state @@ -211465,7 +211465,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1343].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1343].Exporter = func(v any, i int) any { switch v := v.(*Tags); i { case 0: return &v.state @@ -211477,7 +211477,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1344].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1344].Exporter = func(v any, i int) any { switch v := v.(*TargetGrpcProxy); i { case 0: return &v.state @@ -211489,7 +211489,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1345].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[1345].Exporter = func(v any, i int) any { switch v := v.(*TargetGrpcProxyList); i { case 0: return &v.state @@ -211501,7 +211501,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1346].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1346].Exporter = func(v any, i int) any { switch v := v.(*TargetHttpProxiesScopedList); i { case 0: return &v.state @@ -211513,7 +211513,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1347].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1347].Exporter = func(v any, i int) any { switch v := v.(*TargetHttpProxy); i { case 0: return &v.state @@ -211525,7 +211525,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1348].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1348].Exporter = func(v any, i int) any { switch v := v.(*TargetHttpProxyAggregatedList); i { case 0: return &v.state @@ -211537,7 +211537,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1349].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1349].Exporter = func(v any, i int) any { switch v := v.(*TargetHttpProxyList); i { case 0: return &v.state @@ -211549,7 +211549,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1350].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1350].Exporter = func(v any, i int) any { switch v := v.(*TargetHttpsProxiesScopedList); i { case 0: return &v.state @@ -211561,7 +211561,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1351].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1351].Exporter = func(v any, i int) any { switch v := v.(*TargetHttpsProxiesSetCertificateMapRequest); i { case 0: return &v.state @@ -211573,7 +211573,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1352].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1352].Exporter = func(v any, i int) any { switch v := v.(*TargetHttpsProxiesSetQuicOverrideRequest); i { case 0: return &v.state @@ -211585,7 +211585,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1353].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1353].Exporter = func(v any, i int) any { switch v := v.(*TargetHttpsProxiesSetSslCertificatesRequest); i { case 0: return &v.state @@ -211597,7 +211597,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1354].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1354].Exporter = func(v any, i int) any { switch v := 
v.(*TargetHttpsProxy); i { case 0: return &v.state @@ -211609,7 +211609,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1355].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1355].Exporter = func(v any, i int) any { switch v := v.(*TargetHttpsProxyAggregatedList); i { case 0: return &v.state @@ -211621,7 +211621,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1356].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1356].Exporter = func(v any, i int) any { switch v := v.(*TargetHttpsProxyList); i { case 0: return &v.state @@ -211633,7 +211633,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1357].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1357].Exporter = func(v any, i int) any { switch v := v.(*TargetInstance); i { case 0: return &v.state @@ -211645,7 +211645,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1358].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1358].Exporter = func(v any, i int) any { switch v := v.(*TargetInstanceAggregatedList); i { case 0: return &v.state @@ -211657,7 +211657,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1359].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1359].Exporter = func(v any, i int) any { switch v := v.(*TargetInstanceList); i { case 0: return &v.state @@ -211669,7 +211669,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1360].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1360].Exporter = func(v any, i int) any { switch v := v.(*TargetInstancesScopedList); i { case 0: return &v.state @@ -211681,7 +211681,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1361].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1361].Exporter = func(v any, i int) any { switch v := v.(*TargetPool); i { case 0: return &v.state @@ -211693,7 +211693,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1362].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1362].Exporter = func(v any, i int) any { switch v := v.(*TargetPoolAggregatedList); i { case 0: return &v.state @@ -211705,7 +211705,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1363].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1363].Exporter = func(v any, i int) any { switch v := v.(*TargetPoolInstanceHealth); i { case 0: return &v.state @@ -211717,7 +211717,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - 
file_google_cloud_compute_v1_compute_proto_msgTypes[1364].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1364].Exporter = func(v any, i int) any { switch v := v.(*TargetPoolList); i { case 0: return &v.state @@ -211729,7 +211729,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1365].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1365].Exporter = func(v any, i int) any { switch v := v.(*TargetPoolsAddHealthCheckRequest); i { case 0: return &v.state @@ -211741,7 +211741,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1366].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1366].Exporter = func(v any, i int) any { switch v := v.(*TargetPoolsAddInstanceRequest); i { case 0: return &v.state @@ -211753,7 +211753,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1367].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1367].Exporter = func(v any, i int) any { switch v := v.(*TargetPoolsRemoveHealthCheckRequest); i { case 0: return &v.state @@ -211765,7 +211765,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1368].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1368].Exporter = func(v any, i int) any { switch v := v.(*TargetPoolsRemoveInstanceRequest); i { case 0: return &v.state @@ -211777,7 +211777,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1369].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1369].Exporter = func(v any, i int) any { switch v := v.(*TargetPoolsScopedList); i { case 0: return &v.state @@ -211789,7 +211789,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1370].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1370].Exporter = func(v any, i int) any { switch v := v.(*TargetReference); i { case 0: return &v.state @@ -211801,7 +211801,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1371].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1371].Exporter = func(v any, i int) any { switch v := v.(*TargetSslProxiesSetBackendServiceRequest); i { case 0: return &v.state @@ -211813,7 +211813,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1372].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1372].Exporter = func(v any, i int) any { switch v := v.(*TargetSslProxiesSetCertificateMapRequest); i { case 0: return &v.state @@ -211825,7 +211825,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1373].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[1373].Exporter = func(v any, i int) any { switch v := v.(*TargetSslProxiesSetProxyHeaderRequest); i { case 0: return &v.state @@ -211837,7 +211837,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1374].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1374].Exporter = func(v any, i int) any { switch v := v.(*TargetSslProxiesSetSslCertificatesRequest); i { case 0: return &v.state @@ -211849,7 +211849,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1375].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1375].Exporter = func(v any, i int) any { switch v := v.(*TargetSslProxy); i { case 0: return &v.state @@ -211861,7 +211861,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1376].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1376].Exporter = func(v any, i int) any { switch v := v.(*TargetSslProxyList); i { case 0: return &v.state @@ -211873,7 +211873,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1377].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1377].Exporter = func(v any, i int) any { switch v := v.(*TargetTcpProxiesScopedList); i { case 0: return &v.state @@ -211885,7 +211885,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1378].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1378].Exporter = func(v any, i int) any { switch v := v.(*TargetTcpProxiesSetBackendServiceRequest); i { case 0: return &v.state @@ -211897,7 +211897,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1379].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1379].Exporter = func(v any, i int) any { switch v := v.(*TargetTcpProxiesSetProxyHeaderRequest); i { case 0: return &v.state @@ -211909,7 +211909,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1380].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1380].Exporter = func(v any, i int) any { switch v := v.(*TargetTcpProxy); i { case 0: return &v.state @@ -211921,7 +211921,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1381].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1381].Exporter = func(v any, i int) any { switch v := v.(*TargetTcpProxyAggregatedList); i { case 0: return &v.state @@ -211933,7 +211933,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1382].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1382].Exporter = func(v any, i int) any { switch v := 
v.(*TargetTcpProxyList); i { case 0: return &v.state @@ -211945,7 +211945,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1383].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1383].Exporter = func(v any, i int) any { switch v := v.(*TargetVpnGateway); i { case 0: return &v.state @@ -211957,7 +211957,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1384].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1384].Exporter = func(v any, i int) any { switch v := v.(*TargetVpnGatewayAggregatedList); i { case 0: return &v.state @@ -211969,7 +211969,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1385].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1385].Exporter = func(v any, i int) any { switch v := v.(*TargetVpnGatewayList); i { case 0: return &v.state @@ -211981,7 +211981,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1386].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1386].Exporter = func(v any, i int) any { switch v := v.(*TargetVpnGatewaysScopedList); i { case 0: return &v.state @@ -211993,7 +211993,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1387].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1387].Exporter = func(v any, i int) any { switch v := v.(*TestFailure); i { case 0: return &v.state @@ -212005,7 +212005,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1388].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1388].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsBackendBucketRequest); i { case 0: return &v.state @@ -212017,7 +212017,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1389].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1389].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsBackendServiceRequest); i { case 0: return &v.state @@ -212029,7 +212029,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1390].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1390].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsDiskRequest); i { case 0: return &v.state @@ -212041,7 +212041,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1391].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1391].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsExternalVpnGatewayRequest); i { case 0: return &v.state @@ -212053,7 +212053,7 @@ func 
file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1392].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1392].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsFirewallPolicyRequest); i { case 0: return &v.state @@ -212065,7 +212065,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1393].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1393].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsImageRequest); i { case 0: return &v.state @@ -212077,7 +212077,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1394].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1394].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsInstanceRequest); i { case 0: return &v.state @@ -212089,7 +212089,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1395].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1395].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsInstanceTemplateRequest); i { case 0: return &v.state @@ -212101,7 +212101,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1396].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1396].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsInstantSnapshotRequest); i { case 0: return &v.state @@ -212113,7 +212113,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1397].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1397].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsLicenseCodeRequest); i { case 0: return &v.state @@ -212125,7 +212125,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1398].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1398].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsLicenseRequest); i { case 0: return &v.state @@ -212137,7 +212137,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1399].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1399].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsMachineImageRequest); i { case 0: return &v.state @@ -212149,7 +212149,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1400].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1400].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsNetworkAttachmentRequest); i { case 0: return &v.state @@ -212161,7 +212161,7 @@ func 
file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1401].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1401].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsNetworkEndpointGroupRequest); i { case 0: return &v.state @@ -212173,7 +212173,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1402].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1402].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ -212185,7 +212185,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1403].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1403].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsNodeGroupRequest); i { case 0: return &v.state @@ -212197,7 +212197,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1404].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1404].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsNodeTemplateRequest); i { case 0: return &v.state @@ -212209,7 +212209,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1405].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1405].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsPacketMirroringRequest); i { case 0: return &v.state @@ -212221,7 +212221,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1406].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1406].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsRegionBackendServiceRequest); i { case 0: return &v.state @@ -212233,7 +212233,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1407].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1407].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsRegionDiskRequest); i { case 0: return &v.state @@ -212245,7 +212245,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1408].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1408].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsRegionInstantSnapshotRequest); i { case 0: return &v.state @@ -212257,7 +212257,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1409].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1409].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsRegionNetworkFirewallPolicyRequest); i { case 0: return &v.state @@ -212269,7 +212269,7 @@ 
func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1410].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1410].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsReservationRequest); i { case 0: return &v.state @@ -212281,7 +212281,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1411].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1411].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsResourcePolicyRequest); i { case 0: return &v.state @@ -212293,7 +212293,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1412].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1412].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsServiceAttachmentRequest); i { case 0: return &v.state @@ -212305,7 +212305,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1413].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1413].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsSnapshotRequest); i { case 0: return &v.state @@ -212317,7 +212317,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1414].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1414].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsStoragePoolRequest); i { case 0: return &v.state @@ -212329,7 +212329,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1415].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1415].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsSubnetworkRequest); i { case 0: return &v.state @@ -212341,7 +212341,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1416].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1416].Exporter = func(v any, i int) any { switch v := v.(*TestIamPermissionsVpnGatewayRequest); i { case 0: return &v.state @@ -212353,7 +212353,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1417].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1417].Exporter = func(v any, i int) any { switch v := v.(*TestPermissionsRequest); i { case 0: return &v.state @@ -212365,7 +212365,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1418].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1418].Exporter = func(v any, i int) any { switch v := v.(*TestPermissionsResponse); i { case 0: return &v.state @@ -212377,7 +212377,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - 
file_google_cloud_compute_v1_compute_proto_msgTypes[1419].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1419].Exporter = func(v any, i int) any { switch v := v.(*Uint128); i { case 0: return &v.state @@ -212389,7 +212389,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1420].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1420].Exporter = func(v any, i int) any { switch v := v.(*UpcomingMaintenance); i { case 0: return &v.state @@ -212401,7 +212401,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1421].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1421].Exporter = func(v any, i int) any { switch v := v.(*UpdateAccessConfigInstanceRequest); i { case 0: return &v.state @@ -212413,7 +212413,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1422].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1422].Exporter = func(v any, i int) any { switch v := v.(*UpdateAutoscalerRequest); i { case 0: return &v.state @@ -212425,7 +212425,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1423].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1423].Exporter = func(v any, i int) any { switch v := v.(*UpdateBackendBucketRequest); i { case 0: return &v.state @@ -212437,7 +212437,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1424].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1424].Exporter = func(v any, i int) any { switch v := v.(*UpdateBackendServiceRequest); i { case 0: return &v.state @@ -212449,7 +212449,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1425].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1425].Exporter = func(v any, i int) any { switch v := v.(*UpdateDiskRequest); i { case 0: return &v.state @@ -212461,7 +212461,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1426].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1426].Exporter = func(v any, i int) any { switch v := v.(*UpdateDisplayDeviceInstanceRequest); i { case 0: return &v.state @@ -212473,7 +212473,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1427].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1427].Exporter = func(v any, i int) any { switch v := v.(*UpdateFirewallRequest); i { case 0: return &v.state @@ -212485,7 +212485,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1428].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[1428].Exporter = func(v any, i int) any { switch v := v.(*UpdateHealthCheckRequest); i { case 0: return &v.state @@ -212497,7 +212497,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1429].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1429].Exporter = func(v any, i int) any { switch v := v.(*UpdateInstanceRequest); i { case 0: return &v.state @@ -212509,7 +212509,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1430].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1430].Exporter = func(v any, i int) any { switch v := v.(*UpdateNetworkInterfaceInstanceRequest); i { case 0: return &v.state @@ -212521,7 +212521,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1431].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1431].Exporter = func(v any, i int) any { switch v := v.(*UpdatePeeringNetworkRequest); i { case 0: return &v.state @@ -212533,7 +212533,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1432].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1432].Exporter = func(v any, i int) any { switch v := v.(*UpdatePerInstanceConfigsInstanceGroupManagerRequest); i { case 0: return &v.state @@ -212545,7 +212545,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1433].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1433].Exporter = func(v any, i int) any { switch v := v.(*UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest); i { case 0: return &v.state @@ -212557,7 +212557,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1434].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1434].Exporter = func(v any, i int) any { switch v := v.(*UpdateRegionAutoscalerRequest); i { case 0: return &v.state @@ -212569,7 +212569,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1435].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1435].Exporter = func(v any, i int) any { switch v := v.(*UpdateRegionBackendServiceRequest); i { case 0: return &v.state @@ -212581,7 +212581,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1436].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1436].Exporter = func(v any, i int) any { switch v := v.(*UpdateRegionCommitmentRequest); i { case 0: return &v.state @@ -212593,7 +212593,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1437].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1437].Exporter = 
func(v any, i int) any { switch v := v.(*UpdateRegionDiskRequest); i { case 0: return &v.state @@ -212605,7 +212605,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1438].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1438].Exporter = func(v any, i int) any { switch v := v.(*UpdateRegionHealthCheckRequest); i { case 0: return &v.state @@ -212617,7 +212617,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1439].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1439].Exporter = func(v any, i int) any { switch v := v.(*UpdateRegionUrlMapRequest); i { case 0: return &v.state @@ -212629,7 +212629,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1440].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1440].Exporter = func(v any, i int) any { switch v := v.(*UpdateReservationRequest); i { case 0: return &v.state @@ -212641,7 +212641,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1441].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1441].Exporter = func(v any, i int) any { switch v := v.(*UpdateRouterRequest); i { case 0: return &v.state @@ -212653,7 +212653,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1442].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1442].Exporter = func(v any, i int) any { switch v := v.(*UpdateShieldedInstanceConfigInstanceRequest); i { case 0: return &v.state @@ -212665,7 +212665,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1443].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1443].Exporter = func(v any, i int) any { switch v := v.(*UpdateStoragePoolRequest); i { case 0: return &v.state @@ -212677,7 +212677,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1444].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1444].Exporter = func(v any, i int) any { switch v := v.(*UpdateUrlMapRequest); i { case 0: return &v.state @@ -212689,7 +212689,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1445].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1445].Exporter = func(v any, i int) any { switch v := v.(*UrlMap); i { case 0: return &v.state @@ -212701,7 +212701,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1446].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1446].Exporter = func(v any, i int) any { switch v := v.(*UrlMapList); i { case 0: return &v.state @@ -212713,7 +212713,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { 
return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1447].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1447].Exporter = func(v any, i int) any { switch v := v.(*UrlMapReference); i { case 0: return &v.state @@ -212725,7 +212725,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1448].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1448].Exporter = func(v any, i int) any { switch v := v.(*UrlMapTest); i { case 0: return &v.state @@ -212737,7 +212737,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1449].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1449].Exporter = func(v any, i int) any { switch v := v.(*UrlMapTestHeader); i { case 0: return &v.state @@ -212749,7 +212749,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1450].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1450].Exporter = func(v any, i int) any { switch v := v.(*UrlMapValidationResult); i { case 0: return &v.state @@ -212761,7 +212761,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1451].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1451].Exporter = func(v any, i int) any { switch v := v.(*UrlMapsAggregatedList); i { case 0: return &v.state @@ -212773,7 +212773,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1452].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1452].Exporter = func(v any, i int) any { switch v := v.(*UrlMapsScopedList); i { case 0: return &v.state @@ -212785,7 +212785,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1453].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1453].Exporter = func(v any, i int) any { switch v := v.(*UrlMapsValidateRequest); i { case 0: return &v.state @@ -212797,7 +212797,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1454].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1454].Exporter = func(v any, i int) any { switch v := v.(*UrlMapsValidateResponse); i { case 0: return &v.state @@ -212809,7 +212809,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1455].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1455].Exporter = func(v any, i int) any { switch v := v.(*UrlRewrite); i { case 0: return &v.state @@ -212821,7 +212821,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1456].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1456].Exporter = func(v any, i int) any 
{ switch v := v.(*UsableSubnetwork); i { case 0: return &v.state @@ -212833,7 +212833,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1457].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1457].Exporter = func(v any, i int) any { switch v := v.(*UsableSubnetworkSecondaryRange); i { case 0: return &v.state @@ -212845,7 +212845,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1458].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1458].Exporter = func(v any, i int) any { switch v := v.(*UsableSubnetworksAggregatedList); i { case 0: return &v.state @@ -212857,7 +212857,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1459].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1459].Exporter = func(v any, i int) any { switch v := v.(*UsageExportLocation); i { case 0: return &v.state @@ -212869,7 +212869,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1460].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1460].Exporter = func(v any, i int) any { switch v := v.(*ValidateRegionUrlMapRequest); i { case 0: return &v.state @@ -212881,7 +212881,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1461].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1461].Exporter = func(v any, i int) any { switch v := v.(*ValidateUrlMapRequest); i { case 0: return &v.state @@ -212893,7 +212893,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1462].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1462].Exporter = func(v any, i int) any { switch v := v.(*VmEndpointNatMappings); i { case 0: return &v.state @@ -212905,7 +212905,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1463].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1463].Exporter = func(v any, i int) any { switch v := v.(*VmEndpointNatMappingsInterfaceNatMappings); i { case 0: return &v.state @@ -212917,7 +212917,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1464].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1464].Exporter = func(v any, i int) any { switch v := v.(*VmEndpointNatMappingsInterfaceNatMappingsNatRuleMappings); i { case 0: return &v.state @@ -212929,7 +212929,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1465].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1465].Exporter = func(v any, i int) any { switch v := v.(*VmEndpointNatMappingsList); i { case 0: return &v.state @@ -212941,7 +212941,7 @@ func 
file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1466].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1466].Exporter = func(v any, i int) any { switch v := v.(*VpnGateway); i { case 0: return &v.state @@ -212953,7 +212953,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1467].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1467].Exporter = func(v any, i int) any { switch v := v.(*VpnGatewayAggregatedList); i { case 0: return &v.state @@ -212965,7 +212965,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1468].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1468].Exporter = func(v any, i int) any { switch v := v.(*VpnGatewayList); i { case 0: return &v.state @@ -212977,7 +212977,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1469].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1469].Exporter = func(v any, i int) any { switch v := v.(*VpnGatewayStatus); i { case 0: return &v.state @@ -212989,7 +212989,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1470].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1470].Exporter = func(v any, i int) any { switch v := v.(*VpnGatewayStatusHighAvailabilityRequirementState); i { case 0: return &v.state @@ -213001,7 +213001,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1471].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1471].Exporter = func(v any, i int) any { switch v := v.(*VpnGatewayStatusTunnel); i { case 0: return &v.state @@ -213013,7 +213013,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1472].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1472].Exporter = func(v any, i int) any { switch v := v.(*VpnGatewayStatusVpnConnection); i { case 0: return &v.state @@ -213025,7 +213025,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1473].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1473].Exporter = func(v any, i int) any { switch v := v.(*VpnGatewayVpnGatewayInterface); i { case 0: return &v.state @@ -213037,7 +213037,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1474].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1474].Exporter = func(v any, i int) any { switch v := v.(*VpnGatewaysGetStatusResponse); i { case 0: return &v.state @@ -213049,7 +213049,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1475].Exporter = func(v interface{}, 
i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1475].Exporter = func(v any, i int) any { switch v := v.(*VpnGatewaysScopedList); i { case 0: return &v.state @@ -213061,7 +213061,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1476].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1476].Exporter = func(v any, i int) any { switch v := v.(*VpnTunnel); i { case 0: return &v.state @@ -213073,7 +213073,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1477].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1477].Exporter = func(v any, i int) any { switch v := v.(*VpnTunnelAggregatedList); i { case 0: return &v.state @@ -213085,7 +213085,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1478].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1478].Exporter = func(v any, i int) any { switch v := v.(*VpnTunnelList); i { case 0: return &v.state @@ -213097,7 +213097,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1479].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1479].Exporter = func(v any, i int) any { switch v := v.(*VpnTunnelsScopedList); i { case 0: return &v.state @@ -213109,7 +213109,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1480].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1480].Exporter = func(v any, i int) any { switch v := v.(*WafExpressionSet); i { case 0: return &v.state @@ -213121,7 +213121,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1481].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1481].Exporter = func(v any, i int) any { switch v := v.(*WafExpressionSetExpression); i { case 0: return &v.state @@ -213133,7 +213133,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1482].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1482].Exporter = func(v any, i int) any { switch v := v.(*WaitGlobalOperationRequest); i { case 0: return &v.state @@ -213145,7 +213145,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1483].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1483].Exporter = func(v any, i int) any { switch v := v.(*WaitRegionOperationRequest); i { case 0: return &v.state @@ -213157,7 +213157,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1484].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1484].Exporter = func(v any, i int) any { switch v := v.(*WaitZoneOperationRequest); i { case 0: return &v.state @@ 
-213169,7 +213169,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1485].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1485].Exporter = func(v any, i int) any { switch v := v.(*Warning); i { case 0: return &v.state @@ -213181,7 +213181,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1486].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1486].Exporter = func(v any, i int) any { switch v := v.(*Warnings); i { case 0: return &v.state @@ -213193,7 +213193,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1487].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1487].Exporter = func(v any, i int) any { switch v := v.(*WeightedBackendService); i { case 0: return &v.state @@ -213205,7 +213205,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1488].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1488].Exporter = func(v any, i int) any { switch v := v.(*WithdrawPublicAdvertisedPrefixeRequest); i { case 0: return &v.state @@ -213217,7 +213217,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1489].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1489].Exporter = func(v any, i int) any { switch v := v.(*WithdrawPublicDelegatedPrefixeRequest); i { case 0: return &v.state @@ -213229,7 +213229,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1490].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1490].Exporter = func(v any, i int) any { switch v := v.(*XpnHostList); i { case 0: return &v.state @@ -213241,7 +213241,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1491].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1491].Exporter = func(v any, i int) any { switch v := v.(*XpnResourceId); i { case 0: return &v.state @@ -213253,7 +213253,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1492].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1492].Exporter = func(v any, i int) any { switch v := v.(*Zone); i { case 0: return &v.state @@ -213265,7 +213265,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1493].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1493].Exporter = func(v any, i int) any { switch v := v.(*ZoneList); i { case 0: return &v.state @@ -213277,7 +213277,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1494].Exporter = func(v interface{}, i int) interface{} { + 
file_google_cloud_compute_v1_compute_proto_msgTypes[1494].Exporter = func(v any, i int) any { switch v := v.(*ZoneSetLabelsRequest); i { case 0: return &v.state @@ -213289,7 +213289,7 @@ func file_google_cloud_compute_v1_compute_proto_init() { return nil } } - file_google_cloud_compute_v1_compute_proto_msgTypes[1495].Exporter = func(v interface{}, i int) interface{} { + file_google_cloud_compute_v1_compute_proto_msgTypes[1495].Exporter = func(v any, i int) any { switch v := v.(*ZoneSetPolicyRequest); i { case 0: return &v.state @@ -213302,1244 +213302,1244 @@ func file_google_cloud_compute_v1_compute_proto_init() { } } } - file_google_cloud_compute_v1_compute_proto_msgTypes[0].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[2].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[3].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[4].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[5].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[6].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[7].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[8].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[9].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[10].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[11].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[12].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[13].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[14].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[15].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[16].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[17].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[18].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[19].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[20].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[21].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[22].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[23].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[24].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[25].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[26].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[27].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[28].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[29].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[30].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[31].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[32].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[33].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[34].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[35].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[36].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[37].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[38].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[39].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[40].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[41].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[42].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[43].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[44].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[45].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[46].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[47].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[48].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[49].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[50].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[51].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[52].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[53].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[54].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[55].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[56].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[57].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[58].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[59].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[60].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[61].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[62].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[63].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[64].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[65].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[66].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[67].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[68].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[69].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[70].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[71].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[72].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[73].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[74].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[75].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[76].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[77].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[78].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[79].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[80].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[81].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[82].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[83].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[84].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[85].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[86].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[87].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[88].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[89].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[92].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[93].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[94].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[95].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[96].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[97].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[98].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[99].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[100].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[101].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[102].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[103].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[104].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[105].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[106].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[107].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[108].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[109].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[110].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[111].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[112].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[113].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[114].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[115].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[117].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[118].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[119].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[120].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[121].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[122].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[123].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[124].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[125].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[126].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[127].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[128].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[129].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[130].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[131].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[132].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[133].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[134].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[135].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[136].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[137].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[138].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[139].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[140].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[141].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[142].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[143].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[144].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[145].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[146].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[147].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[148].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[149].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[150].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[151].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[152].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[153].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[154].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[155].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[156].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[157].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[158].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[159].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[160].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[161].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[162].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[163].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[164].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[165].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[166].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[167].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[168].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[169].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[170].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[171].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[172].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[173].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[174].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[175].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[176].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[177].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[178].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[179].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[180].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[181].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[182].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[183].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[184].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[185].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[188].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[190].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[191].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[192].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[193].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[194].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[195].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[196].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[197].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[198].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[199].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[200].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[201].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[202].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[203].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[204].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[205].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[206].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[207].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[208].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[209].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[210].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[211].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[212].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[213].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[216].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[217].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[218].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[219].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[220].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[221].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[222].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[223].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[224].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[225].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[226].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[227].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[228].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[231].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[232].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[233].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[234].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[235].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[236].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[237].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[238].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[239].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[240].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[241].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[242].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[243].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[244].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[245].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[246].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[247].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[248].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[249].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[250].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[251].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[252].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[253].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[254].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[255].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[256].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[257].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[258].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[259].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[260].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[261].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[264].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[265].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[266].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[267].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[268].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[269].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[270].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[271].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[272].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[273].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[274].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[275].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[276].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[277].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[278].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[279].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[281].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[282].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[283].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[284].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[285].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[286].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[289].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[290].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[291].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[292].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[293].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[294].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[295].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[296].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[297].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[298].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[300].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[301].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[302].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[303].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[304].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[305].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[306].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[307].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[308].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[309].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[310].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[311].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[312].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[313].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[314].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[315].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[316].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[317].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[318].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[320].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[321].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[322].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[323].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[324].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[325].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[326].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[327].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[328].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[329].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[332].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[333].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[334].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[353].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[355].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[360].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[361].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[362].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[363].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[364].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[365].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[366].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[367].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[368].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[369].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[370].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[371].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[372].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[373].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[374].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[375].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[376].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[377].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[378].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[379].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[380].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[381].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[382].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[383].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[402].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[403].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[444].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[445].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[446].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[447].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[448].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[451].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[474].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[477].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[480].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[481].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[482].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[483].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[484].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[486].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[487].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[488].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[489].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[490].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[491].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[492].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[493].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[494].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[495].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[496].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[497].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[498].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[499].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[500].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[502].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[503].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[504].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[505].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[506].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[508].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[509].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[510].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[511].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[512].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[513].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[514].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[515].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[516].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[517].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[518].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[519].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[520].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[521].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[522].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[523].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[524].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[525].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[526].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[527].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[528].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[529].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[530].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[531].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[532].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[533].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[534].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[535].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[536].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[537].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[538].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[539].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[540].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[541].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[542].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[543].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[544].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[545].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[546].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[547].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[548].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[549].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[550].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[551].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[552].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[553].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[554].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[555].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[556].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[557].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[558].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[559].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[560].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[561].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[562].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[563].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[564].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[565].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[566].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[567].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[568].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[569].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[570].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[571].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[572].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[573].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[574].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[575].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[576].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[577].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[578].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[579].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[580].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[581].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[582].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[583].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[584].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[585].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[586].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[587].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[588].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[589].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[590].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[591].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[592].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[593].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[594].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[595].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[596].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[597].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[598].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[599].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[600].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[601].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[602].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[603].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[604].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[605].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[606].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[607].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[608].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[609].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[610].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[611].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[612].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[613].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[614].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[615].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[616].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[617].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[618].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[619].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[620].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[622].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[624].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[626].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[627].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[628].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[631].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[632].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[633].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[636].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[637].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[639].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[640].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[641].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[642].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[643].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[644].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[645].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[646].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[648].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[650].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[651].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[652].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[653].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[654].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[655].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[656].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[657].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[661].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[663].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[664].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[666].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[667].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[668].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[669].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[670].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[672].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[673].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[674].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[675].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[676].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[677].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[678].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[679].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[680].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[681].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[682].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[683].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[684].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[685].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[686].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[687].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[688].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[689].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[690].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[691].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[692].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[693].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[694].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[695].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[696].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[697].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[698].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[700].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[701].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[702].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[703].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[704].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[705].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[706].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[707].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[708].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[709].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[710].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[711].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[712].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[713].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[714].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[715].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[716].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[717].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[718].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[719].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[720].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[721].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[722].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[723].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[724].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[725].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[726].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[727].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[728].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[729].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[730].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[731].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[732].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[733].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[734].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[735].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[736].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[737].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[738].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[739].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[740].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[741].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[742].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[743].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[744].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[745].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[746].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[747].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[748].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[749].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[750].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[751].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[752].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[753].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[754].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[755].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[756].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[757].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[758].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[759].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[760].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[761].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[762].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[763].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[764].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[765].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[766].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[767].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[768].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[769].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[770].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[771].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[772].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[773].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[774].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[775].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[776].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[777].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[778].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[779].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[780].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[781].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[782].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[783].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[784].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[785].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[786].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[787].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[788].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[789].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[790].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[791].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[792].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[793].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[794].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[795].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[796].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[797].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[798].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[799].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[800].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[801].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[802].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[803].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[804].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[805].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[806].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[807].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[808].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[809].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[810].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[811].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[812].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[813].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[814].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[815].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[816].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[817].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[818].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[819].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[820].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[821].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[822].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[823].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[824].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[825].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[826].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[827].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[828].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[829].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[830].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[831].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[832].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[833].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[834].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[835].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[836].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[837].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[838].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[839].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[840].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[841].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[842].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[843].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[844].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[845].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[846].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[847].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[848].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[849].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[850].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[851].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[852].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[853].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[854].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[855].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[856].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[857].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[858].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[859].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[860].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[861].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[862].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[864].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[865].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[866].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[867].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[868].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[869].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[870].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[871].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[872].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[873].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[874].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[875].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[876].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[877].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[878].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[879].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[880].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[883].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[884].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[885].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[886].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[887].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[888].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[889].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[890].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[891].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[892].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[894].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[895].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[896].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[897].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[898].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[899].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[900].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[901].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[902].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[903].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[905].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[906].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[907].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[908].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[910].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[911].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[912].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[913].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[914].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[915].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[916].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[917].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[918].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[919].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[920].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[921].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[922].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[923].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[924].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[925].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[926].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[927].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[928].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[929].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[930].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[931].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[932].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[934].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[935].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[936].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[937].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[938].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[939].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[940].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[941].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[942].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[943].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[944].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[945].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[946].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[947].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[948].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[949].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[950].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[951].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[952].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[953].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[954].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[955].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[956].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[957].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[958].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[959].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[960].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[961].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[962].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[963].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[964].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[965].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[966].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[967].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[968].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[969].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[970].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[971].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[972].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[973].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[974].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[975].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[976].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[977].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[978].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[979].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[980].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[981].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[982].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[983].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[984].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[985].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[986].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[987].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[988].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[989].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[990].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[991].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[992].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[993].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[996].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[997].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[998].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1000].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1001].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1002].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1003].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1004].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1005].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1006].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1007].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1008].OneofWrappers = 
[]interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1009].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1010].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1011].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1012].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1013].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1014].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1015].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1016].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1017].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1018].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1019].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1020].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1021].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1022].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1023].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1024].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1025].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1028].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1029].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1030].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1032].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1036].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1038].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1039].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1040].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1041].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1043].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1044].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1045].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1046].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1047].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1048].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1052].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1053].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1054].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1056].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1057].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1058].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[1059].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1060].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1061].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1062].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1063].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1064].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1065].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1066].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1067].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1068].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1069].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1070].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1071].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1072].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1073].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1074].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1075].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1076].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1077].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1078].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1079].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1080].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1081].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1082].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1083].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1084].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1085].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1086].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1087].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1088].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1089].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1090].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1092].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1093].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1094].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1095].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1096].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1097].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[1098].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1099].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1100].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1101].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1102].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1104].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1105].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1106].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1107].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1108].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1109].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1110].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1111].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1112].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1113].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1114].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1115].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1116].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1117].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1118].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1119].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1120].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1121].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1122].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1124].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1125].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1126].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1127].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1128].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1129].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1130].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1131].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1132].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1133].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1134].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1135].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1136].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1137].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[1138].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1139].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1140].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1141].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1142].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1143].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1144].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1145].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1146].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1147].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1148].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1149].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1151].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1152].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1153].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1154].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1155].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1157].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1158].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1160].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1163].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1165].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1166].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1167].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1168].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1169].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1170].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1171].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1172].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1175].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1176].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1177].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1178].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1179].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1180].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1181].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1182].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1183].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[1184].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1185].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1186].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1187].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1188].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1189].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1190].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1191].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1192].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1193].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1194].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1195].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1196].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1197].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1222].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1223].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1224].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1225].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1227].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1231].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1232].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1233].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1235].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1236].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1239].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1240].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1241].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1242].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1243].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1244].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1245].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1246].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1247].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1248].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1249].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1250].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1251].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1252].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[1253].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1254].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1255].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1256].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1257].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1258].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1259].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1260].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1261].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1262].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1263].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1264].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1265].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1266].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1267].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1268].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1269].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1270].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1271].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1272].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1273].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1274].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1275].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1276].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1277].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1278].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1279].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1280].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1281].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1282].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1283].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1284].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1285].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1286].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1287].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1288].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1289].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1290].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[1291].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1293].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1294].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1295].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1296].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1297].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1298].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1299].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1300].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1301].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1303].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1304].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1305].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1306].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1307].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1308].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1309].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1310].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1312].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1313].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1314].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1315].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1316].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1317].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1318].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1319].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1320].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1321].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1322].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1323].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1324].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1325].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1326].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1327].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1328].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1329].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1330].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1331].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[1332].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1333].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1334].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1335].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1336].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1337].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1338].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1339].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1340].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1341].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1342].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1343].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1344].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1345].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1346].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1347].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1348].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1349].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1350].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1351].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1352].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1354].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1355].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1356].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1357].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1358].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1359].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1360].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1361].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1362].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1363].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1364].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1369].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1370].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1371].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1372].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1373].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1375].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[1376].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1377].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1378].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1379].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1380].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1381].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1382].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1383].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1384].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1385].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1386].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1387].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1419].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1420].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1421].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1422].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1423].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1424].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1425].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1426].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1427].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1428].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1429].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1430].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1431].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1432].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1433].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1434].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1435].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1436].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1437].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1438].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1439].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1440].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1441].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1442].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1443].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1444].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[1445].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1446].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1447].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1448].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1449].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1450].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1451].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1452].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1453].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1454].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1455].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1456].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1457].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1458].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1459].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1462].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1463].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1464].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1465].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1466].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1467].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1468].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1470].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1471].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1472].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1473].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1474].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1475].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1476].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1477].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1478].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1479].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1480].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1481].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1485].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1486].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1487].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1488].OneofWrappers = []interface{}{} - 
file_google_cloud_compute_v1_compute_proto_msgTypes[1489].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1490].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1491].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1492].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1493].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1494].OneofWrappers = []interface{}{} - file_google_cloud_compute_v1_compute_proto_msgTypes[1495].OneofWrappers = []interface{}{} + file_google_cloud_compute_v1_compute_proto_msgTypes[0].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[2].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[3].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[4].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[5].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[6].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[7].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[8].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[9].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[10].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[11].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[12].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[13].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[14].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[15].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[16].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[17].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[18].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[19].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[20].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[21].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[22].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[23].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[24].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[25].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[26].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[27].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[28].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[29].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[30].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[31].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[32].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[33].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[34].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[35].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[36].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[37].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[38].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[39].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[40].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[41].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[42].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[43].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[44].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[45].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[46].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[47].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[48].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[49].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[50].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[51].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[52].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[53].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[54].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[55].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[56].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[57].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[58].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[59].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[60].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[61].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[62].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[63].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[64].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[65].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[66].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[67].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[68].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[69].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[70].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[71].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[72].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[73].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[74].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[75].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[76].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[77].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[78].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[79].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[80].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[81].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[82].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[83].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[84].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[85].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[86].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[87].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[88].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[89].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[92].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[93].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[94].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[95].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[96].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[97].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[98].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[99].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[100].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[101].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[102].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[103].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[104].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[105].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[106].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[107].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[108].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[109].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[110].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[111].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[112].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[113].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[114].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[115].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[117].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[118].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[119].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[120].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[121].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[122].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[123].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[124].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[125].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[126].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[127].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[128].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[129].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[130].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[131].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[132].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[133].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[134].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[135].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[136].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[137].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[138].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[139].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[140].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[141].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[142].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[143].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[144].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[145].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[146].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[147].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[148].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[149].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[150].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[151].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[152].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[153].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[154].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[155].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[156].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[157].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[158].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[159].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[160].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[161].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[162].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[163].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[164].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[165].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[166].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[167].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[168].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[169].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[170].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[171].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[172].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[173].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[174].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[175].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[176].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[177].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[178].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[179].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[180].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[181].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[182].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[183].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[184].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[185].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[188].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[190].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[191].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[192].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[193].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[194].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[195].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[196].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[197].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[198].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[199].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[200].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[201].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[202].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[203].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[204].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[205].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[206].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[207].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[208].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[209].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[210].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[211].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[212].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[213].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[216].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[217].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[218].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[219].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[220].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[221].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[222].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[223].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[224].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[225].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[226].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[227].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[228].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[231].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[232].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[233].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[234].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[235].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[236].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[237].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[238].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[239].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[240].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[241].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[242].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[243].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[244].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[245].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[246].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[247].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[248].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[249].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[250].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[251].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[252].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[253].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[254].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[255].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[256].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[257].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[258].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[259].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[260].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[261].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[264].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[265].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[266].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[267].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[268].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[269].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[270].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[271].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[272].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[273].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[274].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[275].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[276].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[277].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[278].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[279].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[281].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[282].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[283].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[284].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[285].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[286].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[289].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[290].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[291].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[292].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[293].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[294].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[295].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[296].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[297].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[298].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[300].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[301].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[302].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[303].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[304].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[305].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[306].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[307].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[308].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[309].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[310].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[311].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[312].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[313].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[314].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[315].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[316].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[317].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[318].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[320].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[321].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[322].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[323].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[324].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[325].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[326].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[327].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[328].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[329].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[332].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[333].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[334].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[353].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[355].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[360].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[361].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[362].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[363].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[364].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[365].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[366].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[367].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[368].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[369].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[370].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[371].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[372].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[373].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[374].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[375].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[376].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[377].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[378].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[379].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[380].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[381].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[382].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[383].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[402].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[403].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[444].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[445].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[446].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[447].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[448].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[451].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[474].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[477].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[480].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[481].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[482].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[483].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[484].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[486].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[487].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[488].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[489].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[490].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[491].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[492].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[493].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[494].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[495].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[496].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[497].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[498].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[499].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[500].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[502].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[503].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[504].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[505].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[506].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[508].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[509].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[510].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[511].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[512].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[513].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[514].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[515].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[516].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[517].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[518].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[519].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[520].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[521].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[522].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[523].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[524].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[525].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[526].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[527].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[528].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[529].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[530].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[531].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[532].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[533].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[534].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[535].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[536].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[537].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[538].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[539].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[540].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[541].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[542].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[543].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[544].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[545].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[546].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[547].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[548].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[549].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[550].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[551].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[552].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[553].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[554].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[555].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[556].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[557].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[558].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[559].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[560].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[561].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[562].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[563].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[564].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[565].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[566].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[567].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[568].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[569].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[570].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[571].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[572].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[573].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[574].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[575].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[576].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[577].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[578].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[579].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[580].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[581].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[582].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[583].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[584].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[585].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[586].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[587].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[588].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[589].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[590].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[591].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[592].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[593].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[594].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[595].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[596].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[597].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[598].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[599].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[600].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[601].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[602].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[603].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[604].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[605].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[606].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[607].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[608].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[609].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[610].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[611].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[612].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[613].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[614].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[615].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[616].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[617].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[618].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[619].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[620].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[622].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[624].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[626].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[627].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[628].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[631].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[632].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[633].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[636].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[637].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[639].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[640].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[641].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[642].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[643].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[644].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[645].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[646].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[648].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[650].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[651].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[652].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[653].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[654].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[655].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[656].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[657].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[661].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[663].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[664].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[666].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[667].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[668].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[669].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[670].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[672].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[673].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[674].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[675].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[676].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[677].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[678].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[679].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[680].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[681].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[682].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[683].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[684].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[685].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[686].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[687].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[688].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[689].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[690].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[691].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[692].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[693].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[694].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[695].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[696].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[697].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[698].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[700].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[701].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[702].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[703].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[704].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[705].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[706].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[707].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[708].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[709].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[710].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[711].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[712].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[713].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[714].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[715].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[716].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[717].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[718].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[719].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[720].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[721].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[722].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[723].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[724].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[725].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[726].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[727].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[728].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[729].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[730].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[731].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[732].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[733].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[734].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[735].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[736].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[737].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[738].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[739].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[740].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[741].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[742].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[743].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[744].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[745].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[746].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[747].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[748].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[749].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[750].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[751].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[752].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[753].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[754].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[755].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[756].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[757].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[758].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[759].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[760].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[761].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[762].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[763].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[764].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[765].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[766].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[767].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[768].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[769].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[770].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[771].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[772].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[773].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[774].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[775].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[776].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[777].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[778].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[779].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[780].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[781].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[782].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[783].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[784].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[785].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[786].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[787].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[788].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[789].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[790].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[791].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[792].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[793].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[794].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[795].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[796].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[797].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[798].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[799].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[800].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[801].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[802].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[803].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[804].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[805].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[806].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[807].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[808].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[809].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[810].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[811].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[812].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[813].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[814].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[815].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[816].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[817].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[818].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[819].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[820].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[821].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[822].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[823].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[824].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[825].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[826].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[827].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[828].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[829].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[830].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[831].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[832].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[833].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[834].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[835].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[836].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[837].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[838].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[839].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[840].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[841].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[842].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[843].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[844].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[845].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[846].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[847].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[848].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[849].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[850].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[851].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[852].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[853].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[854].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[855].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[856].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[857].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[858].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[859].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[860].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[861].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[862].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[864].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[865].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[866].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[867].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[868].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[869].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[870].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[871].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[872].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[873].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[874].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[875].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[876].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[877].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[878].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[879].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[880].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[883].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[884].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[885].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[886].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[887].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[888].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[889].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[890].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[891].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[892].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[894].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[895].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[896].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[897].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[898].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[899].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[900].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[901].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[902].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[903].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[905].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[906].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[907].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[908].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[910].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[911].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[912].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[913].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[914].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[915].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[916].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[917].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[918].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[919].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[920].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[921].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[922].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[923].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[924].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[925].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[926].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[927].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[928].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[929].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[930].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[931].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[932].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[934].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[935].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[936].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[937].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[938].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[939].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[940].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[941].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[942].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[943].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[944].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[945].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[946].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[947].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[948].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[949].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[950].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[951].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[952].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[953].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[954].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[955].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[956].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[957].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[958].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[959].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[960].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[961].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[962].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[963].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[964].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[965].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[966].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[967].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[968].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[969].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[970].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[971].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[972].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[973].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[974].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[975].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[976].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[977].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[978].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[979].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[980].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[981].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[982].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[983].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[984].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[985].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[986].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[987].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[988].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[989].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[990].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[991].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[992].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[993].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[996].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[997].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[998].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1000].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1001].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1002].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1003].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1004].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1005].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1006].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1007].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1008].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1009].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1010].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1011].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1012].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1013].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1014].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1015].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1016].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1017].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1018].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1019].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1020].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1021].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1022].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1023].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1024].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1025].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1028].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1029].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1030].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1032].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1036].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1038].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1039].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1040].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1041].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[1043].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1044].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1045].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1046].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1047].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1048].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1052].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1053].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1054].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1056].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1057].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1058].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1059].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1060].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1061].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1062].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1063].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1064].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1065].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1066].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1067].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1068].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1069].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1070].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1071].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1072].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1073].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1074].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1075].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1076].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1077].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1078].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1079].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1080].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1081].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1082].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1083].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1084].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1085].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1086].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1087].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1088].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[1089].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1090].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1092].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1093].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1094].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1095].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1096].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1097].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1098].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1099].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1100].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1101].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1102].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1104].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1105].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1106].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1107].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1108].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1109].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1110].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1111].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1112].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1113].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1114].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1115].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1116].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1117].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1118].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1119].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1120].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1121].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1122].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1124].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1125].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1126].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1127].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1128].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1129].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1130].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1131].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1132].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1133].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[1134].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1135].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1136].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1137].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1138].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1139].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1140].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1141].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1142].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1143].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1144].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1145].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1146].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1147].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1148].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1149].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1151].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1152].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1153].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1154].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1155].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1157].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1158].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1160].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1163].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1165].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1166].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1167].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1168].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1169].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1170].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1171].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1172].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1175].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1176].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1177].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1178].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1179].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1180].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1181].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1182].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1183].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[1184].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1185].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1186].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1187].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1188].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1189].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1190].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1191].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1192].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1193].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1194].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1195].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1196].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1197].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1222].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1223].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1224].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1225].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1227].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1231].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1232].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1233].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1235].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1236].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1239].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1240].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1241].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1242].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1243].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1244].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1245].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1246].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1247].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1248].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1249].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1250].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1251].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1252].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1253].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1254].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1255].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1256].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[1257].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1258].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1259].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1260].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1261].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1262].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1263].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1264].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1265].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1266].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1267].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1268].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1269].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1270].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1271].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1272].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1273].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1274].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1275].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1276].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1277].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1278].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1279].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1280].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1281].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1282].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1283].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1284].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1285].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1286].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1287].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1288].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1289].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1290].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1291].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1293].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1294].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1295].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1296].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1297].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1298].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1299].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[1300].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1301].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1303].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1304].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1305].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1306].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1307].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1308].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1309].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1310].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1312].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1313].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1314].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1315].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1316].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1317].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1318].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1319].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1320].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1321].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1322].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1323].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1324].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1325].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1326].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1327].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1328].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1329].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1330].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1331].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1332].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1333].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1334].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1335].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1336].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1337].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1338].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1339].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1340].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1341].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1342].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1343].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[1344].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1345].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1346].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1347].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1348].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1349].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1350].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1351].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1352].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1354].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1355].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1356].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1357].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1358].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1359].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1360].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1361].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1362].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1363].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1364].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1369].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1370].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1371].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1372].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1373].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1375].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1376].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1377].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1378].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1379].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1380].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1381].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1382].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1383].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1384].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1385].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1386].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1387].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1419].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1420].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1421].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1422].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[1423].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1424].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1425].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1426].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1427].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1428].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1429].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1430].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1431].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1432].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1433].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1434].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1435].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1436].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1437].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1438].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1439].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1440].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1441].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1442].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1443].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1444].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1445].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1446].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1447].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1448].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1449].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1450].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1451].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1452].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1453].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1454].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1455].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1456].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1457].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1458].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1459].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1462].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1463].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1464].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1465].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1466].OneofWrappers = []any{} + 
file_google_cloud_compute_v1_compute_proto_msgTypes[1467].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1468].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1470].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1471].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1472].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1473].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1474].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1475].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1476].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1477].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1478].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1479].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1480].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1481].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1485].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1486].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1487].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1488].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1489].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1490].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1491].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1492].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1493].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1494].OneofWrappers = []any{} + file_google_cloud_compute_v1_compute_proto_msgTypes[1495].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/cloud.google.com/go/compute/apiv1/disk_types_client.go b/vendor/cloud.google.com/go/compute/apiv1/disk_types_client.go index 9232ea6b0..8b9a9f461 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/disk_types_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/disk_types_client.go @@ -192,6 +192,7 @@ func defaultDiskTypesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -201,7 +202,9 @@ func defaultDiskTypesRESTClientOptions() []option.ClientOption { func (c *diskTypesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. 
The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/disks_client.go b/vendor/cloud.google.com/go/compute/apiv1/disks_client.go index ab52449fa..a93fa6788 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/disks_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/disks_client.go @@ -365,6 +365,7 @@ func defaultDisksRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -374,7 +375,9 @@ func defaultDisksRESTClientOptions() []option.ClientOption { func (c *disksRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/external_vpn_gateways_client.go b/vendor/cloud.google.com/go/compute/apiv1/external_vpn_gateways_client.go index 5a8125d5a..f5be67dd3 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/external_vpn_gateways_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/external_vpn_gateways_client.go @@ -226,6 +226,7 @@ func defaultExternalVpnGatewaysRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -235,7 +236,9 @@ func defaultExternalVpnGatewaysRESTClientOptions() []option.ClientOption { func (c *externalVpnGatewaysRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/firewall_policies_client.go b/vendor/cloud.google.com/go/compute/apiv1/firewall_policies_client.go index 5b2910a07..81f5db0a9 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/firewall_policies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/firewall_policies_client.go @@ -382,6 +382,7 @@ func defaultFirewallPoliciesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -391,7 +392,9 @@ func defaultFirewallPoliciesRESTClientOptions() []option.ClientOption { func (c *firewallPoliciesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) 
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/firewalls_client.go b/vendor/cloud.google.com/go/compute/apiv1/firewalls_client.go index 14687a445..43180db03 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/firewalls_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/firewalls_client.go @@ -226,6 +226,7 @@ func defaultFirewallsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -235,7 +236,9 @@ func defaultFirewallsRESTClientOptions() []option.ClientOption { func (c *firewallsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/forwarding_rules_client.go b/vendor/cloud.google.com/go/compute/apiv1/forwarding_rules_client.go index 60638d2bb..015568945 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/forwarding_rules_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/forwarding_rules_client.go @@ -256,6 +256,7 @@ func defaultForwardingRulesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -265,7 +266,9 @@ func defaultForwardingRulesRESTClientOptions() []option.ClientOption { func (c *forwardingRulesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. 
The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/global_addresses_client.go b/vendor/cloud.google.com/go/compute/apiv1/global_addresses_client.go index 5b3ad8ebc..fa69f00cf 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/global_addresses_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/global_addresses_client.go @@ -226,6 +226,7 @@ func defaultGlobalAddressesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -235,7 +236,9 @@ func defaultGlobalAddressesRESTClientOptions() []option.ClientOption { func (c *globalAddressesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/global_forwarding_rules_client.go b/vendor/cloud.google.com/go/compute/apiv1/global_forwarding_rules_client.go index 13eed1b64..c742c0949 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/global_forwarding_rules_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/global_forwarding_rules_client.go @@ -236,6 +236,7 @@ func defaultGlobalForwardingRulesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -245,7 +246,9 @@ func defaultGlobalForwardingRulesRESTClientOptions() []option.ClientOption { func (c *globalForwardingRulesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/global_network_endpoint_groups_client.go b/vendor/cloud.google.com/go/compute/apiv1/global_network_endpoint_groups_client.go index e21973dcf..be6570688 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/global_network_endpoint_groups_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/global_network_endpoint_groups_client.go @@ -236,6 +236,7 @@ func defaultGlobalNetworkEndpointGroupsRESTClientOptions() []option.ClientOption internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -245,7 +246,9 @@ func defaultGlobalNetworkEndpointGroupsRESTClientOptions() []option.ClientOption func (c *globalNetworkEndpointGroupsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) 
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/global_operations_client.go b/vendor/cloud.google.com/go/compute/apiv1/global_operations_client.go index d4f013985..206112469 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/global_operations_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/global_operations_client.go @@ -212,6 +212,7 @@ func defaultGlobalOperationsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -221,7 +222,9 @@ func defaultGlobalOperationsRESTClientOptions() []option.ClientOption { func (c *globalOperationsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/global_organization_operations_client.go b/vendor/cloud.google.com/go/compute/apiv1/global_organization_operations_client.go index b73f65cd5..737e380b9 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/global_organization_operations_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/global_organization_operations_client.go @@ -182,6 +182,7 @@ func defaultGlobalOrganizationOperationsRESTClientOptions() []option.ClientOptio internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -191,7 +192,9 @@ func defaultGlobalOrganizationOperationsRESTClientOptions() []option.ClientOptio func (c *globalOrganizationOperationsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. 
The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/global_public_delegated_prefixes_client.go b/vendor/cloud.google.com/go/compute/apiv1/global_public_delegated_prefixes_client.go index 3e2104ce8..e9fdcf804 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/global_public_delegated_prefixes_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/global_public_delegated_prefixes_client.go @@ -216,6 +216,7 @@ func defaultGlobalPublicDelegatedPrefixesRESTClientOptions() []option.ClientOpti internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -225,7 +226,9 @@ func defaultGlobalPublicDelegatedPrefixesRESTClientOptions() []option.ClientOpti func (c *globalPublicDelegatedPrefixesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/health_checks_client.go b/vendor/cloud.google.com/go/compute/apiv1/health_checks_client.go index 2fc7e20c2..9e5701f23 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/health_checks_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/health_checks_client.go @@ -246,6 +246,7 @@ func defaultHealthChecksRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -255,7 +256,9 @@ func defaultHealthChecksRESTClientOptions() []option.ClientOption { func (c *healthChecksRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/image_family_views_client.go b/vendor/cloud.google.com/go/compute/apiv1/image_family_views_client.go index d195189fe..6709ed337 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/image_family_views_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/image_family_views_client.go @@ -150,6 +150,7 @@ func defaultImageFamilyViewsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -159,7 +160,9 @@ func defaultImageFamilyViewsRESTClientOptions() []option.ClientOption { func (c *imageFamilyViewsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) 
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/images_client.go b/vendor/cloud.google.com/go/compute/apiv1/images_client.go index 5ec623981..ff1415b4e 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/images_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/images_client.go @@ -294,6 +294,7 @@ func defaultImagesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -303,7 +304,9 @@ func defaultImagesRESTClientOptions() []option.ClientOption { func (c *imagesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/instance_group_manager_resize_requests_client.go b/vendor/cloud.google.com/go/compute/apiv1/instance_group_manager_resize_requests_client.go index df1ef68df..022d23afd 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/instance_group_manager_resize_requests_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/instance_group_manager_resize_requests_client.go @@ -216,6 +216,7 @@ func defaultInstanceGroupManagerResizeRequestsRESTClientOptions() []option.Clien internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -225,7 +226,9 @@ func defaultInstanceGroupManagerResizeRequestsRESTClientOptions() []option.Clien func (c *instanceGroupManagerResizeRequestsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. 
The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/instance_group_managers_client.go b/vendor/cloud.google.com/go/compute/apiv1/instance_group_managers_client.go index 8738d0ab0..6216112df 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/instance_group_managers_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/instance_group_managers_client.go @@ -385,6 +385,7 @@ func defaultInstanceGroupManagersRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -394,7 +395,9 @@ func defaultInstanceGroupManagersRESTClientOptions() []option.ClientOption { func (c *instanceGroupManagersRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/instance_groups_client.go b/vendor/cloud.google.com/go/compute/apiv1/instance_groups_client.go index c5b08a00f..097a512eb 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/instance_groups_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/instance_groups_client.go @@ -266,6 +266,7 @@ func defaultInstanceGroupsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -275,7 +276,9 @@ func defaultInstanceGroupsRESTClientOptions() []option.ClientOption { func (c *instanceGroupsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/instance_settings_client.go b/vendor/cloud.google.com/go/compute/apiv1/instance_settings_client.go index 5f84e25a8..ac72d6dec 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/instance_settings_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/instance_settings_client.go @@ -174,6 +174,7 @@ func defaultInstanceSettingsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -183,7 +184,9 @@ func defaultInstanceSettingsRESTClientOptions() []option.ClientOption { func (c *instanceSettingsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) 
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/instance_templates_client.go b/vendor/cloud.google.com/go/compute/apiv1/instance_templates_client.go index 124c6d02f..328c405b1 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/instance_templates_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/instance_templates_client.go @@ -265,6 +265,7 @@ func defaultInstanceTemplatesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -274,7 +275,9 @@ func defaultInstanceTemplatesRESTClientOptions() []option.ClientOption { func (c *instanceTemplatesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/instances_client.go b/vendor/cloud.google.com/go/compute/apiv1/instances_client.go index 67ee03fa3..274f86697 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/instances_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/instances_client.go @@ -719,6 +719,7 @@ func defaultInstancesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -728,7 +729,9 @@ func defaultInstancesRESTClientOptions() []option.ClientOption { func (c *instancesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. 
The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/instant_snapshots_client.go b/vendor/cloud.google.com/go/compute/apiv1/instant_snapshots_client.go index 989c8a0e8..172c311c6 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/instant_snapshots_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/instant_snapshots_client.go @@ -275,6 +275,7 @@ func defaultInstantSnapshotsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -284,7 +285,9 @@ func defaultInstantSnapshotsRESTClientOptions() []option.ClientOption { func (c *instantSnapshotsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/interconnect_attachments_client.go b/vendor/cloud.google.com/go/compute/apiv1/interconnect_attachments_client.go index 126705c13..df853852a 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/interconnect_attachments_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/interconnect_attachments_client.go @@ -246,6 +246,7 @@ func defaultInterconnectAttachmentsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -255,7 +256,9 @@ func defaultInterconnectAttachmentsRESTClientOptions() []option.ClientOption { func (c *interconnectAttachmentsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/interconnect_locations_client.go b/vendor/cloud.google.com/go/compute/apiv1/interconnect_locations_client.go index 6463aacde..2431c7c67 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/interconnect_locations_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/interconnect_locations_client.go @@ -172,6 +172,7 @@ func defaultInterconnectLocationsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -181,7 +182,9 @@ func defaultInterconnectLocationsRESTClientOptions() []option.ClientOption { func (c *interconnectLocationsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) 
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/interconnect_remote_locations_client.go b/vendor/cloud.google.com/go/compute/apiv1/interconnect_remote_locations_client.go index cd457f53a..947cde3a1 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/interconnect_remote_locations_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/interconnect_remote_locations_client.go @@ -172,6 +172,7 @@ func defaultInterconnectRemoteLocationsRESTClientOptions() []option.ClientOption internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -181,7 +182,9 @@ func defaultInterconnectRemoteLocationsRESTClientOptions() []option.ClientOption func (c *interconnectRemoteLocationsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/interconnects_client.go b/vendor/cloud.google.com/go/compute/apiv1/interconnects_client.go index 5c82b6551..2be723331 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/interconnects_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/interconnects_client.go @@ -264,6 +264,7 @@ func defaultInterconnectsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -273,7 +274,9 @@ func defaultInterconnectsRESTClientOptions() []option.ClientOption { func (c *interconnectsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. 
The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/license_codes_client.go b/vendor/cloud.google.com/go/compute/apiv1/license_codes_client.go index 9e8781cab..cbc598b65 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/license_codes_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/license_codes_client.go @@ -161,6 +161,7 @@ func defaultLicenseCodesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -170,7 +171,9 @@ func defaultLicenseCodesRESTClientOptions() []option.ClientOption { func (c *licenseCodesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/licenses_client.go b/vendor/cloud.google.com/go/compute/apiv1/licenses_client.go index 5c58a507f..818036eb7 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/licenses_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/licenses_client.go @@ -245,6 +245,7 @@ func defaultLicensesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -254,7 +255,9 @@ func defaultLicensesRESTClientOptions() []option.ClientOption { func (c *licensesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/machine_images_client.go b/vendor/cloud.google.com/go/compute/apiv1/machine_images_client.go index 381063599..fc81e62d0 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/machine_images_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/machine_images_client.go @@ -245,6 +245,7 @@ func defaultMachineImagesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -254,7 +255,9 @@ func defaultMachineImagesRESTClientOptions() []option.ClientOption { func (c *machineImagesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. 
The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/machine_types_client.go b/vendor/cloud.google.com/go/compute/apiv1/machine_types_client.go index 0b59d54fc..05b9dec4e 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/machine_types_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/machine_types_client.go @@ -192,6 +192,7 @@ func defaultMachineTypesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -201,7 +202,9 @@ func defaultMachineTypesRESTClientOptions() []option.ClientOption { func (c *machineTypesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/network_attachments_client.go b/vendor/cloud.google.com/go/compute/apiv1/network_attachments_client.go index a10cc2dfa..c46e8ae24 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/network_attachments_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/network_attachments_client.go @@ -275,6 +275,7 @@ func defaultNetworkAttachmentsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -284,7 +285,9 @@ func defaultNetworkAttachmentsRESTClientOptions() []option.ClientOption { func (c *networkAttachmentsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/network_edge_security_services_client.go b/vendor/cloud.google.com/go/compute/apiv1/network_edge_security_services_client.go index 202e4acf5..962688fba 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/network_edge_security_services_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/network_edge_security_services_client.go @@ -217,6 +217,7 @@ func defaultNetworkEdgeSecurityServicesRESTClientOptions() []option.ClientOption internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -226,7 +227,9 @@ func defaultNetworkEdgeSecurityServicesRESTClientOptions() []option.ClientOption func (c *networkEdgeSecurityServicesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) 
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/network_endpoint_groups_client.go b/vendor/cloud.google.com/go/compute/apiv1/network_endpoint_groups_client.go index e988640c8..9f4cc0c0e 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/network_endpoint_groups_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/network_endpoint_groups_client.go @@ -266,6 +266,7 @@ func defaultNetworkEndpointGroupsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -275,7 +276,9 @@ func defaultNetworkEndpointGroupsRESTClientOptions() []option.ClientOption { func (c *networkEndpointGroupsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/network_firewall_policies_client.go b/vendor/cloud.google.com/go/compute/apiv1/network_firewall_policies_client.go index fad8e17ac..f23acc6d0 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/network_firewall_policies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/network_firewall_policies_client.go @@ -353,6 +353,7 @@ func defaultNetworkFirewallPoliciesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -362,7 +363,9 @@ func defaultNetworkFirewallPoliciesRESTClientOptions() []option.ClientOption { func (c *networkFirewallPoliciesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. 
The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/networks_client.go b/vendor/cloud.google.com/go/compute/apiv1/networks_client.go index 49ff4cb79..6a1742b28 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/networks_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/networks_client.go @@ -294,6 +294,7 @@ func defaultNetworksRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -303,7 +304,9 @@ func defaultNetworksRESTClientOptions() []option.ClientOption { func (c *networksRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/node_groups_client.go b/vendor/cloud.google.com/go/compute/apiv1/node_groups_client.go index a9ac07cfe..c8fe11287 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/node_groups_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/node_groups_client.go @@ -335,6 +335,7 @@ func defaultNodeGroupsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -344,7 +345,9 @@ func defaultNodeGroupsRESTClientOptions() []option.ClientOption { func (c *nodeGroupsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/node_templates_client.go b/vendor/cloud.google.com/go/compute/apiv1/node_templates_client.go index 6a04c43c6..c8d323794 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/node_templates_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/node_templates_client.go @@ -265,6 +265,7 @@ func defaultNodeTemplatesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -274,7 +275,9 @@ func defaultNodeTemplatesRESTClientOptions() []option.ClientOption { func (c *nodeTemplatesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. 
The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/node_types_client.go b/vendor/cloud.google.com/go/compute/apiv1/node_types_client.go index 9ff196bf9..5d8433298 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/node_types_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/node_types_client.go @@ -192,6 +192,7 @@ func defaultNodeTypesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -201,7 +202,9 @@ func defaultNodeTypesRESTClientOptions() []option.ClientOption { func (c *nodeTypesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/packet_mirrorings_client.go b/vendor/cloud.google.com/go/compute/apiv1/packet_mirrorings_client.go index e85c883bd..ebef1f0d2 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/packet_mirrorings_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/packet_mirrorings_client.go @@ -246,6 +246,7 @@ func defaultPacketMirroringsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -255,7 +256,9 @@ func defaultPacketMirroringsRESTClientOptions() []option.ClientOption { func (c *packetMirroringsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/projects_client.go b/vendor/cloud.google.com/go/compute/apiv1/projects_client.go index 1253efedf..3badffc90 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/projects_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/projects_client.go @@ -315,6 +315,7 @@ func defaultProjectsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -324,7 +325,9 @@ func defaultProjectsRESTClientOptions() []option.ClientOption { func (c *projectsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. 
The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/public_advertised_prefixes_client.go b/vendor/cloud.google.com/go/compute/apiv1/public_advertised_prefixes_client.go index 968794702..a2b4f0ee2 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/public_advertised_prefixes_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/public_advertised_prefixes_client.go @@ -236,6 +236,7 @@ func defaultPublicAdvertisedPrefixesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -245,7 +246,9 @@ func defaultPublicAdvertisedPrefixesRESTClientOptions() []option.ClientOption { func (c *publicAdvertisedPrefixesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/public_delegated_prefixes_client.go b/vendor/cloud.google.com/go/compute/apiv1/public_delegated_prefixes_client.go index eb9d19053..98540c9b9 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/public_delegated_prefixes_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/public_delegated_prefixes_client.go @@ -256,6 +256,7 @@ func defaultPublicDelegatedPrefixesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -265,7 +266,9 @@ func defaultPublicDelegatedPrefixesRESTClientOptions() []option.ClientOption { func (c *publicDelegatedPrefixesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_autoscalers_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_autoscalers_client.go index b75524581..a6a3a745a 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_autoscalers_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_autoscalers_client.go @@ -226,6 +226,7 @@ func defaultRegionAutoscalersRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -235,7 +236,9 @@ func defaultRegionAutoscalersRESTClientOptions() []option.ClientOption { func (c *regionAutoscalersRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) 
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_backend_services_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_backend_services_client.go index ef9b32207..5dc154092 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_backend_services_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_backend_services_client.go @@ -304,6 +304,7 @@ func defaultRegionBackendServicesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -313,7 +314,9 @@ func defaultRegionBackendServicesRESTClientOptions() []option.ClientOption { func (c *regionBackendServicesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_commitments_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_commitments_client.go index 953fdbed5..c1169c81f 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_commitments_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_commitments_client.go @@ -226,6 +226,7 @@ func defaultRegionCommitmentsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -235,7 +236,9 @@ func defaultRegionCommitmentsRESTClientOptions() []option.ClientOption { func (c *regionCommitmentsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. 
The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_disk_types_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_disk_types_client.go index 2e28241a4..0c3cc4a55 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_disk_types_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_disk_types_client.go @@ -172,6 +172,7 @@ func defaultRegionDiskTypesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -181,7 +182,9 @@ func defaultRegionDiskTypesRESTClientOptions() []option.ClientOption { func (c *regionDiskTypesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_disks_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_disks_client.go index c5f436177..b96f909e1 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_disks_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_disks_client.go @@ -345,6 +345,7 @@ func defaultRegionDisksRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -354,7 +355,9 @@ func defaultRegionDisksRESTClientOptions() []option.ClientOption { func (c *regionDisksRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_health_check_services_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_health_check_services_client.go index 2c05101a8..a33c9613f 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_health_check_services_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_health_check_services_client.go @@ -216,6 +216,7 @@ func defaultRegionHealthCheckServicesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -225,7 +226,9 @@ func defaultRegionHealthCheckServicesRESTClientOptions() []option.ClientOption { func (c *regionHealthCheckServicesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) 
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_health_checks_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_health_checks_client.go index e02afd82a..6c83d7939 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_health_checks_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_health_checks_client.go @@ -226,6 +226,7 @@ func defaultRegionHealthChecksRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -235,7 +236,9 @@ func defaultRegionHealthChecksRESTClientOptions() []option.ClientOption { func (c *regionHealthChecksRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_instance_group_managers_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_instance_group_managers_client.go index a21ce16ff..753865467 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_instance_group_managers_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_instance_group_managers_client.go @@ -365,6 +365,7 @@ func defaultRegionInstanceGroupManagersRESTClientOptions() []option.ClientOption internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -374,7 +375,9 @@ func defaultRegionInstanceGroupManagersRESTClientOptions() []option.ClientOption func (c *regionInstanceGroupManagersRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. 
The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_instance_groups_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_instance_groups_client.go index 50725a87b..46916ee8e 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_instance_groups_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_instance_groups_client.go @@ -206,6 +206,7 @@ func defaultRegionInstanceGroupsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -215,7 +216,9 @@ func defaultRegionInstanceGroupsRESTClientOptions() []option.ClientOption { func (c *regionInstanceGroupsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_instance_templates_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_instance_templates_client.go index 24f68e99f..77dd53325 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_instance_templates_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_instance_templates_client.go @@ -206,6 +206,7 @@ func defaultRegionInstanceTemplatesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -215,7 +216,9 @@ func defaultRegionInstanceTemplatesRESTClientOptions() []option.ClientOption { func (c *regionInstanceTemplatesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_instances_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_instances_client.go index 0aab7f8da..089279ffe 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_instances_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_instances_client.go @@ -155,6 +155,7 @@ func defaultRegionInstancesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -164,7 +165,9 @@ func defaultRegionInstancesRESTClientOptions() []option.ClientOption { func (c *regionInstancesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) 
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_instant_snapshots_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_instant_snapshots_client.go index 4a1d84b3f..ad6260bb8 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_instant_snapshots_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_instant_snapshots_client.go @@ -255,6 +255,7 @@ func defaultRegionInstantSnapshotsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -264,7 +265,9 @@ func defaultRegionInstantSnapshotsRESTClientOptions() []option.ClientOption { func (c *regionInstantSnapshotsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_network_endpoint_groups_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_network_endpoint_groups_client.go index d3572b62f..c06135d13 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_network_endpoint_groups_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_network_endpoint_groups_client.go @@ -236,6 +236,7 @@ func defaultRegionNetworkEndpointGroupsRESTClientOptions() []option.ClientOption internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -245,7 +246,9 @@ func defaultRegionNetworkEndpointGroupsRESTClientOptions() []option.ClientOption func (c *regionNetworkEndpointGroupsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. 
The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_network_firewall_policies_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_network_firewall_policies_client.go index 60447b275..071aeaea9 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_network_firewall_policies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_network_firewall_policies_client.go @@ -372,6 +372,7 @@ func defaultRegionNetworkFirewallPoliciesRESTClientOptions() []option.ClientOpti internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -381,7 +382,9 @@ func defaultRegionNetworkFirewallPoliciesRESTClientOptions() []option.ClientOpti func (c *regionNetworkFirewallPoliciesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_notification_endpoints_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_notification_endpoints_client.go index 842024a50..c8a387653 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_notification_endpoints_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_notification_endpoints_client.go @@ -206,6 +206,7 @@ func defaultRegionNotificationEndpointsRESTClientOptions() []option.ClientOption internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -215,7 +216,9 @@ func defaultRegionNotificationEndpointsRESTClientOptions() []option.ClientOption func (c *regionNotificationEndpointsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_operations_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_operations_client.go index 62503d667..4be12a845 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_operations_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_operations_client.go @@ -192,6 +192,7 @@ func defaultRegionOperationsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -201,7 +202,9 @@ func defaultRegionOperationsRESTClientOptions() []option.ClientOption { func (c *regionOperationsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) 
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_security_policies_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_security_policies_client.go index e0a73c722..98380be4c 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_security_policies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_security_policies_client.go @@ -265,6 +265,7 @@ func defaultRegionSecurityPoliciesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -274,7 +275,9 @@ func defaultRegionSecurityPoliciesRESTClientOptions() []option.ClientOption { func (c *regionSecurityPoliciesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_ssl_certificates_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_ssl_certificates_client.go index 17785ed42..b9baf2f8c 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_ssl_certificates_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_ssl_certificates_client.go @@ -206,6 +206,7 @@ func defaultRegionSslCertificatesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -215,7 +216,9 @@ func defaultRegionSslCertificatesRESTClientOptions() []option.ClientOption { func (c *regionSslCertificatesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. 
The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_ssl_policies_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_ssl_policies_client.go index 0ea01cc18..ffa01e565 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_ssl_policies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_ssl_policies_client.go @@ -235,6 +235,7 @@ func defaultRegionSslPoliciesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -244,7 +245,9 @@ func defaultRegionSslPoliciesRESTClientOptions() []option.ClientOption { func (c *regionSslPoliciesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_target_http_proxies_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_target_http_proxies_client.go index 708819ae5..7757843db 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_target_http_proxies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_target_http_proxies_client.go @@ -216,6 +216,7 @@ func defaultRegionTargetHttpProxiesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -225,7 +226,9 @@ func defaultRegionTargetHttpProxiesRESTClientOptions() []option.ClientOption { func (c *regionTargetHttpProxiesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_target_https_proxies_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_target_https_proxies_client.go index ab912eb60..e5f177a83 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_target_https_proxies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_target_https_proxies_client.go @@ -236,6 +236,7 @@ func defaultRegionTargetHttpsProxiesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -245,7 +246,9 @@ func defaultRegionTargetHttpsProxiesRESTClientOptions() []option.ClientOption { func (c *regionTargetHttpsProxiesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) 
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_target_tcp_proxies_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_target_tcp_proxies_client.go index edc0d0536..96fee8d1e 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_target_tcp_proxies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_target_tcp_proxies_client.go @@ -206,6 +206,7 @@ func defaultRegionTargetTcpProxiesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -215,7 +216,9 @@ func defaultRegionTargetTcpProxiesRESTClientOptions() []option.ClientOption { func (c *regionTargetTcpProxiesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_url_maps_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_url_maps_client.go index 212b9ddfc..437590ed5 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_url_maps_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_url_maps_client.go @@ -236,6 +236,7 @@ func defaultRegionUrlMapsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -245,7 +246,9 @@ func defaultRegionUrlMapsRESTClientOptions() []option.ClientOption { func (c *regionUrlMapsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. 
The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_zones_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_zones_client.go index 4ddd6fe98..8ac8a0c31 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_zones_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_zones_client.go @@ -153,6 +153,7 @@ func defaultRegionZonesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -162,7 +163,9 @@ func defaultRegionZonesRESTClientOptions() []option.ClientOption { func (c *regionZonesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/regions_client.go b/vendor/cloud.google.com/go/compute/apiv1/regions_client.go index 417d2b168..106a8056e 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/regions_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/regions_client.go @@ -172,6 +172,7 @@ func defaultRegionsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -181,7 +182,9 @@ func defaultRegionsRESTClientOptions() []option.ClientOption { func (c *regionsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/reservations_client.go b/vendor/cloud.google.com/go/compute/apiv1/reservations_client.go index 586312e1d..8628c9072 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/reservations_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/reservations_client.go @@ -285,6 +285,7 @@ func defaultReservationsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -294,7 +295,9 @@ func defaultReservationsRESTClientOptions() []option.ClientOption { func (c *reservationsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. 
The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/resource_policies_client.go b/vendor/cloud.google.com/go/compute/apiv1/resource_policies_client.go index c7ba2077a..b8f962ad9 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/resource_policies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/resource_policies_client.go @@ -275,6 +275,7 @@ func defaultResourcePoliciesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -284,7 +285,9 @@ func defaultResourcePoliciesRESTClientOptions() []option.ClientOption { func (c *resourcePoliciesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/routers_client.go b/vendor/cloud.google.com/go/compute/apiv1/routers_client.go index f2ea639d5..4e13c8244 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/routers_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/routers_client.go @@ -313,6 +313,7 @@ func defaultRoutersRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -322,7 +323,9 @@ func defaultRoutersRESTClientOptions() []option.ClientOption { func (c *routersRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/routes_client.go b/vendor/cloud.google.com/go/compute/apiv1/routes_client.go index 629dd89ee..fe55e761d 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/routes_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/routes_client.go @@ -206,6 +206,7 @@ func defaultRoutesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -215,7 +216,9 @@ func defaultRoutesRESTClientOptions() []option.ClientOption { func (c *routesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. 
The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/security_policies_client.go b/vendor/cloud.google.com/go/compute/apiv1/security_policies_client.go index 109dde55c..61fd4c56c 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/security_policies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/security_policies_client.go @@ -314,6 +314,7 @@ func defaultSecurityPoliciesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -323,7 +324,9 @@ func defaultSecurityPoliciesRESTClientOptions() []option.ClientOption { func (c *securityPoliciesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/service_attachments_client.go b/vendor/cloud.google.com/go/compute/apiv1/service_attachments_client.go index 2e1f5dcf6..0336b7b3c 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/service_attachments_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/service_attachments_client.go @@ -275,6 +275,7 @@ func defaultServiceAttachmentsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -284,7 +285,9 @@ func defaultServiceAttachmentsRESTClientOptions() []option.ClientOption { func (c *serviceAttachmentsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/snapshot_settings_client.go b/vendor/cloud.google.com/go/compute/apiv1/snapshot_settings_client.go index ae85281ef..06544ce37 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/snapshot_settings_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/snapshot_settings_client.go @@ -174,6 +174,7 @@ func defaultSnapshotSettingsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -183,7 +184,9 @@ func defaultSnapshotSettingsRESTClientOptions() []option.ClientOption { func (c *snapshotSettingsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) 
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/snapshots_client.go b/vendor/cloud.google.com/go/compute/apiv1/snapshots_client.go index 2e7ca07c4..5a7cb07d0 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/snapshots_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/snapshots_client.go @@ -255,6 +255,7 @@ func defaultSnapshotsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -264,7 +265,9 @@ func defaultSnapshotsRESTClientOptions() []option.ClientOption { func (c *snapshotsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/ssl_certificates_client.go b/vendor/cloud.google.com/go/compute/apiv1/ssl_certificates_client.go index 85ab01f0b..e88f08f6a 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/ssl_certificates_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/ssl_certificates_client.go @@ -226,6 +226,7 @@ func defaultSslCertificatesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -235,7 +236,9 @@ func defaultSslCertificatesRESTClientOptions() []option.ClientOption { func (c *sslCertificatesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/ssl_policies_client.go b/vendor/cloud.google.com/go/compute/apiv1/ssl_policies_client.go index 243f29c1c..1428711ea 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/ssl_policies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/ssl_policies_client.go @@ -255,6 +255,7 @@ func defaultSslPoliciesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -264,7 +265,9 @@ func defaultSslPoliciesRESTClientOptions() []option.ClientOption { func (c *sslPoliciesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) 
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/storage_pool_types_client.go b/vendor/cloud.google.com/go/compute/apiv1/storage_pool_types_client.go index 55c89d794..7f1ddb9ae 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/storage_pool_types_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/storage_pool_types_client.go @@ -192,6 +192,7 @@ func defaultStoragePoolTypesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -201,7 +202,9 @@ func defaultStoragePoolTypesRESTClientOptions() []option.ClientOption { func (c *storagePoolTypesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/storage_pools_client.go b/vendor/cloud.google.com/go/compute/apiv1/storage_pools_client.go index 8616a0c38..83dc8f036 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/storage_pools_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/storage_pools_client.go @@ -294,6 +294,7 @@ func defaultStoragePoolsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -303,7 +304,9 @@ func defaultStoragePoolsRESTClientOptions() []option.ClientOption { func (c *storagePoolsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. 
The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/subnetworks_client.go b/vendor/cloud.google.com/go/compute/apiv1/subnetworks_client.go index beb882bd2..8c5b91b5c 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/subnetworks_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/subnetworks_client.go @@ -314,6 +314,7 @@ func defaultSubnetworksRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -323,7 +324,9 @@ func defaultSubnetworksRESTClientOptions() []option.ClientOption { func (c *subnetworksRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/target_grpc_proxies_client.go b/vendor/cloud.google.com/go/compute/apiv1/target_grpc_proxies_client.go index 65a1bdeda..8925ff4e8 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/target_grpc_proxies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/target_grpc_proxies_client.go @@ -216,6 +216,7 @@ func defaultTargetGrpcProxiesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -225,7 +226,9 @@ func defaultTargetGrpcProxiesRESTClientOptions() []option.ClientOption { func (c *targetGrpcProxiesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/target_http_proxies_client.go b/vendor/cloud.google.com/go/compute/apiv1/target_http_proxies_client.go index f18ab1c1b..e15744b7e 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/target_http_proxies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/target_http_proxies_client.go @@ -246,6 +246,7 @@ func defaultTargetHttpProxiesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -255,7 +256,9 @@ func defaultTargetHttpProxiesRESTClientOptions() []option.ClientOption { func (c *targetHttpProxiesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) 
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/target_https_proxies_client.go b/vendor/cloud.google.com/go/compute/apiv1/target_https_proxies_client.go index d302ca1a3..842ce70ce 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/target_https_proxies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/target_https_proxies_client.go @@ -286,6 +286,7 @@ func defaultTargetHttpsProxiesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -295,7 +296,9 @@ func defaultTargetHttpsProxiesRESTClientOptions() []option.ClientOption { func (c *targetHttpsProxiesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/target_instances_client.go b/vendor/cloud.google.com/go/compute/apiv1/target_instances_client.go index 4e5ceb434..cbcb2fc50 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/target_instances_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/target_instances_client.go @@ -236,6 +236,7 @@ func defaultTargetInstancesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -245,7 +246,9 @@ func defaultTargetInstancesRESTClientOptions() []option.ClientOption { func (c *targetInstancesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. 
The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/target_pools_client.go b/vendor/cloud.google.com/go/compute/apiv1/target_pools_client.go index 277826c88..c249c6339 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/target_pools_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/target_pools_client.go @@ -296,6 +296,7 @@ func defaultTargetPoolsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -305,7 +306,9 @@ func defaultTargetPoolsRESTClientOptions() []option.ClientOption { func (c *targetPoolsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/target_ssl_proxies_client.go b/vendor/cloud.google.com/go/compute/apiv1/target_ssl_proxies_client.go index 46039e311..b7102d2e3 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/target_ssl_proxies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/target_ssl_proxies_client.go @@ -256,6 +256,7 @@ func defaultTargetSslProxiesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -265,7 +266,9 @@ func defaultTargetSslProxiesRESTClientOptions() []option.ClientOption { func (c *targetSslProxiesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/target_tcp_proxies_client.go b/vendor/cloud.google.com/go/compute/apiv1/target_tcp_proxies_client.go index 0112016fe..5e1e2c492 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/target_tcp_proxies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/target_tcp_proxies_client.go @@ -246,6 +246,7 @@ func defaultTargetTcpProxiesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -255,7 +256,9 @@ func defaultTargetTcpProxiesRESTClientOptions() []option.ClientOption { func (c *targetTcpProxiesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) 
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/target_vpn_gateways_client.go b/vendor/cloud.google.com/go/compute/apiv1/target_vpn_gateways_client.go index 5e2d23fb3..86151a83b 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/target_vpn_gateways_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/target_vpn_gateways_client.go @@ -236,6 +236,7 @@ func defaultTargetVpnGatewaysRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -245,7 +246,9 @@ func defaultTargetVpnGatewaysRESTClientOptions() []option.ClientOption { func (c *targetVpnGatewaysRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/url_maps_client.go b/vendor/cloud.google.com/go/compute/apiv1/url_maps_client.go index e1cf26095..5f6aebda9 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/url_maps_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/url_maps_client.go @@ -266,6 +266,7 @@ func defaultUrlMapsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -275,7 +276,9 @@ func defaultUrlMapsRESTClientOptions() []option.ClientOption { func (c *urlMapsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/vpn_gateways_client.go b/vendor/cloud.google.com/go/compute/apiv1/vpn_gateways_client.go index 1edf37dec..726690450 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/vpn_gateways_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/vpn_gateways_client.go @@ -265,6 +265,7 @@ func defaultVpnGatewaysRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -274,7 +275,9 @@ func defaultVpnGatewaysRESTClientOptions() []option.ClientOption { func (c *vpnGatewaysRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) 
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/vpn_tunnels_client.go b/vendor/cloud.google.com/go/compute/apiv1/vpn_tunnels_client.go index b8e1d18ad..f25369d1f 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/vpn_tunnels_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/vpn_tunnels_client.go @@ -236,6 +236,7 @@ func defaultVpnTunnelsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -245,7 +246,9 @@ func defaultVpnTunnelsRESTClientOptions() []option.ClientOption { func (c *vpnTunnelsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/zone_operations_client.go b/vendor/cloud.google.com/go/compute/apiv1/zone_operations_client.go index b1d9d1c67..126f1f614 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/zone_operations_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/zone_operations_client.go @@ -192,6 +192,7 @@ func defaultZoneOperationsRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -201,7 +202,9 @@ func defaultZoneOperationsRESTClientOptions() []option.ClientOption { func (c *zoneOperationsRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/apiv1/zones_client.go b/vendor/cloud.google.com/go/compute/apiv1/zones_client.go index 0c20b8a90..e07b69345 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/zones_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/zones_client.go @@ -172,6 +172,7 @@ func defaultZonesRESTClientOptions() []option.ClientOption { internaloption.WithDefaultUniverseDomain("googleapis.com"), internaloption.WithDefaultAudience("https://compute.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableNewAuthLibrary(), } } @@ -181,7 +182,9 @@ func defaultZonesRESTClientOptions() []option.ClientOption { func (c *zonesRESTClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", gax.GoVersion}, keyval...) 
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") - c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} + c.xGoogHeaders = []string{ + "x-goog-api-client", gax.XGoogHeader(kv...), + } } // Close closes the connection to the API service. The user should invoke this when diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go index dc1cb9f60..c828210e2 100644 --- a/vendor/cloud.google.com/go/compute/internal/version.go +++ b/vendor/cloud.google.com/go/compute/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.27.0" +const Version = "1.27.1" diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json index 655fc5d82..a3e99df29 100644 --- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -1029,6 +1029,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/developerconnect/apiv1": { + "api_shortname": "developerconnect", + "distribution_name": "cloud.google.com/go/developerconnect/apiv1", + "description": "Developer Connect API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/developerconnect/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/dialogflow/apiv2": { "api_shortname": "dialogflow", "distribution_name": "cloud.google.com/go/dialogflow/apiv2", @@ -1519,6 +1529,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/managedkafka/apiv1": { + "api_shortname": "managedkafka", + "distribution_name": "cloud.google.com/go/managedkafka/apiv1", + "description": "Apache Kafka for BigQuery API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/managedkafka/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/maps/addressvalidation/apiv1": { "api_shortname": "addressvalidation", "distribution_name": "cloud.google.com/go/maps/addressvalidation/apiv1", @@ -1549,16 +1569,6 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, - "cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha": { - "api_shortname": "mapsplatformdatasets", - "distribution_name": "cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha", - "description": "Maps Platform Datasets API", - "language": "go", - "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/mapsplatformdatasets/apiv1alpha", - "release_level": "preview", - "library_type": "GAPIC_AUTO" - }, "cloud.google.com/go/maps/places/apiv1": { "api_shortname": "places", "distribution_name": "cloud.google.com/go/maps/places/apiv1", @@ -1566,7 +1576,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/places/apiv1", - "release_level": "stable", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/maps/routeoptimization/apiv1": { @@ -1596,7 +1606,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/solar/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/mediatranslation/apiv1beta1": { @@ -1749,6 +1759,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/networkservices/apiv1": { + "api_shortname": "networkservices", + "distribution_name": "cloud.google.com/go/networkservices/apiv1", + "description": "Network Services API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networkservices/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/notebooks/apiv1": { "api_shortname": "notebooks", "distribution_name": "cloud.google.com/go/notebooks/apiv1", @@ -2112,7 +2132,7 @@ "cloud.google.com/go/retail/apiv2": { "api_shortname": "retail", "distribution_name": "cloud.google.com/go/retail/apiv2", - "description": "Retail API", + "description": "Vertex AI Search for Retail API", "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2", @@ -2122,7 +2142,7 @@ "cloud.google.com/go/retail/apiv2alpha": { "api_shortname": "retail", "distribution_name": "cloud.google.com/go/retail/apiv2alpha", - "description": "Retail API", + "description": "Vertex AI Search for Retail API", "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2alpha", @@ -2132,7 +2152,7 @@ "cloud.google.com/go/retail/apiv2beta": { "api_shortname": "retail", "distribution_name": "cloud.google.com/go/retail/apiv2beta", - "description": "Retail API", + "description": "Vertex AI Search for Retail API", "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2beta", @@ -2389,6 +2409,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/shopping/merchant/accounts/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/accounts/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/accounts/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/shopping/merchant/conversions/apiv1beta": { "api_shortname": "merchantapi", "distribution_name": "cloud.google.com/go/shopping/merchant/conversions/apiv1beta", @@ -2399,6 +2429,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/shopping/merchant/datasources/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/datasources/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/datasources/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/shopping/merchant/inventories/apiv1beta": { "api_shortname": "merchantapi", "distribution_name": 
"cloud.google.com/go/shopping/merchant/inventories/apiv1beta", @@ -2429,6 +2469,26 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/shopping/merchant/products/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/products/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/products/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/shopping/merchant/promotions/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/promotions/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/promotions/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/shopping/merchant/quota/apiv1beta": { "api_shortname": "merchantapi", "distribution_name": "cloud.google.com/go/shopping/merchant/quota/apiv1beta", diff --git a/vendor/cloud.google.com/go/internal/gen_info.sh b/vendor/cloud.google.com/go/internal/gen_info.sh new file mode 100644 index 000000000..59c190653 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/gen_info.sh @@ -0,0 +1,46 @@ +#!/bin/sh + +# Script to generate info.go files with methods for all clients. + +if [[ $# != 2 ]]; then + echo >&2 "usage: $0 DIR PACKAGE" + exit 1 +fi + +outfile=info.go + +cd $1 + +cat <<'EOF' > $outfile +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Also passes any +// provided key-value pairs. Intended for use by Google-written clients. +// +// Internal use only. + +EOF + +echo -e >> $outfile "package $2\n" + + +awk '/^func \(c \*[A-Z].*\) setGoogleClientInfo/ { + printf("func (c %s SetGoogleClientInfo(keyval ...string) {\n", $3); + printf(" c.setGoogleClientInfo(keyval...)\n"); + printf("}\n\n"); +}' *_client.go >> $outfile + +gofmt -w $outfile diff --git a/vendor/cloud.google.com/go/internal/trace/trace.go b/vendor/cloud.google.com/go/internal/trace/trace.go index 97738b2cb..e8daf800a 100644 --- a/vendor/cloud.google.com/go/internal/trace/trace.go +++ b/vendor/cloud.google.com/go/internal/trace/trace.go @@ -33,17 +33,22 @@ import ( ) const ( + // Deprecated: The default experimental tracing support for OpenCensus is + // now deprecated in the Google Cloud client libraries for Go. // TelemetryPlatformTracingOpenCensus is the value to which the environment // variable GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING should be // set to enable OpenCensus tracing. 
TelemetryPlatformTracingOpenCensus = "opencensus" - // TelemetryPlatformTracingOpenCensus is the value to which the environment + // TelemetryPlatformTracingOpenTelemetry is the value to which the environment // variable GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING should be // set to enable OpenTelemetry tracing. TelemetryPlatformTracingOpenTelemetry = "opentelemetry" - // TelemetryPlatformTracingOpenCensus is the name of the environment - // variable that can be set to change the default tracing from OpenCensus - // to OpenTelemetry. + // TelemetryPlatformTracingVar is the name of the environment + // variable that can be set to change the default tracing from OpenTelemetry + // to OpenCensus. + // + // The default experimental tracing support for OpenCensus is now deprecated + // in the Google Cloud client libraries for Go. TelemetryPlatformTracingVar = "GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING" // OpenTelemetryTracerName is the name given to the OpenTelemetry Tracer // when it is obtained from the OpenTelemetry TracerProvider. @@ -51,47 +56,58 @@ const ( ) var ( - // openTelemetryTracingEnabledMu guards access to openTelemetryTracingEnabled field - openTelemetryTracingEnabledMu = sync.RWMutex{} - // openTelemetryTracingEnabled is true if the environment variable + // openCensusTracingEnabledMu guards access to openCensusTracingEnabled field + openCensusTracingEnabledMu = sync.RWMutex{} + // openCensusTracingEnabled is true if the environment variable // GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is set to the - // case-insensitive value "opentelemetry". - openTelemetryTracingEnabled bool = strings.EqualFold(strings.TrimSpace( - os.Getenv(TelemetryPlatformTracingVar)), TelemetryPlatformTracingOpenTelemetry) + // case-insensitive value "opencensus". + openCensusTracingEnabled bool = strings.EqualFold(strings.TrimSpace( + os.Getenv(TelemetryPlatformTracingVar)), TelemetryPlatformTracingOpenCensus) ) -// SetOpenTelemetryTracingEnabledField programmatically sets the value provided by GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING for the purpose of unit testing. -// Do not invoke it directly. Intended for use only in unit tests. Restore original value after each test. +// SetOpenTelemetryTracingEnabledField programmatically sets the value provided +// by GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING for the purpose of +// unit testing. Do not invoke it directly. Intended for use only in unit tests. +// Restore original value after each test. +// +// The default experimental tracing support for OpenCensus is now deprecated in +// the Google Cloud client libraries for Go. func SetOpenTelemetryTracingEnabledField(enabled bool) { - openTelemetryTracingEnabledMu.Lock() - defer openTelemetryTracingEnabledMu.Unlock() - openTelemetryTracingEnabled = enabled + openCensusTracingEnabledMu.Lock() + defer openCensusTracingEnabledMu.Unlock() + openCensusTracingEnabled = !enabled } +// Deprecated: The default experimental tracing support for OpenCensus is now +// deprecated in the Google Cloud client libraries for Go. +// // IsOpenCensusTracingEnabled returns true if the environment variable -// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is NOT set to the -// case-insensitive value "opentelemetry". +// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is set to the +// case-insensitive value "opencensus". 
func IsOpenCensusTracingEnabled() bool { - return !IsOpenTelemetryTracingEnabled() + openCensusTracingEnabledMu.RLock() + defer openCensusTracingEnabledMu.RUnlock() + return openCensusTracingEnabled } // IsOpenTelemetryTracingEnabled returns true if the environment variable -// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is set to the -// case-insensitive value "opentelemetry". +// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is NOT set to the +// case-insensitive value "opencensus". func IsOpenTelemetryTracingEnabled() bool { - openTelemetryTracingEnabledMu.RLock() - defer openTelemetryTracingEnabledMu.RUnlock() - return openTelemetryTracingEnabled + return !IsOpenCensusTracingEnabled() } // StartSpan adds a span to the trace with the given name. If IsOpenCensusTracingEnabled // returns true, the span will be an OpenCensus span. If IsOpenTelemetryTracingEnabled // returns true, the span will be an OpenTelemetry span. Set the environment variable // GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive -// value "opentelemetry" before loading the package to use OpenTelemetry tracing. -// The default will remain OpenCensus until May 29, 2024, at which time the default will -// switch to "opentelemetry" and explicitly setting the environment variable to -// "opencensus" will be required to continue using OpenCensus tracing. +// value "opencensus" before loading the package to use OpenCensus tracing. +// The default was OpenCensus until May 29, 2024, at which time the default was +// changed to "opencensus". Explicitly setting the environment variable to +// "opencensus" is required to continue using OpenCensus tracing. +// +// The default experimental tracing support for OpenCensus is now deprecated in +// the Google Cloud client libraries for Go. func StartSpan(ctx context.Context, name string) context.Context { if IsOpenTelemetryTracingEnabled() { ctx, _ = otel.GetTracerProvider().Tracer(OpenTelemetryTracerName).Start(ctx, name) @@ -105,10 +121,13 @@ func StartSpan(ctx context.Context, name string) context.Context { // returns true, the span will be an OpenCensus span. If IsOpenTelemetryTracingEnabled // returns true, the span will be an OpenTelemetry span. Set the environment variable // GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive -// value "opentelemetry" before loading the package to use OpenTelemetry tracing. -// The default will remain OpenCensus until May 29, 2024, at which time the default will -// switch to "opentelemetry" and explicitly setting the environment variable to -// "opencensus" will be required to continue using OpenCensus tracing. +// value "opencensus" before loading the package to use OpenCensus tracing. +// The default was OpenCensus until May 29, 2024, at which time the default was +// changed to "opencensus". Explicitly setting the environment variable to +// "opencensus" is required to continue using OpenCensus tracing. +// +// The default experimental tracing support for OpenCensus is now deprecated in +// the Google Cloud client libraries for Go. func EndSpan(ctx context.Context, err error) { if IsOpenTelemetryTracingEnabled() { span := ottrace.SpanFromContext(ctx) @@ -191,10 +210,13 @@ func httpStatusCodeToOCCode(httpStatusCode int) int32 { // OpenCensus span. If IsOpenTelemetryTracingEnabled returns true, the expected // span must be an OpenTelemetry span. 
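The net effect of the trace.go rewrite above is that OpenTelemetry is now the default and OpenCensus is used only when the environment variable explicitly asks for it; the choice is read once, at package initialization. A minimal self-contained sketch of the new selection rule, mirroring the openCensusTracingEnabled initializer above rather than importing the internal package (which is not importable from outside the cloud.google.com/go module):

package main

import (
	"fmt"
	"os"
	"strings"
)

// openCensusRequested reproduces the predicate used by the internal trace
// package: OpenCensus is selected only when the variable is set,
// case-insensitively, to "opencensus"; anything else means OpenTelemetry.
func openCensusRequested() bool {
	v := os.Getenv("GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING")
	return strings.EqualFold(strings.TrimSpace(v), "opencensus")
}

func main() {
	if openCensusRequested() {
		fmt.Println("spans will be OpenCensus (deprecated)")
	} else {
		fmt.Println("spans will be OpenTelemetry (the new default)")
	}
}

Because the real check runs in a package-level variable initializer, calling os.Setenv from main is too late; the variable has to be present in the process environment before the program starts.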
Set the environment variable // GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive -// value "opentelemetry" before loading the package to use OpenTelemetry tracing. -// The default will remain OpenCensus until May 29, 2024, at which time the default will -// switch to "opentelemetry" and explicitly setting the environment variable to -// "opencensus" will be required to continue using OpenCensus tracing. +// value "opencensus" before loading the package to use OpenCensus tracing. +// The default was OpenCensus until May 29, 2024, at which time the default was +// changed to "opencensus". Explicitly setting the environment variable to +// "opencensus" is required to continue using OpenCensus tracing. +// +// The default experimental tracing support for OpenCensus is now deprecated in +// the Google Cloud client libraries for Go. func TracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) { if IsOpenTelemetryTracingEnabled() { attrs := otAttrs(attrMap) diff --git a/vendor/github.com/Microsoft/go-winio/.golangci.yml b/vendor/github.com/Microsoft/go-winio/.golangci.yml index 7b503d26a..faedfe937 100644 --- a/vendor/github.com/Microsoft/go-winio/.golangci.yml +++ b/vendor/github.com/Microsoft/go-winio/.golangci.yml @@ -1,7 +1,3 @@ -run: - skip-dirs: - - pkg/etw/sample - linters: enable: # style @@ -20,9 +16,13 @@ linters: - gofmt # files are gofmt'ed - gosec # security - nilerr # returns nil even with non-nil error + - thelper # test helpers without t.Helper() - unparam # unused function params issues: + exclude-dirs: + - pkg/etw/sample + exclude-rules: # err is very often shadowed in nested scopes - linters: @@ -69,9 +69,7 @@ linters-settings: # struct order is often for Win32 compat # also, ignore pointer bytes/GC issues for now until performance becomes an issue - fieldalignment - check-shadowing: true nolintlint: - allow-leading-space: false require-explanation: true require-specific: true revive: diff --git a/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/Microsoft/go-winio/backup.go index 09621c884..b54341daa 100644 --- a/vendor/github.com/Microsoft/go-winio/backup.go +++ b/vendor/github.com/Microsoft/go-winio/backup.go @@ -10,14 +10,14 @@ import ( "io" "os" "runtime" - "syscall" "unicode/utf16" + "github.com/Microsoft/go-winio/internal/fs" "golang.org/x/sys/windows" ) -//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead -//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite +//sys backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead +//sys backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite const ( BackupData = uint32(iota + 1) @@ -104,7 +104,7 @@ func (r *BackupStreamReader) Next() (*BackupHeader, error) { if err := binary.Read(r.r, binary.LittleEndian, name); err != nil { return nil, err } - hdr.Name = syscall.UTF16ToString(name) + hdr.Name = windows.UTF16ToString(name) } if wsi.StreamID == BackupSparseBlock { if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { @@ -205,7 +205,7 @@ func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader { // Read reads a backup stream from the file by calling the Win32 API 
BackupRead(). func (r *BackupFileReader) Read(b []byte) (int, error) { var bytesRead uint32 - err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) + err := backupRead(windows.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) if err != nil { return 0, &os.PathError{Op: "BackupRead", Path: r.f.Name(), Err: err} } @@ -220,7 +220,7 @@ func (r *BackupFileReader) Read(b []byte) (int, error) { // the underlying file. func (r *BackupFileReader) Close() error { if r.ctx != 0 { - _ = backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) + _ = backupRead(windows.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) runtime.KeepAlive(r.f) r.ctx = 0 } @@ -244,7 +244,7 @@ func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter { // Write restores a portion of the file using the provided backup stream. func (w *BackupFileWriter) Write(b []byte) (int, error) { var bytesWritten uint32 - err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) + err := backupWrite(windows.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) if err != nil { return 0, &os.PathError{Op: "BackupWrite", Path: w.f.Name(), Err: err} } @@ -259,7 +259,7 @@ func (w *BackupFileWriter) Write(b []byte) (int, error) { // close the underlying file. func (w *BackupFileWriter) Close() error { if w.ctx != 0 { - _ = backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) + _ = backupWrite(windows.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) runtime.KeepAlive(w.f) w.ctx = 0 } @@ -271,17 +271,14 @@ func (w *BackupFileWriter) Close() error { // // If the file opened was a directory, it cannot be used with Readdir(). func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) { - winPath, err := syscall.UTF16FromString(path) - if err != nil { - return nil, err - } - h, err := syscall.CreateFile(&winPath[0], - access, - share, + h, err := fs.CreateFile(path, + fs.AccessMask(access), + fs.FileShareMode(share), nil, - createmode, - syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, - 0) + fs.FileCreationDisposition(createmode), + fs.FILE_FLAG_BACKUP_SEMANTICS|fs.FILE_FLAG_OPEN_REPARSE_POINT, + 0, + ) if err != nil { err = &os.PathError{Op: "open", Path: path, Err: err} return nil, err diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go index 6b3b0cd51..7f852bbf8 100644 --- a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go +++ b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go @@ -11,7 +11,6 @@ import ( "path/filepath" "strconv" "strings" - "syscall" "time" "github.com/Microsoft/go-winio" @@ -106,7 +105,7 @@ func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *ta hdr.PAXRecords[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes) hdr.PAXRecords[hdrCreationTime] = formatPAXTime(time.Unix(0, fileInfo.CreationTime.Nanoseconds())) - if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { + if (fileInfo.FileAttributes & windows.FILE_ATTRIBUTE_DIRECTORY) != 0 { hdr.Mode |= cISDIR hdr.Size = 0 hdr.Typeflag = tar.TypeDir @@ -378,7 +377,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size // WriteTarFileFromBackupStream. 
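The backup.go hunks above move the generated //sys stubs and OpenForBackup from syscall.Handle and syscall.CreateFile to windows.Handle and the internal fs.CreateFile wrapper, but the exported surface keeps its shape. A sketch of how those pieces are typically combined to enumerate a file's backup streams; it assumes NewBackupStreamReader from the same package (not shown in this diff) and that its Next method reports io.EOF once the stream is exhausted:

//go:build windows

package main

import (
	"fmt"
	"io"

	winio "github.com/Microsoft/go-winio"
	"golang.org/x/sys/windows"
)

func listStreams(path string) error {
	// OpenForBackup adds FILE_FLAG_BACKUP_SEMANTICS and
	// FILE_FLAG_OPEN_REPARSE_POINT itself, as the hunk above shows.
	f, err := winio.OpenForBackup(path, windows.GENERIC_READ, windows.FILE_SHARE_READ, windows.OPEN_EXISTING)
	if err != nil {
		return err
	}
	defer f.Close()

	// false: do not include the security descriptor stream.
	bfr := winio.NewBackupFileReader(f, false)
	defer bfr.Close()

	sr := winio.NewBackupStreamReader(bfr)
	for {
		hdr, err := sr.Next()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Printf("stream id=%d name=%q size=%d\n", hdr.Id, hdr.Name, hdr.Size)
	}
}

func main() {
	if err := listStreams(`C:\Windows\notepad.exe`); err != nil {
		fmt.Println("error:", err)
	}
}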
func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) { name = hdr.Name - if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA { + if hdr.Typeflag == tar.TypeReg { size = hdr.Size } fileInfo = &winio.FileBasicInfo{ @@ -396,7 +395,7 @@ func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *win fileInfo.FileAttributes = uint32(attr) } else { if hdr.Typeflag == tar.TypeDir { - fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY + fileInfo.FileAttributes |= windows.FILE_ATTRIBUTE_DIRECTORY } } if creationTimeStr, ok := hdr.PAXRecords[hdrCreationTime]; ok { @@ -469,7 +468,7 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) ( } } - if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA { + if hdr.Typeflag == tar.TypeReg { bhdr := winio.BackupHeader{ Id: winio.BackupData, Size: hdr.Size, diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go index 175a99d3f..fe82a180d 100644 --- a/vendor/github.com/Microsoft/go-winio/file.go +++ b/vendor/github.com/Microsoft/go-winio/file.go @@ -15,26 +15,11 @@ import ( "golang.org/x/sys/windows" ) -//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx -//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort -//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus -//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes -//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult - -type atomicBool int32 - -func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } -func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } -func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } - -//revive:disable-next-line:predeclared Keep "new" to maintain consistency with "atomic" pkg -func (b *atomicBool) swap(new bool) bool { - var newInt int32 - if new { - newInt = 1 - } - return atomic.SwapInt32((*int32)(b), newInt) == 1 -} +//sys cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) = CancelIoEx +//sys createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) = CreateIoCompletionPort +//sys getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus +//sys setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes +//sys wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult var ( ErrFileClosed = errors.New("file has already been closed") @@ -50,7 +35,7 @@ func (*timeoutError) Temporary() bool { return true } type timeoutChan chan struct{} var ioInitOnce sync.Once -var ioCompletionPort syscall.Handle +var ioCompletionPort windows.Handle // ioResult contains the result of an asynchronous IO operation. type ioResult struct { @@ -60,12 +45,12 @@ type ioResult struct { // ioOperation represents an outstanding asynchronous Win32 IO. 
type ioOperation struct { - o syscall.Overlapped + o windows.Overlapped ch chan ioResult } func initIO() { - h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff) + h, err := createIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff) if err != nil { panic(err) } @@ -76,10 +61,10 @@ func initIO() { // win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. // It takes ownership of this handle and will close it if it is garbage collected. type win32File struct { - handle syscall.Handle + handle windows.Handle wg sync.WaitGroup wgLock sync.RWMutex - closing atomicBool + closing atomic.Bool socket bool readDeadline deadlineHandler writeDeadline deadlineHandler @@ -90,11 +75,11 @@ type deadlineHandler struct { channel timeoutChan channelLock sync.RWMutex timer *time.Timer - timedout atomicBool + timedout atomic.Bool } // makeWin32File makes a new win32File from an existing file handle. -func makeWin32File(h syscall.Handle) (*win32File, error) { +func makeWin32File(h windows.Handle) (*win32File, error) { f := &win32File{handle: h} ioInitOnce.Do(initIO) _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) @@ -110,7 +95,12 @@ func makeWin32File(h syscall.Handle) (*win32File, error) { return f, nil } +// Deprecated: use NewOpenFile instead. func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { + return NewOpenFile(windows.Handle(h)) +} + +func NewOpenFile(h windows.Handle) (io.ReadWriteCloser, error) { // If we return the result of makeWin32File directly, it can result in an // interface-wrapped nil, rather than a nil interface value. f, err := makeWin32File(h) @@ -124,13 +114,13 @@ func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { func (f *win32File) closeHandle() { f.wgLock.Lock() // Atomically set that we are closing, releasing the resources only once. - if !f.closing.swap(true) { + if !f.closing.Swap(true) { f.wgLock.Unlock() // cancel all IO and wait for it to complete _ = cancelIoEx(f.handle, nil) f.wg.Wait() // at this point, no new IO can start - syscall.Close(f.handle) + windows.Close(f.handle) f.handle = 0 } else { f.wgLock.Unlock() @@ -145,14 +135,14 @@ func (f *win32File) Close() error { // IsClosed checks if the file has been closed. func (f *win32File) IsClosed() bool { - return f.closing.isSet() + return f.closing.Load() } // prepareIO prepares for a new IO operation. // The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. func (f *win32File) prepareIO() (*ioOperation, error) { f.wgLock.RLock() - if f.closing.isSet() { + if f.closing.Load() { f.wgLock.RUnlock() return nil, ErrFileClosed } @@ -164,12 +154,12 @@ func (f *win32File) prepareIO() (*ioOperation, error) { } // ioCompletionProcessor processes completed async IOs forever. -func ioCompletionProcessor(h syscall.Handle) { +func ioCompletionProcessor(h windows.Handle) { for { var bytes uint32 var key uintptr var op *ioOperation - err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE) + err := getQueuedCompletionStatus(h, &bytes, &key, &op, windows.INFINITE) if op == nil { panic(err) } @@ -182,11 +172,11 @@ func ioCompletionProcessor(h syscall.Handle) { // asyncIO processes the return value from ReadFile or WriteFile, blocking until // the operation has actually completed. 
func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { - if err != syscall.ERROR_IO_PENDING { //nolint:errorlint // err is Errno + if err != windows.ERROR_IO_PENDING { //nolint:errorlint // err is Errno return int(bytes), err } - if f.closing.isSet() { + if f.closing.Load() { _ = cancelIoEx(f.handle, &c.o) } @@ -201,8 +191,8 @@ func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, er select { case r = <-c.ch: err = r.err - if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno - if f.closing.isSet() { + if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno + if f.closing.Load() { err = ErrFileClosed } } else if err != nil && f.socket { @@ -214,7 +204,7 @@ func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, er _ = cancelIoEx(f.handle, &c.o) r = <-c.ch err = r.err - if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno + if err == windows.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno err = ErrTimeout } } @@ -235,23 +225,22 @@ func (f *win32File) Read(b []byte) (int, error) { } defer f.wg.Done() - if f.readDeadline.timedout.isSet() { + if f.readDeadline.timedout.Load() { return 0, ErrTimeout } var bytes uint32 - err = syscall.ReadFile(f.handle, b, &bytes, &c.o) + err = windows.ReadFile(f.handle, b, &bytes, &c.o) n, err := f.asyncIO(c, &f.readDeadline, bytes, err) runtime.KeepAlive(b) // Handle EOF conditions. if err == nil && n == 0 && len(b) != 0 { return 0, io.EOF - } else if err == syscall.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno + } else if err == windows.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno return 0, io.EOF - } else { - return n, err } + return n, err } // Write writes to a file handle. @@ -262,12 +251,12 @@ func (f *win32File) Write(b []byte) (int, error) { } defer f.wg.Done() - if f.writeDeadline.timedout.isSet() { + if f.writeDeadline.timedout.Load() { return 0, ErrTimeout } var bytes uint32 - err = syscall.WriteFile(f.handle, b, &bytes, &c.o) + err = windows.WriteFile(f.handle, b, &bytes, &c.o) n, err := f.asyncIO(c, &f.writeDeadline, bytes, err) runtime.KeepAlive(b) return n, err @@ -282,7 +271,7 @@ func (f *win32File) SetWriteDeadline(deadline time.Time) error { } func (f *win32File) Flush() error { - return syscall.FlushFileBuffers(f.handle) + return windows.FlushFileBuffers(f.handle) } func (f *win32File) Fd() uintptr { @@ -299,7 +288,7 @@ func (d *deadlineHandler) set(deadline time.Time) error { } d.timer = nil } - d.timedout.setFalse() + d.timedout.Store(false) select { case <-d.channel: @@ -314,7 +303,7 @@ func (d *deadlineHandler) set(deadline time.Time) error { } timeoutIO := func() { - d.timedout.setTrue() + d.timedout.Store(true) close(d.channel) } diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go index 702950e72..c860eb991 100644 --- a/vendor/github.com/Microsoft/go-winio/fileinfo.go +++ b/vendor/github.com/Microsoft/go-winio/fileinfo.go @@ -18,9 +18,18 @@ type FileBasicInfo struct { _ uint32 // padding } +// alignedFileBasicInfo is a FileBasicInfo, but aligned to uint64 by containing +// uint64 rather than windows.Filetime. Filetime contains two uint32s. uint64 +// alignment is necessary to pass this as FILE_BASIC_INFO. 
+type alignedFileBasicInfo struct { + CreationTime, LastAccessTime, LastWriteTime, ChangeTime uint64 + FileAttributes uint32 + _ uint32 // padding +} + // GetFileBasicInfo retrieves times and attributes for a file. func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { - bi := &FileBasicInfo{} + bi := &alignedFileBasicInfo{} if err := windows.GetFileInformationByHandleEx( windows.Handle(f.Fd()), windows.FileBasicInfo, @@ -30,16 +39,21 @@ func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} } runtime.KeepAlive(f) - return bi, nil + // Reinterpret the alignedFileBasicInfo as a FileBasicInfo so it matches the + // public API of this module. The data may be unnecessarily aligned. + return (*FileBasicInfo)(unsafe.Pointer(bi)), nil } // SetFileBasicInfo sets times and attributes for a file. func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error { + // Create an alignedFileBasicInfo based on a FileBasicInfo. The copy is + // suitable to pass to GetFileInformationByHandleEx. + biAligned := *(*alignedFileBasicInfo)(unsafe.Pointer(bi)) if err := windows.SetFileInformationByHandle( windows.Handle(f.Fd()), windows.FileBasicInfo, - (*byte)(unsafe.Pointer(bi)), - uint32(unsafe.Sizeof(*bi)), + (*byte)(unsafe.Pointer(&biAligned)), + uint32(unsafe.Sizeof(biAligned)), ); err != nil { return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err} } diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go index c88191658..c4fdd9d4a 100644 --- a/vendor/github.com/Microsoft/go-winio/hvsock.go +++ b/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -10,7 +10,6 @@ import ( "io" "net" "os" - "syscall" "time" "unsafe" @@ -181,13 +180,13 @@ type HvsockConn struct { var _ net.Conn = &HvsockConn{} func newHVSocket() (*win32File, error) { - fd, err := syscall.Socket(afHVSock, syscall.SOCK_STREAM, 1) + fd, err := windows.Socket(afHVSock, windows.SOCK_STREAM, 1) if err != nil { return nil, os.NewSyscallError("socket", err) } f, err := makeWin32File(fd) if err != nil { - syscall.Close(fd) + windows.Close(fd) return nil, err } f.socket = true @@ -197,16 +196,24 @@ func newHVSocket() (*win32File, error) { // ListenHvsock listens for connections on the specified hvsock address. 
func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) { l := &HvsockListener{addr: *addr} - sock, err := newHVSocket() + + var sock *win32File + sock, err = newHVSocket() if err != nil { return nil, l.opErr("listen", err) } + defer func() { + if err != nil { + _ = sock.Close() + } + }() + sa := addr.raw() - err = socket.Bind(windows.Handle(sock.handle), &sa) + err = socket.Bind(sock.handle, &sa) if err != nil { return nil, l.opErr("listen", os.NewSyscallError("socket", err)) } - err = syscall.Listen(sock.handle, 16) + err = windows.Listen(sock.handle, 16) if err != nil { return nil, l.opErr("listen", os.NewSyscallError("listen", err)) } @@ -246,7 +253,7 @@ func (l *HvsockListener) Accept() (_ net.Conn, err error) { var addrbuf [addrlen * 2]byte var bytes uint32 - err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o) + err = windows.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o) if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil { return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) } @@ -263,7 +270,7 @@ func (l *HvsockListener) Accept() (_ net.Conn, err error) { conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen]))) // initialize the accepted socket and update its properties with those of the listening socket - if err = windows.Setsockopt(windows.Handle(sock.handle), + if err = windows.Setsockopt(sock.handle, windows.SOL_SOCKET, windows.SO_UPDATE_ACCEPT_CONTEXT, (*byte)(unsafe.Pointer(&l.sock.handle)), int32(unsafe.Sizeof(l.sock.handle))); err != nil { return nil, conn.opErr("accept", os.NewSyscallError("setsockopt", err)) @@ -334,7 +341,7 @@ func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *Hvsock }() sa := addr.raw() - err = socket.Bind(windows.Handle(sock.handle), &sa) + err = socket.Bind(sock.handle, &sa) if err != nil { return nil, conn.opErr(op, os.NewSyscallError("bind", err)) } @@ -347,7 +354,7 @@ func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *Hvsock var bytes uint32 for i := uint(0); i <= d.Retries; i++ { err = socket.ConnectEx( - windows.Handle(sock.handle), + sock.handle, &sa, nil, // sendBuf 0, // sendDataLen @@ -367,7 +374,7 @@ func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *Hvsock // update the connection properties, so shutdown can be used if err = windows.Setsockopt( - windows.Handle(sock.handle), + sock.handle, windows.SOL_SOCKET, windows.SO_UPDATE_CONNECT_CONTEXT, nil, // optvalue @@ -378,7 +385,7 @@ func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *Hvsock // get the local name var sal rawHvsockAddr - err = socket.GetSockName(windows.Handle(sock.handle), &sal) + err = socket.GetSockName(sock.handle, &sal) if err != nil { return nil, conn.opErr(op, os.NewSyscallError("getsockname", err)) } @@ -421,7 +428,7 @@ func (d *HvsockDialer) redialWait(ctx context.Context) (err error) { return ctx.Err() } -// assumes error is a plain, unwrapped syscall.Errno provided by direct syscall. +// assumes error is a plain, unwrapped windows.Errno provided by direct syscall. 
func canRedial(err error) bool { //nolint:errorlint // guaranteed to be an Errno switch err { @@ -447,9 +454,9 @@ func (conn *HvsockConn) Read(b []byte) (int, error) { return 0, conn.opErr("read", err) } defer conn.sock.wg.Done() - buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} + buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))} var flags, bytes uint32 - err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) + err = windows.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) n, err := conn.sock.asyncIO(c, &conn.sock.readDeadline, bytes, err) if err != nil { var eno windows.Errno @@ -482,9 +489,9 @@ func (conn *HvsockConn) write(b []byte) (int, error) { return 0, conn.opErr("write", err) } defer conn.sock.wg.Done() - buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} + buf := windows.WSABuf{Buf: &b[0], Len: uint32(len(b))} var bytes uint32 - err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) + err = windows.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) n, err := conn.sock.asyncIO(c, &conn.sock.writeDeadline, bytes, err) if err != nil { var eno windows.Errno @@ -511,7 +518,7 @@ func (conn *HvsockConn) shutdown(how int) error { return socket.ErrSocketClosed } - err := syscall.Shutdown(conn.sock.handle, how) + err := windows.Shutdown(conn.sock.handle, how) if err != nil { // If the connection was closed, shutdowns fail with "not connected" if errors.Is(err, windows.WSAENOTCONN) || @@ -525,7 +532,7 @@ func (conn *HvsockConn) shutdown(how int) error { // CloseRead shuts down the read end of the socket, preventing future read operations. func (conn *HvsockConn) CloseRead() error { - err := conn.shutdown(syscall.SHUT_RD) + err := conn.shutdown(windows.SHUT_RD) if err != nil { return conn.opErr("closeread", err) } @@ -535,7 +542,7 @@ func (conn *HvsockConn) CloseRead() error { // CloseWrite shuts down the write end of the socket, preventing future write operations and // notifying the other endpoint that no more data will be written. func (conn *HvsockConn) CloseWrite() error { - err := conn.shutdown(syscall.SHUT_WR) + err := conn.shutdown(windows.SHUT_WR) if err != nil { return conn.opErr("closewrite", err) } diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go index 509b3ec64..0cd9621df 100644 --- a/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/fs.go @@ -11,12 +11,14 @@ import ( //go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go fs.go // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew -//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW +//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW const NullHandle windows.Handle = 0 // AccessMask defines standard, specific, and generic rights. // +// Used with CreateFile and NtCreateFile (and co.). 
+// // Bitmask: // 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 // 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 @@ -47,6 +49,12 @@ const ( // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew#parameters FILE_ANY_ACCESS AccessMask = 0 + GENERIC_READ AccessMask = 0x8000_0000 + GENERIC_WRITE AccessMask = 0x4000_0000 + GENERIC_EXECUTE AccessMask = 0x2000_0000 + GENERIC_ALL AccessMask = 0x1000_0000 + ACCESS_SYSTEM_SECURITY AccessMask = 0x0100_0000 + // Specific Object Access // from ntioapi.h @@ -124,14 +132,32 @@ const ( TRUNCATE_EXISTING FileCreationDisposition = 0x05 ) +// Create disposition values for NtCreate* +type NTFileCreationDisposition uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + // From ntioapi.h + + FILE_SUPERSEDE NTFileCreationDisposition = 0x00 + FILE_OPEN NTFileCreationDisposition = 0x01 + FILE_CREATE NTFileCreationDisposition = 0x02 + FILE_OPEN_IF NTFileCreationDisposition = 0x03 + FILE_OVERWRITE NTFileCreationDisposition = 0x04 + FILE_OVERWRITE_IF NTFileCreationDisposition = 0x05 + FILE_MAXIMUM_DISPOSITION NTFileCreationDisposition = 0x05 +) + // CreateFile and co. take flags or attributes together as one parameter. // Define alias until we can use generics to allow both - +// // https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants type FileFlagOrAttribute uint32 //nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. -const ( // from winnt.h +const ( + // from winnt.h + FILE_FLAG_WRITE_THROUGH FileFlagOrAttribute = 0x8000_0000 FILE_FLAG_OVERLAPPED FileFlagOrAttribute = 0x4000_0000 FILE_FLAG_NO_BUFFERING FileFlagOrAttribute = 0x2000_0000 @@ -145,17 +171,51 @@ const ( // from winnt.h FILE_FLAG_FIRST_PIPE_INSTANCE FileFlagOrAttribute = 0x0008_0000 ) +// NtCreate* functions take a dedicated CreateOptions parameter. +// +// https://learn.microsoft.com/en-us/windows/win32/api/Winternl/nf-winternl-ntcreatefile +// +// https://learn.microsoft.com/en-us/windows/win32/devnotes/nt-create-named-pipe-file +type NTCreateOptions uint32 + +//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. +const ( + // From ntioapi.h + + FILE_DIRECTORY_FILE NTCreateOptions = 0x0000_0001 + FILE_WRITE_THROUGH NTCreateOptions = 0x0000_0002 + FILE_SEQUENTIAL_ONLY NTCreateOptions = 0x0000_0004 + FILE_NO_INTERMEDIATE_BUFFERING NTCreateOptions = 0x0000_0008 + + FILE_SYNCHRONOUS_IO_ALERT NTCreateOptions = 0x0000_0010 + FILE_SYNCHRONOUS_IO_NONALERT NTCreateOptions = 0x0000_0020 + FILE_NON_DIRECTORY_FILE NTCreateOptions = 0x0000_0040 + FILE_CREATE_TREE_CONNECTION NTCreateOptions = 0x0000_0080 + + FILE_COMPLETE_IF_OPLOCKED NTCreateOptions = 0x0000_0100 + FILE_NO_EA_KNOWLEDGE NTCreateOptions = 0x0000_0200 + FILE_DISABLE_TUNNELING NTCreateOptions = 0x0000_0400 + FILE_RANDOM_ACCESS NTCreateOptions = 0x0000_0800 + + FILE_DELETE_ON_CLOSE NTCreateOptions = 0x0000_1000 + FILE_OPEN_BY_FILE_ID NTCreateOptions = 0x0000_2000 + FILE_OPEN_FOR_BACKUP_INTENT NTCreateOptions = 0x0000_4000 + FILE_NO_COMPRESSION NTCreateOptions = 0x0000_8000 +) + type FileSQSFlag = FileFlagOrAttribute //nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API. 
-const ( // from winbase.h +const ( + // from winbase.h + SECURITY_ANONYMOUS FileSQSFlag = FileSQSFlag(SecurityAnonymous << 16) SECURITY_IDENTIFICATION FileSQSFlag = FileSQSFlag(SecurityIdentification << 16) SECURITY_IMPERSONATION FileSQSFlag = FileSQSFlag(SecurityImpersonation << 16) SECURITY_DELEGATION FileSQSFlag = FileSQSFlag(SecurityDelegation << 16) - SECURITY_SQOS_PRESENT FileSQSFlag = 0x00100000 - SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F0000 + SECURITY_SQOS_PRESENT FileSQSFlag = 0x0010_0000 + SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F_0000 ) // GetFinalPathNameByHandle flags diff --git a/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go index e2f7bb24e..a94e234c7 100644 --- a/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go @@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error { case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) return e } @@ -45,7 +42,7 @@ var ( procCreateFileW = modkernel32.NewProc("CreateFileW") ) -func CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { +func CreateFile(name string, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { var _p0 *uint16 _p0, err = syscall.UTF16PtrFromString(name) if err != nil { @@ -54,8 +51,8 @@ func CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall. 
return _CreateFile(_p0, access, mode, sa, createmode, attrs, templatefile) } -func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) +func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *windows.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) { + r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile)) handle = windows.Handle(r0) if handle == windows.InvalidHandle { err = errnoErr(e1) diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go index aeb7b7250..88580d974 100644 --- a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go +++ b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go @@ -156,9 +156,7 @@ func connectEx( bytesSent *uint32, overlapped *windows.Overlapped, ) (err error) { - // todo: after upgrading to 1.18, switch from syscall.Syscall9 to syscall.SyscallN - r1, _, e1 := syscall.Syscall9(connectExFunc.addr, - 7, + r1, _, e1 := syscall.SyscallN(connectExFunc.addr, uintptr(s), uintptr(name), uintptr(namelen), @@ -166,8 +164,8 @@ func connectEx( uintptr(sendDataLen), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), - 0, - 0) + ) + if r1 == 0 { if e1 != 0 { err = error(e1) diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go index 6d2e1a9e4..e1504126a 100644 --- a/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go @@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error { case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
return e } @@ -48,7 +45,7 @@ var ( ) func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), uintptr(name), uintptr(namelen)) if r1 == socketError { err = errnoErr(e1) } @@ -56,7 +53,7 @@ func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) { } func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) + r1, _, e1 := syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) if r1 == socketError { err = errnoErr(e1) } @@ -64,7 +61,7 @@ func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err err } func getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) + r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) if r1 == socketError { err = errnoErr(e1) } diff --git a/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go index 7ad505702..42ebc019f 100644 --- a/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go +++ b/vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go @@ -62,7 +62,7 @@ func (b *WString) Free() { // ResizeTo grows the buffer to at least c and returns the new capacity, freeing the // previous buffer back into pool. 
func (b *WString) ResizeTo(c uint32) uint32 { - // allready sufficient (or n is 0) + // already sufficient (or n is 0) if c <= b.Cap() { return b.Cap() } diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go index 25cc81103..a2da6639d 100644 --- a/vendor/github.com/Microsoft/go-winio/pipe.go +++ b/vendor/github.com/Microsoft/go-winio/pipe.go @@ -11,7 +11,6 @@ import ( "net" "os" "runtime" - "syscall" "time" "unsafe" @@ -20,20 +19,44 @@ import ( "github.com/Microsoft/go-winio/internal/fs" ) -//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe -//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW -//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo -//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW -//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc -//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile +//sys connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) = ConnectNamedPipe +//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateNamedPipeW +//sys disconnectNamedPipe(pipe windows.Handle) (err error) = DisconnectNamedPipe +//sys getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo +//sys getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW +//sys ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile //sys rtlNtStatusToDosError(status ntStatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb //sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) = ntdll.RtlDosPathNameToNtPathName_U //sys rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) = ntdll.RtlDefaultNpAcl +type PipeConn interface { + net.Conn + Disconnect() error + Flush() error +} + +// type aliases for mkwinsyscall code +type ( + ntAccessMask = fs.AccessMask + ntFileShareMode = fs.FileShareMode + ntFileCreationDisposition = fs.NTFileCreationDisposition + ntFileOptions = fs.NTCreateOptions +) + type ioStatusBlock struct { Status, 
Information uintptr } +// typedef struct _OBJECT_ATTRIBUTES { +// ULONG Length; +// HANDLE RootDirectory; +// PUNICODE_STRING ObjectName; +// ULONG Attributes; +// PVOID SecurityDescriptor; +// PVOID SecurityQualityOfService; +// } OBJECT_ATTRIBUTES; +// +// https://learn.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_object_attributes type objectAttributes struct { Length uintptr RootDirectory uintptr @@ -49,6 +72,17 @@ type unicodeString struct { Buffer uintptr } +// typedef struct _SECURITY_DESCRIPTOR { +// BYTE Revision; +// BYTE Sbz1; +// SECURITY_DESCRIPTOR_CONTROL Control; +// PSID Owner; +// PSID Group; +// PACL Sacl; +// PACL Dacl; +// } SECURITY_DESCRIPTOR, *PISECURITY_DESCRIPTOR; +// +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-security_descriptor type securityDescriptor struct { Revision byte Sbz1 byte @@ -80,6 +114,8 @@ type win32Pipe struct { path string } +var _ PipeConn = (*win32Pipe)(nil) + type win32MessageBytePipe struct { win32Pipe writeClosed bool @@ -103,6 +139,10 @@ func (f *win32Pipe) SetDeadline(t time.Time) error { return f.SetWriteDeadline(t) } +func (f *win32Pipe) Disconnect() error { + return disconnectNamedPipe(f.win32File.handle) +} + // CloseWrite closes the write side of a message pipe in byte mode. func (f *win32MessageBytePipe) CloseWrite() error { if f.writeClosed { @@ -146,7 +186,7 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) { // zero-byte message, ensure that all future Read() calls // also return EOF. f.readEOF = true - } else if err == syscall.ERROR_MORE_DATA { //nolint:errorlint // err is Errno + } else if err == windows.ERROR_MORE_DATA { //nolint:errorlint // err is Errno // ERROR_MORE_DATA indicates that the pipe's read mode is message mode // and the message still has more bytes. Treat this as a success, since // this package presents all named pipes as byte streams. @@ -164,21 +204,20 @@ func (s pipeAddress) String() string { } // tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. -func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask) (syscall.Handle, error) { +func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask, impLevel PipeImpLevel) (windows.Handle, error) { for { select { case <-ctx.Done(): - return syscall.Handle(0), ctx.Err() + return windows.Handle(0), ctx.Err() default: - wh, err := fs.CreateFile(*path, + h, err := fs.CreateFile(*path, access, 0, // mode nil, // security attributes fs.OPEN_EXISTING, - fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.SECURITY_ANONYMOUS, + fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.FileSQSFlag(impLevel), 0, // template file handle ) - h := syscall.Handle(wh) if err == nil { return h, nil } @@ -214,15 +253,33 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { // DialPipeContext attempts to connect to a named pipe by `path` until `ctx` // cancellation or timeout. func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { - return DialPipeAccess(ctx, path, syscall.GENERIC_READ|syscall.GENERIC_WRITE) + return DialPipeAccess(ctx, path, uint32(fs.GENERIC_READ|fs.GENERIC_WRITE)) } +// PipeImpLevel is an enumeration of impersonation levels that may be set +// when calling DialPipeAccessImpersonation. 
+type PipeImpLevel uint32 + +const ( + PipeImpLevelAnonymous = PipeImpLevel(fs.SECURITY_ANONYMOUS) + PipeImpLevelIdentification = PipeImpLevel(fs.SECURITY_IDENTIFICATION) + PipeImpLevelImpersonation = PipeImpLevel(fs.SECURITY_IMPERSONATION) + PipeImpLevelDelegation = PipeImpLevel(fs.SECURITY_DELEGATION) +) + // DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx` // cancellation or timeout. func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) { + return DialPipeAccessImpLevel(ctx, path, access, PipeImpLevelAnonymous) +} + +// DialPipeAccessImpLevel attempts to connect to a named pipe by `path` with +// `access` at `impLevel` until `ctx` cancellation or timeout. The other +// DialPipe* implementations use PipeImpLevelAnonymous. +func DialPipeAccessImpLevel(ctx context.Context, path string, access uint32, impLevel PipeImpLevel) (net.Conn, error) { var err error - var h syscall.Handle - h, err = tryDialPipe(ctx, &path, fs.AccessMask(access)) + var h windows.Handle + h, err = tryDialPipe(ctx, &path, fs.AccessMask(access), impLevel) if err != nil { return nil, err } @@ -235,7 +292,7 @@ func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, f, err := makeWin32File(h) if err != nil { - syscall.Close(h) + windows.Close(h) return nil, err } @@ -255,7 +312,7 @@ type acceptResponse struct { } type win32PipeListener struct { - firstHandle syscall.Handle + firstHandle windows.Handle path string config PipeConfig acceptCh chan (chan acceptResponse) @@ -263,8 +320,8 @@ type win32PipeListener struct { doneCh chan int } -func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) { - path16, err := syscall.UTF16FromString(path) +func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (windows.Handle, error) { + path16, err := windows.UTF16FromString(path) if err != nil { return 0, &os.PathError{Op: "open", Path: path, Err: err} } @@ -280,16 +337,20 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy ).Err(); err != nil { return 0, &os.PathError{Op: "open", Path: path, Err: err} } - defer localFree(ntPath.Buffer) + defer windows.LocalFree(windows.Handle(ntPath.Buffer)) //nolint:errcheck oa.ObjectName = &ntPath oa.Attributes = windows.OBJ_CASE_INSENSITIVE // The security descriptor is only needed for the first pipe. if first { if sd != nil { + //todo: does `sdb` need to be allocated on the heap, or can go allocate it? 
l := uint32(len(sd)) - sdb := localAlloc(0, l) - defer localFree(sdb) + sdb, err := windows.LocalAlloc(0, l) + if err != nil { + return 0, fmt.Errorf("LocalAlloc for security descriptor with of length %d: %w", l, err) + } + defer windows.LocalFree(windows.Handle(sdb)) //nolint:errcheck copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd) oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb)) } else { @@ -298,7 +359,7 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy if err := rtlDefaultNpAcl(&dacl).Err(); err != nil { return 0, fmt.Errorf("getting default named pipe ACL: %w", err) } - defer localFree(dacl) + defer windows.LocalFree(windows.Handle(dacl)) //nolint:errcheck sdb := &securityDescriptor{ Revision: 1, @@ -314,27 +375,27 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy typ |= windows.FILE_PIPE_MESSAGE_TYPE } - disposition := uint32(windows.FILE_OPEN) - access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE) + disposition := fs.FILE_OPEN + access := fs.GENERIC_READ | fs.GENERIC_WRITE | fs.SYNCHRONIZE if first { - disposition = windows.FILE_CREATE + disposition = fs.FILE_CREATE // By not asking for read or write access, the named pipe file system // will put this pipe into an initially disconnected state, blocking // client connections until the next call with first == false. - access = syscall.SYNCHRONIZE + access = fs.SYNCHRONIZE } timeout := int64(-50 * 10000) // 50ms var ( - h syscall.Handle + h windows.Handle iosb ioStatusBlock ) err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, + fs.FILE_SHARE_READ|fs.FILE_SHARE_WRITE, disposition, 0, typ, @@ -359,7 +420,7 @@ func (l *win32PipeListener) makeServerPipe() (*win32File, error) { } f, err := makeWin32File(h) if err != nil { - syscall.Close(h) + windows.Close(h) return nil, err } return f, nil @@ -418,7 +479,7 @@ func (l *win32PipeListener) listenerRoutine() { closed = err == ErrPipeListenerClosed //nolint:errorlint // err is Errno } } - syscall.Close(l.firstHandle) + windows.Close(l.firstHandle) l.firstHandle = 0 // Notify Close() and Accept() callers that the handle has been closed. 
close(l.doneCh) diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go index 0ff9dac90..d9b90b6e8 100644 --- a/vendor/github.com/Microsoft/go-winio/privilege.go +++ b/vendor/github.com/Microsoft/go-winio/privilege.go @@ -9,7 +9,6 @@ import ( "fmt" "runtime" "sync" - "syscall" "unicode/utf16" "golang.org/x/sys/windows" @@ -18,8 +17,8 @@ import ( //sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges //sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf //sys revertToSelf() (err error) = advapi32.RevertToSelf -//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken -//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread +//sys openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken +//sys getCurrentThread() (h windows.Handle) = GetCurrentThread //sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW //sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW //sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW @@ -29,7 +28,7 @@ const ( SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED //revive:disable-next-line:var-naming ALL_CAPS - ERROR_NOT_ALL_ASSIGNED syscall.Errno = windows.ERROR_NOT_ALL_ASSIGNED + ERROR_NOT_ALL_ASSIGNED windows.Errno = windows.ERROR_NOT_ALL_ASSIGNED SeBackupPrivilege = "SeBackupPrivilege" SeRestorePrivilege = "SeRestorePrivilege" @@ -177,7 +176,7 @@ func newThreadToken() (windows.Token, error) { } var token windows.Token - err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token) + err = openThreadToken(getCurrentThread(), windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, false, &token) if err != nil { rerr := revertToSelf() if rerr != nil { diff --git a/vendor/github.com/Microsoft/go-winio/sd.go b/vendor/github.com/Microsoft/go-winio/sd.go index 5550ef6b6..c3685e98e 100644 --- a/vendor/github.com/Microsoft/go-winio/sd.go +++ b/vendor/github.com/Microsoft/go-winio/sd.go @@ -5,7 +5,7 @@ package winio import ( "errors" - "syscall" + "fmt" "unsafe" "golang.org/x/sys/windows" @@ -15,10 +15,6 @@ import ( //sys lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountSidW //sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW //sys convertStringSidToSid(str *uint16, sid **byte) (err error) = advapi32.ConvertStringSidToSidW -//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW -//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW -//sys localFree(mem uintptr) = LocalFree -//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = 
advapi32.GetSecurityDescriptorLength type AccountLookupError struct { Name string @@ -64,7 +60,7 @@ func LookupSidByName(name string) (sid string, err error) { var sidSize, sidNameUse, refDomainSize uint32 err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse) - if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno + if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno return "", &AccountLookupError{name, err} } sidBuffer := make([]byte, sidSize) @@ -78,8 +74,8 @@ func LookupSidByName(name string) (sid string, err error) { if err != nil { return "", &AccountLookupError{name, err} } - sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:]) - localFree(uintptr(unsafe.Pointer(strBuffer))) + sid = windows.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:]) + _, _ = windows.LocalFree(windows.Handle(unsafe.Pointer(strBuffer))) return sid, nil } @@ -100,7 +96,7 @@ func LookupNameBySid(sid string) (name string, err error) { if err = convertStringSidToSid(sidBuffer, &sidPtr); err != nil { return "", &AccountLookupError{sid, err} } - defer localFree(uintptr(unsafe.Pointer(sidPtr))) + defer windows.LocalFree(windows.Handle(unsafe.Pointer(sidPtr))) //nolint:errcheck var nameSize, refDomainSize, sidNameUse uint32 err = lookupAccountSid(nil, sidPtr, nil, &nameSize, nil, &refDomainSize, &sidNameUse) @@ -120,25 +116,18 @@ func LookupNameBySid(sid string) (name string, err error) { } func SddlToSecurityDescriptor(sddl string) ([]byte, error) { - var sdBuffer uintptr - err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil) + sd, err := windows.SecurityDescriptorFromString(sddl) if err != nil { - return nil, &SddlConversionError{sddl, err} + return nil, &SddlConversionError{Sddl: sddl, Err: err} } - defer localFree(sdBuffer) - sd := make([]byte, getSecurityDescriptorLength(sdBuffer)) - copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)]) - return sd, nil + b := unsafe.Slice((*byte)(unsafe.Pointer(sd)), sd.Length()) + return b, nil } func SecurityDescriptorToSddl(sd []byte) (string, error) { - var sddl *uint16 - // The returned string length seems to include an arbitrary number of terminating NULs. - // Don't use it. 
- err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil) - if err != nil { - return "", err + if l := int(unsafe.Sizeof(windows.SECURITY_DESCRIPTOR{})); len(sd) < l { + return "", fmt.Errorf("SecurityDescriptor (%d) smaller than expected (%d): %w", len(sd), l, windows.ERROR_INCORRECT_SIZE) } - defer localFree(uintptr(unsafe.Pointer(sddl))) - return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil + s := (*windows.SECURITY_DESCRIPTOR)(unsafe.Pointer(&sd[0])) + return s.String(), nil } diff --git a/vendor/github.com/Microsoft/go-winio/tools.go b/vendor/github.com/Microsoft/go-winio/tools.go deleted file mode 100644 index 2aa045843..000000000 --- a/vendor/github.com/Microsoft/go-winio/tools.go +++ /dev/null @@ -1,5 +0,0 @@ -//go:build tools - -package winio - -import _ "golang.org/x/tools/cmd/stringer" diff --git a/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go b/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go index d0e917d2b..95c040743 100644 --- a/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go +++ b/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go @@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error { case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) return e } @@ -50,7 +47,7 @@ var ( ) func attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) { - r0, _, _ := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped))) + r0, _, _ := syscall.SyscallN(procAttachVirtualDisk.Addr(), uintptr(handle), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(attachVirtualDiskFlag), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped))) if r0 != 0 { win32err = syscall.Errno(r0) } @@ -67,7 +64,7 @@ func createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virt } func _createVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) { - r0, _, _ := syscall.Syscall9(procCreateVirtualDisk.Addr(), 9, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(createVirtualDiskFlags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(handle))) + r0, _, _ := syscall.SyscallN(procCreateVirtualDisk.Addr(), uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(unsafe.Pointer(securityDescriptor)), uintptr(createVirtualDiskFlags), uintptr(providerSpecificFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(handle))) if r0 != 0 { win32err = syscall.Errno(r0) } @@ -75,7 +72,7 @@ func _createVirtualDisk(virtualStorageType *VirtualStorageType, 
path *uint16, vi } func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) { - r0, _, _ := syscall.Syscall(procDetachVirtualDisk.Addr(), 3, uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags)) + r0, _, _ := syscall.SyscallN(procDetachVirtualDisk.Addr(), uintptr(handle), uintptr(detachVirtualDiskFlags), uintptr(providerSpecificFlags)) if r0 != 0 { win32err = syscall.Errno(r0) } @@ -83,7 +80,7 @@ func detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, pro } func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) { - r0, _, _ := syscall.Syscall(procGetVirtualDiskPhysicalPath.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer))) + r0, _, _ := syscall.SyscallN(procGetVirtualDiskPhysicalPath.Addr(), uintptr(handle), uintptr(unsafe.Pointer(diskPathSizeInBytes)), uintptr(unsafe.Pointer(buffer))) if r0 != 0 { win32err = syscall.Errno(r0) } @@ -100,7 +97,7 @@ func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtua } func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) { - r0, _, _ := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) + r0, _, _ := syscall.SyscallN(procOpenVirtualDisk.Addr(), uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) if r0 != 0 { win32err = syscall.Errno(r0) } diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go index 469b16f63..89b66eda8 100644 --- a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go @@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error { case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
return e } @@ -45,38 +42,34 @@ var ( modntdll = windows.NewLazySystemDLL("ntdll.dll") modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") - procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") - procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") - procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") - procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") - procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") - procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") - procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") - procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") - procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") - procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") - procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") - procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") - procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") - procRevertToSelf = modadvapi32.NewProc("RevertToSelf") - procBackupRead = modkernel32.NewProc("BackupRead") - procBackupWrite = modkernel32.NewProc("BackupWrite") - procCancelIoEx = modkernel32.NewProc("CancelIoEx") - procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") - procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") - procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") - procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") - procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") - procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") - procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") - procLocalAlloc = modkernel32.NewProc("LocalAlloc") - procLocalFree = modkernel32.NewProc("LocalFree") - procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") - procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") - procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") - procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") - procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") - procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") + procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") + procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procBackupRead = modkernel32.NewProc("BackupRead") + procBackupWrite = modkernel32.NewProc("BackupWrite") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") + 
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") + procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") + procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") + procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") + procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") + procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") + procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") + procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") ) func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { @@ -84,7 +77,7 @@ func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, ou if releaseAll { _p0 = 1 } - r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) + r0, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) success = r0 != 0 if true { err = errnoErr(e1) @@ -92,33 +85,8 @@ func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, ou return } -func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - func convertSidToStringSid(sid *byte, str **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(str) - if err != nil { - return - } - return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) -} - -func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str))) if r1 == 0 { err = errnoErr(e1) } @@ -126,21 +94,15 @@ func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision } func convertStringSidToSid(str *uint16, sid **byte) (err error) { - r1, 
_, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid)), 0) + r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid))) if r1 == 0 { err = errnoErr(e1) } return } -func getSecurityDescriptorLength(sd uintptr) (len uint32) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) - len = uint32(r0) - return -} - func impersonateSelf(level uint32) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) + r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(level)) if r1 == 0 { err = errnoErr(e1) } @@ -157,7 +119,7 @@ func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSiz } func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse))) if r1 == 0 { err = errnoErr(e1) } @@ -165,7 +127,7 @@ func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidS } func lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse))) if r1 == 0 { err = errnoErr(e1) } @@ -182,7 +144,7 @@ func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, } func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeDisplayNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId))) if r1 == 0 { err = errnoErr(e1) } @@ -199,7 +161,7 @@ func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size * } func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) 
{ - r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -221,19 +183,19 @@ func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err err } func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { - r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) if r1 == 0 { err = errnoErr(e1) } return } -func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { +func openThreadToken(thread windows.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { var _p0 uint32 if openAsSelf { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } @@ -241,14 +203,14 @@ func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, } func revertToSelf() (err error) { - r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + r1, _, e1 := syscall.SyscallN(procRevertToSelf.Addr()) if r1 == 0 { err = errnoErr(e1) } return } -func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { +func backupRead(h windows.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { var _p0 *byte if len(b) > 0 { _p0 = &b[0] @@ -261,14 +223,14 @@ func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, proce if processSecurity { _p2 = 1 } - r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + r1, _, e1 := syscall.SyscallN(procBackupRead.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context))) if r1 == 0 { err = errnoErr(e1) } return } -func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { +func backupWrite(h windows.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { var _p0 *byte if len(b) > 0 { _p0 = &b[0] @@ -281,39 +243,39 @@ func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, p if processSecurity { _p2 = 1 } - r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + r1, _, e1 := 
syscall.SyscallN(procBackupWrite.Addr(), uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context))) if r1 == 0 { err = errnoErr(e1) } return } -func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) +func cancelIoEx(file windows.Handle, o *windows.Overlapped) (err error) { + r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(file), uintptr(unsafe.Pointer(o))) if r1 == 0 { err = errnoErr(e1) } return } -func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) +func connectNamedPipe(pipe windows.Handle, o *windows.Overlapped) (err error) { + r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(o))) if r1 == 0 { err = errnoErr(e1) } return } -func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) - newport = syscall.Handle(r0) +func createIoCompletionPort(file windows.Handle, port windows.Handle, key uintptr, threadCount uint32) (newport windows.Handle, err error) { + r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount)) + newport = windows.Handle(r0) if newport == 0 { err = errnoErr(e1) } return } -func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { +func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) { var _p0 *uint16 _p0, err = syscall.UTF16PtrFromString(name) if err != nil { @@ -322,96 +284,93 @@ func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances ui return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) } -func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { +func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *windows.SecurityAttributes) (handle windows.Handle, err error) { + r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa))) + handle = windows.Handle(r0) + if handle == windows.InvalidHandle { err = errnoErr(e1) } return } -func getCurrentThread() (h syscall.Handle) { - r0, _, 
_ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) - h = syscall.Handle(r0) - return -} - -func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) +func disconnectNamedPipe(pipe windows.Handle) (err error) { + r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe)) if r1 == 0 { err = errnoErr(e1) } return } -func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) +func getCurrentThread() (h windows.Handle) { + r0, _, _ := syscall.SyscallN(procGetCurrentThread.Addr()) + h = windows.Handle(r0) + return +} + +func getNamedPipeHandleState(pipe windows.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize)) if r1 == 0 { err = errnoErr(e1) } return } -func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) +func getNamedPipeInfo(pipe windows.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances))) if r1 == 0 { err = errnoErr(e1) } return } -func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { - r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0) - ptr = uintptr(r0) - return -} - -func localFree(mem uintptr) { - syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) - return -} - -func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0) +func getQueuedCompletionStatus(port windows.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout)) if r1 == 0 { err = errnoErr(e1) } return } -func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition 
uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) { - r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) +func setFileCompletionNotificationModes(h windows.Handle, flags uint8) (err error) { + r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(h), uintptr(flags)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ntCreateNamedPipeFile(pipe *windows.Handle, access ntAccessMask, oa *objectAttributes, iosb *ioStatusBlock, share ntFileShareMode, disposition ntFileCreationDisposition, options ntFileOptions, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) { + r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout))) status = ntStatus(r0) return } func rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) { - r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(dacl))) status = ntStatus(r0) return } func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) { - r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved)) status = ntStatus(r0) return } func rtlNtStatusToDosError(status ntStatus) (winerr error) { - r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(status)) if r0 != 0 { winerr = syscall.Errno(r0) } return } -func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { +func wsaGetOverlappedResult(h windows.Handle, o *windows.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { var _p0 uint32 if wait { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) + r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags))) if r1 == 0 { err = errnoErr(e1) } diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go index 53d0beb87..9f697beca 100644 
--- a/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go @@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error { case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) return e } @@ -75,7 +72,7 @@ func _hcsAttachLayerStorageFilter(layerPath *uint16, layerData *uint16) (hr erro if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsAttachLayerStorageFilter.Addr(), 2, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(layerData)), 0) + r0, _, _ := syscall.SyscallN(procHcsAttachLayerStorageFilter.Addr(), uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(layerData))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -104,7 +101,7 @@ func _hcsAttachOverlayFilter(volumePath *uint16, layerData *uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsAttachOverlayFilter.Addr(), 2, uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(layerData)), 0) + r0, _, _ := syscall.SyscallN(procHcsAttachOverlayFilter.Addr(), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(layerData))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -128,7 +125,7 @@ func _hcsDestroyLayer(layerPath *uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsDestroyLayer.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0) + r0, _, _ := syscall.SyscallN(procHcsDestroyLayer.Addr(), uintptr(unsafe.Pointer(layerPath))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -152,7 +149,7 @@ func _hcsDetachLayerStorageFilter(layerPath *uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsDetachLayerStorageFilter.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0) + r0, _, _ := syscall.SyscallN(procHcsDetachLayerStorageFilter.Addr(), uintptr(unsafe.Pointer(layerPath))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -181,7 +178,7 @@ func _hcsDetachOverlayFilter(volumePath *uint16, layerData *uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsDetachOverlayFilter.Addr(), 2, uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(layerData)), 0) + r0, _, _ := syscall.SyscallN(procHcsDetachOverlayFilter.Addr(), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(layerData))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -220,7 +217,7 @@ func _hcsExportLayer(layerPath *uint16, exportFolderPath *uint16, layerData *uin if hr != nil { return } - r0, _, _ := syscall.Syscall6(procHcsExportLayer.Addr(), 4, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(exportFolderPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options)), 0, 0) + r0, _, _ := syscall.SyscallN(procHcsExportLayer.Addr(), uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(exportFolderPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -235,7 +232,7 @@ func hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsFormatWritableLayerVhd.Addr(), 1, uintptr(handle), 0, 0) + r0, _, _ := syscall.SyscallN(procHcsFormatWritableLayerVhd.Addr(), uintptr(handle)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 
&= 0xffff @@ -250,7 +247,7 @@ func hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr e if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsGetLayerVhdMountPath.Addr(), 2, uintptr(vhdHandle), uintptr(unsafe.Pointer(mountPath)), 0) + r0, _, _ := syscall.SyscallN(procHcsGetLayerVhdMountPath.Addr(), uintptr(vhdHandle), uintptr(unsafe.Pointer(mountPath))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -284,7 +281,7 @@ func _hcsImportLayer(layerPath *uint16, sourceFolderPath *uint16, layerData *uin if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsImportLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(sourceFolderPath)), uintptr(unsafe.Pointer(layerData))) + r0, _, _ := syscall.SyscallN(procHcsImportLayer.Addr(), uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(sourceFolderPath)), uintptr(unsafe.Pointer(layerData))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -318,7 +315,7 @@ func _hcsInitializeWritableLayer(writableLayerPath *uint16, layerData *uint16, o if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsInitializeWritableLayer.Addr(), 3, uintptr(unsafe.Pointer(writableLayerPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options))) + r0, _, _ := syscall.SyscallN(procHcsInitializeWritableLayer.Addr(), uintptr(unsafe.Pointer(writableLayerPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -347,7 +344,7 @@ func _hcsSetupBaseOSLayer(layerPath *uint16, handle windows.Handle, options *uin if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsSetupBaseOSLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(handle), uintptr(unsafe.Pointer(options))) + r0, _, _ := syscall.SyscallN(procHcsSetupBaseOSLayer.Addr(), uintptr(unsafe.Pointer(layerPath)), uintptr(handle), uintptr(unsafe.Pointer(options))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -381,7 +378,7 @@ func _hcsSetupBaseOSVolume(layerPath *uint16, volumePath *uint16, options *uint1 if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsSetupBaseOSVolume.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(options))) + r0, _, _ := syscall.SyscallN(procHcsSetupBaseOSVolume.Addr(), uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(options))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go index 5dcb97eb3..76eb2be7c 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go @@ -14,14 +14,14 @@ import ( "golang.org/x/sys/windows" ) -// makeOpenFiles calls winio.MakeOpenFile for each handle in a slice but closes all the handles +// makeOpenFiles calls winio.NewOpenFile for each handle in a slice but closes all the handles // if there is an error. 
 func makeOpenFiles(hs []syscall.Handle) (_ []io.ReadWriteCloser, err error) {
 	fs := make([]io.ReadWriteCloser, len(hs))
 	for i, h := range hs {
 		if h != syscall.Handle(0) {
 			if err == nil {
-				fs[i], err = winio.MakeOpenFile(h)
+				fs[i], err = winio.NewOpenFile(windows.Handle(h))
 			}
 			if err != nil {
 				syscall.Close(h)
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go
index a35ee945d..11c7e97e3 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go
@@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
 	case errnoERROR_IO_PENDING:
 		return errERROR_IO_PENDING
 	}
-	// TODO: add more here, after collecting data on the common
-	// error values see on Windows. (perhaps when running
-	// all.bat?)
 	return e
 }
 
@@ -69,7 +66,7 @@ func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16)
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall6(procHNSCall.Addr(), 4, uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procHNSCall.Addr(), uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go
index a17a11250..14c750bd8 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go
@@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
 	case errnoERROR_IO_PENDING:
 		return errERROR_IO_PENDING
 	}
-	// TODO: add more here, after collecting data on the common
-	// error values see on Windows. (perhaps when running
-	// all.bat?)
 	return e
 }
 
@@ -46,6 +43,6 @@ var (
 )
 
 func coTaskMemFree(buffer unsafe.Pointer) {
-	syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(buffer), 0, 0)
+	syscall.SyscallN(procCoTaskMemFree.Addr(), uintptr(buffer))
 	return
 }
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/security/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/security/zsyscall_windows.go
index 26c986b88..395f54687 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/security/zsyscall_windows.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/security/zsyscall_windows.go
@@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
 	case errnoERROR_IO_PENDING:
 		return errERROR_IO_PENDING
 	}
-	// TODO: add more here, after collecting data on the common
-	// error values see on Windows. (perhaps when running
-	// all.bat?)
return e } @@ -48,7 +45,7 @@ var ( ) func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) { - r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor)), 0) + r0, _, _ := syscall.SyscallN(procGetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(si), uintptr(unsafe.Pointer(ppsidOwner)), uintptr(unsafe.Pointer(ppsidGroup)), uintptr(unsafe.Pointer(ppDacl)), uintptr(unsafe.Pointer(ppSacl)), uintptr(unsafe.Pointer(ppSecurityDescriptor))) if r0 != 0 { win32err = syscall.Errno(r0) } @@ -56,7 +53,7 @@ func getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidO } func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl *uintptr) (win32err error) { - r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl)), 0, 0) + r0, _, _ := syscall.SyscallN(procSetEntriesInAclW.Addr(), uintptr(count), uintptr(pListOfEEs), uintptr(oldAcl), uintptr(unsafe.Pointer(newAcl))) if r0 != 0 { win32err = syscall.Errno(r0) } @@ -64,7 +61,7 @@ func setEntriesInAcl(count uintptr, pListOfEEs uintptr, oldAcl uintptr, newAcl * } func setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) { - r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl), 0, 0) + r0, _, _ := syscall.SyscallN(procSetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(si), uintptr(psidOwner), uintptr(psidGroup), uintptr(pDacl), uintptr(pSacl)) if r0 != 0 { win32err = syscall.Errno(r0) } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go index 42368872b..67779de50 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go @@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error { case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
return e } @@ -75,7 +72,7 @@ func hcsCloseComputeSystem(computeSystem HcsSystem) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsCloseComputeSystem.Addr(), 1, uintptr(computeSystem), 0, 0) + r0, _, _ := syscall.SyscallN(procHcsCloseComputeSystem.Addr(), uintptr(computeSystem)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -90,7 +87,7 @@ func hcsCloseProcess(process HcsProcess) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsCloseProcess.Addr(), 1, uintptr(process), 0, 0) + r0, _, _ := syscall.SyscallN(procHcsCloseProcess.Addr(), uintptr(process)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -119,7 +116,7 @@ func _hcsCreateComputeSystem(id *uint16, configuration *uint16, identity syscall if hr != nil { return } - r0, _, _ := syscall.Syscall6(procHcsCreateComputeSystem.Addr(), 5, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(configuration)), uintptr(identity), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)), 0) + r0, _, _ := syscall.SyscallN(procHcsCreateComputeSystem.Addr(), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(configuration)), uintptr(identity), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -143,7 +140,7 @@ func _hcsCreateProcess(computeSystem HcsSystem, processParameters *uint16, proce if hr != nil { return } - r0, _, _ := syscall.Syscall6(procHcsCreateProcess.Addr(), 5, uintptr(computeSystem), uintptr(unsafe.Pointer(processParameters)), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0) + r0, _, _ := syscall.SyscallN(procHcsCreateProcess.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(processParameters)), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -167,7 +164,7 @@ func _hcsEnumerateComputeSystems(query *uint16, computeSystems **uint16, result if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsEnumerateComputeSystems.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(computeSystems)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procHcsEnumerateComputeSystems.Addr(), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(computeSystems)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -191,7 +188,7 @@ func _hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery *uint if hr != nil { return } - r0, _, _ := syscall.Syscall6(procHcsGetComputeSystemProperties.Addr(), 4, uintptr(computeSystem), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) + r0, _, _ := syscall.SyscallN(procHcsGetComputeSystemProperties.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -206,7 +203,7 @@ func hcsGetProcessInfo(process HcsProcess, processInformation *HcsProcessInforma if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsGetProcessInfo.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procHcsGetProcessInfo.Addr(), uintptr(process), 
uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -221,7 +218,7 @@ func hcsGetProcessProperties(process HcsProcess, processProperties **uint16, res if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsGetProcessProperties.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processProperties)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procHcsGetProcessProperties.Addr(), uintptr(process), uintptr(unsafe.Pointer(processProperties)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -245,7 +242,7 @@ func _hcsGetServiceProperties(propertyQuery *uint16, properties **uint16, result if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsGetServiceProperties.Addr(), 3, uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procHcsGetServiceProperties.Addr(), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -269,7 +266,7 @@ func _hcsModifyComputeSystem(computeSystem HcsSystem, configuration *uint16, res if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsModifyComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(configuration)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procHcsModifyComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(configuration)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -293,7 +290,7 @@ func _hcsModifyProcess(process HcsProcess, settings *uint16, result **uint16) (h if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsModifyProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procHcsModifyProcess.Addr(), uintptr(process), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -317,7 +314,7 @@ func _hcsModifyServiceSettings(settings *uint16, result **uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsModifyServiceSettings.Addr(), 2, uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)), 0) + r0, _, _ := syscall.SyscallN(procHcsModifyServiceSettings.Addr(), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -341,7 +338,7 @@ func _hcsOpenComputeSystem(id *uint16, computeSystem *HcsSystem, result **uint16 if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsOpenComputeSystem.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procHcsOpenComputeSystem.Addr(), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -356,7 +353,7 @@ func hcsOpenProcess(computeSystem HcsSystem, pid uint32, process *HcsProcess, re if hr != nil { return } - r0, _, _ := syscall.Syscall6(procHcsOpenProcess.Addr(), 4, uintptr(computeSystem), uintptr(pid), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0, 0) + r0, _, _ := syscall.SyscallN(procHcsOpenProcess.Addr(), 
uintptr(computeSystem), uintptr(pid), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -380,7 +377,7 @@ func _hcsPauseComputeSystem(computeSystem HcsSystem, options *uint16, result **u if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsPauseComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procHcsPauseComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -395,7 +392,7 @@ func hcsRegisterComputeSystemCallback(computeSystem HcsSystem, callback uintptr, if hr != nil { return } - r0, _, _ := syscall.Syscall6(procHcsRegisterComputeSystemCallback.Addr(), 4, uintptr(computeSystem), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0) + r0, _, _ := syscall.SyscallN(procHcsRegisterComputeSystemCallback.Addr(), uintptr(computeSystem), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -410,7 +407,7 @@ func hcsRegisterProcessCallback(process HcsProcess, callback uintptr, context ui if hr != nil { return } - r0, _, _ := syscall.Syscall6(procHcsRegisterProcessCallback.Addr(), 4, uintptr(process), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0) + r0, _, _ := syscall.SyscallN(procHcsRegisterProcessCallback.Addr(), uintptr(process), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -434,7 +431,7 @@ func _hcsResumeComputeSystem(computeSystem HcsSystem, options *uint16, result ** if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsResumeComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procHcsResumeComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -458,7 +455,7 @@ func _hcsSaveComputeSystem(computeSystem HcsSystem, options *uint16, result **ui if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsSaveComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procHcsSaveComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -482,7 +479,7 @@ func _hcsShutdownComputeSystem(computeSystem HcsSystem, options *uint16, result if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsShutdownComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procHcsShutdownComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -506,7 +503,7 @@ func _hcsSignalProcess(process HcsProcess, options *uint16, result **uint16) (hr if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsSignalProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + r0, 
_, _ := syscall.SyscallN(procHcsSignalProcess.Addr(), uintptr(process), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -530,7 +527,7 @@ func _hcsStartComputeSystem(computeSystem HcsSystem, options *uint16, result **u if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsStartComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procHcsStartComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -554,7 +551,7 @@ func _hcsTerminateComputeSystem(computeSystem HcsSystem, options *uint16, result if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsTerminateComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procHcsTerminateComputeSystem.Addr(), uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -569,7 +566,7 @@ func hcsTerminateProcess(process HcsProcess, result **uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 2, uintptr(process), uintptr(unsafe.Pointer(result)), 0) + r0, _, _ := syscall.SyscallN(procHcsTerminateProcess.Addr(), uintptr(process), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -584,7 +581,7 @@ func hcsUnregisterComputeSystemCallback(callbackHandle HcsCallback) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsUnregisterComputeSystemCallback.Addr(), 1, uintptr(callbackHandle), 0, 0) + r0, _, _ := syscall.SyscallN(procHcsUnregisterComputeSystemCallback.Addr(), uintptr(callbackHandle)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -599,7 +596,7 @@ func hcsUnregisterProcessCallback(callbackHandle HcsCallback) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsUnregisterProcessCallback.Addr(), 1, uintptr(callbackHandle), 0, 0) + r0, _, _ := syscall.SyscallN(procHcsUnregisterProcessCallback.Addr(), uintptr(callbackHandle)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go index 0cb509c46..403b94fc5 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go @@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error { case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
return e } @@ -77,7 +74,7 @@ func getDiskFreeSpaceEx(directoryName string, freeBytesAvailableToCaller *int64, } func _getDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) { - r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetDiskFreeSpaceExW.Addr(), uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes))) if r1 == 0 { err = errnoErr(e1) } @@ -85,7 +82,7 @@ func _getDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *int6 } func attachVirtualDisk(handle syscall.Handle, sd uintptr, flags uint32, providerFlags uint32, params uintptr, overlapped uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(sd), uintptr(flags), uintptr(providerFlags), uintptr(params), uintptr(overlapped)) + r1, _, e1 := syscall.SyscallN(procAttachVirtualDisk.Addr(), uintptr(handle), uintptr(sd), uintptr(flags), uintptr(providerFlags), uintptr(params), uintptr(overlapped)) if r1 != 0 { err = errnoErr(e1) } @@ -102,7 +99,7 @@ func openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtua } func _openVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) { - r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(flags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) + r1, _, e1 := syscall.SyscallN(procOpenVirtualDisk.Addr(), uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(flags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) if r1 != 0 { err = errnoErr(e1) } @@ -123,7 +120,7 @@ func _activateLayer(info *driverInfo, id *uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procActivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + r0, _, _ := syscall.SyscallN(procActivateLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -156,7 +153,7 @@ func _copyLayer(info *driverInfo, srcId *uint16, dstId *uint16, descriptors []WC if len(descriptors) > 0 { _p2 = &descriptors[0] } - r0, _, _ := syscall.Syscall6(procCopyLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(srcId)), uintptr(unsafe.Pointer(dstId)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) + r0, _, _ := syscall.SyscallN(procCopyLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(srcId)), uintptr(unsafe.Pointer(dstId)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -185,7 +182,7 @@ func _createLayer(info *driverInfo, id *uint16, parent *uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procCreateLayer.Addr(), 3, uintptr(unsafe.Pointer(info)), 
uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(parent))) + r0, _, _ := syscall.SyscallN(procCreateLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(parent))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -213,7 +210,7 @@ func _createSandboxLayer(info *driverInfo, id *uint16, parent uintptr, descripto if len(descriptors) > 0 { _p1 = &descriptors[0] } - r0, _, _ := syscall.Syscall6(procCreateSandboxLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(parent), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0) + r0, _, _ := syscall.SyscallN(procCreateSandboxLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(parent), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -237,7 +234,7 @@ func _deactivateLayer(info *driverInfo, id *uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procDeactivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + r0, _, _ := syscall.SyscallN(procDeactivateLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -261,7 +258,7 @@ func _destroyLayer(info *driverInfo, id *uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procDestroyLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + r0, _, _ := syscall.SyscallN(procDestroyLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -285,7 +282,7 @@ func _expandSandboxSize(info *driverInfo, id *uint16, size uint64) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procExpandSandboxSize.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(size)) + r0, _, _ := syscall.SyscallN(procExpandSandboxSize.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(size)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -318,7 +315,7 @@ func _exportLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_L if len(descriptors) > 0 { _p2 = &descriptors[0] } - r0, _, _ := syscall.Syscall6(procExportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) + r0, _, _ := syscall.SyscallN(procExportLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -333,7 +330,7 @@ func getBaseImages(buffer **uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procGetBaseImages.Addr(), 1, uintptr(unsafe.Pointer(buffer)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetBaseImages.Addr(), uintptr(unsafe.Pointer(buffer))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -357,7 +354,7 @@ func _getLayerMountPath(info *driverInfo, id *uint16, length *uintptr, buffer *u if hr != nil { return } - r0, _, _ := syscall.Syscall6(procGetLayerMountPath.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(length)), uintptr(unsafe.Pointer(buffer)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetLayerMountPath.Addr(), 
uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(length)), uintptr(unsafe.Pointer(buffer))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -386,7 +383,7 @@ func _grantVmAccess(vmid *uint16, filepath *uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procGrantVmAccess.Addr(), 2, uintptr(unsafe.Pointer(vmid)), uintptr(unsafe.Pointer(filepath)), 0) + r0, _, _ := syscall.SyscallN(procGrantVmAccess.Addr(), uintptr(unsafe.Pointer(vmid)), uintptr(unsafe.Pointer(filepath))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -419,7 +416,7 @@ func _importLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_L if len(descriptors) > 0 { _p2 = &descriptors[0] } - r0, _, _ := syscall.Syscall6(procImportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) + r0, _, _ := syscall.SyscallN(procImportLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -443,7 +440,7 @@ func _layerExists(info *driverInfo, id *uint16, exists *uint32) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procLayerExists.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(exists))) + r0, _, _ := syscall.SyscallN(procLayerExists.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(exists))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -467,7 +464,7 @@ func _nameToGuid(name *uint16, guid *_guid) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procNameToGuid.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(guid)), 0) + r0, _, _ := syscall.SyscallN(procNameToGuid.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(guid))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -495,7 +492,7 @@ func _prepareLayer(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPT if len(descriptors) > 0 { _p1 = &descriptors[0] } - r0, _, _ := syscall.Syscall6(procPrepareLayer.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0, 0) + r0, _, _ := syscall.SyscallN(procPrepareLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -519,7 +516,7 @@ func _processBaseImage(path *uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procProcessBaseImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r0, _, _ := syscall.SyscallN(procProcessBaseImage.Addr(), uintptr(unsafe.Pointer(path))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -543,7 +540,7 @@ func _processUtilityImage(path *uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procProcessUtilityImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r0, _, _ := syscall.SyscallN(procProcessUtilityImage.Addr(), uintptr(unsafe.Pointer(path))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -567,7 +564,7 @@ func _unprepareLayer(info *driverInfo, id *uint16) (hr error) { if hr != nil { return } - r0, _, _ := 
syscall.Syscall(procUnprepareLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
+	r0, _, _ := syscall.SyscallN(procUnprepareLayer.Addr(), uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go
index 93d633d49..70c43fc1c 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go
@@ -4,7 +4,6 @@ package winapi
 
 import (
 	"errors"
-	"reflect"
 	"syscall"
 	"unsafe"
 
@@ -14,11 +13,7 @@ import (
 // Uint16BufferToSlice wraps a uint16 pointer-and-length into a slice
 // for easier interop with Go APIs
 func Uint16BufferToSlice(buffer *uint16, bufferLength int) (result []uint16) {
-	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&result))
-	hdr.Data = uintptr(unsafe.Pointer(buffer))
-	hdr.Cap = bufferLength
-	hdr.Len = bufferLength
-
+	result = unsafe.Slice(buffer, bufferLength)
 	return
 }
 
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go
index ffd3cd7ff..33720fe8b 100644
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go
@@ -33,9 +33,6 @@ func errnoErr(e syscall.Errno) error {
 	case errnoERROR_IO_PENDING:
 		return errERROR_IO_PENDING
 	}
-	// TODO: add more here, after collecting data on the common
-	// error values see on Windows. (perhaps when running
-	// all.bat?)
 	return e
 }
 
@@ -109,7 +106,7 @@ var (
 )
 
 func LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) {
-	r1, _, e1 := syscall.Syscall6(procLogonUserW.Addr(), 6, uintptr(unsafe.Pointer(username)), uintptr(unsafe.Pointer(domain)), uintptr(unsafe.Pointer(password)), uintptr(logonType), uintptr(logonProvider), uintptr(unsafe.Pointer(token)))
+	r1, _, e1 := syscall.SyscallN(procLogonUserW.Addr(), uintptr(unsafe.Pointer(username)), uintptr(unsafe.Pointer(domain)), uintptr(unsafe.Pointer(password)), uintptr(logonType), uintptr(logonProvider), uintptr(unsafe.Pointer(token)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -121,7 +118,7 @@ func BfSetupFilter(jobHandle windows.Handle, flags uint32, virtRootPath *uint16,
 	if hr != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall6(procBfSetupFilter.Addr(), 6, uintptr(jobHandle), uintptr(flags), uintptr(unsafe.Pointer(virtRootPath)), uintptr(unsafe.Pointer(virtTargetPath)), uintptr(unsafe.Pointer(virtExceptions)), uintptr(virtExceptionPathCount))
+	r0, _, _ := syscall.SyscallN(procBfSetupFilter.Addr(), uintptr(jobHandle), uintptr(flags), uintptr(unsafe.Pointer(virtRootPath)), uintptr(unsafe.Pointer(virtTargetPath)), uintptr(unsafe.Pointer(virtExceptions)), uintptr(virtExceptionPathCount))
 	if int32(r0) < 0 {
 		if r0&0x1fff0000 == 0x00070000 {
 			r0 &= 0xffff
@@ -132,7 +129,7 @@ func BfSetupFilter(jobHandle windows.Handle, flags uint32, virtRootPath *uint16,
 }
 
 func CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) {
-	r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_PropertyW.Addr(), 6, uintptr(dnDevInst), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)),
uintptr(unsafe.Pointer(propertyBufferSize)), uintptr(uFlags)) + r0, _, _ := syscall.SyscallN(procCM_Get_DevNode_PropertyW.Addr(), uintptr(dnDevInst), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(unsafe.Pointer(propertyBufferSize)), uintptr(uFlags)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -143,7 +140,7 @@ func CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyTyp } func CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error) { - r0, _, _ := syscall.Syscall6(procCM_Get_Device_ID_ListA.Addr(), 4, uintptr(unsafe.Pointer(pszFilter)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(uFlags), 0, 0) + r0, _, _ := syscall.SyscallN(procCM_Get_Device_ID_ListA.Addr(), uintptr(unsafe.Pointer(pszFilter)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(uFlags)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -154,7 +151,7 @@ func CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags u } func CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr error) { - r0, _, _ := syscall.Syscall(procCM_Get_Device_ID_List_SizeA.Addr(), 3, uintptr(unsafe.Pointer(pulLen)), uintptr(unsafe.Pointer(pszFilter)), uintptr(uFlags)) + r0, _, _ := syscall.SyscallN(procCM_Get_Device_ID_List_SizeA.Addr(), uintptr(unsafe.Pointer(pulLen)), uintptr(unsafe.Pointer(pszFilter)), uintptr(uFlags)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -174,7 +171,7 @@ func CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr er } func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr error) { - r0, _, _ := syscall.Syscall(procCM_Locate_DevNodeW.Addr(), 3, uintptr(unsafe.Pointer(pdnDevInst)), uintptr(unsafe.Pointer(pDeviceID)), uintptr(uFlags)) + r0, _, _ := syscall.SyscallN(procCM_Locate_DevNodeW.Addr(), uintptr(unsafe.Pointer(pdnDevInst)), uintptr(unsafe.Pointer(pDeviceID)), uintptr(uFlags)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -189,7 +186,7 @@ func CimCloseImage(cimFSHandle FsHandle) (err error) { if err != nil { return } - syscall.Syscall(procCimCloseImage.Addr(), 1, uintptr(cimFSHandle), 0, 0) + syscall.SyscallN(procCimCloseImage.Addr(), uintptr(cimFSHandle)) return } @@ -198,7 +195,7 @@ func CimCloseStream(cimStreamHandle StreamHandle) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procCimCloseStream.Addr(), 1, uintptr(cimStreamHandle), 0, 0) + r0, _, _ := syscall.SyscallN(procCimCloseStream.Addr(), uintptr(cimStreamHandle)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -213,7 +210,7 @@ func CimCommitImage(cimFSHandle FsHandle) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procCimCommitImage.Addr(), 1, uintptr(cimFSHandle), 0, 0) + r0, _, _ := syscall.SyscallN(procCimCommitImage.Addr(), uintptr(cimFSHandle)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -237,7 +234,7 @@ func _CimCreateAlternateStream(cimFSHandle FsHandle, path *uint16, size uint64, if hr != nil { return } - r0, _, _ := syscall.Syscall6(procCimCreateAlternateStream.Addr(), 4, uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(size), uintptr(unsafe.Pointer(cimStreamHandle)), 0, 0) + r0, _, _ := syscall.SyscallN(procCimCreateAlternateStream.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(size), 
uintptr(unsafe.Pointer(cimStreamHandle))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -261,7 +258,7 @@ func _CimCreateFile(cimFSHandle FsHandle, path *uint16, file *CimFsFileMetadata, if hr != nil { return } - r0, _, _ := syscall.Syscall6(procCimCreateFile.Addr(), 4, uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(cimStreamHandle)), 0, 0) + r0, _, _ := syscall.SyscallN(procCimCreateFile.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(cimStreamHandle))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -290,7 +287,7 @@ func _CimCreateHardLink(cimFSHandle FsHandle, newPath *uint16, oldPath *uint16) if hr != nil { return } - r0, _, _ := syscall.Syscall(procCimCreateHardLink.Addr(), 3, uintptr(cimFSHandle), uintptr(unsafe.Pointer(newPath)), uintptr(unsafe.Pointer(oldPath))) + r0, _, _ := syscall.SyscallN(procCimCreateHardLink.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(newPath)), uintptr(unsafe.Pointer(oldPath))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -314,7 +311,7 @@ func _CimCreateImage(imagePath *uint16, oldFSName *uint16, newFSName *uint16, ci if hr != nil { return } - r0, _, _ := syscall.Syscall6(procCimCreateImage.Addr(), 4, uintptr(unsafe.Pointer(imagePath)), uintptr(unsafe.Pointer(oldFSName)), uintptr(unsafe.Pointer(newFSName)), uintptr(unsafe.Pointer(cimFSHandle)), 0, 0) + r0, _, _ := syscall.SyscallN(procCimCreateImage.Addr(), uintptr(unsafe.Pointer(imagePath)), uintptr(unsafe.Pointer(oldFSName)), uintptr(unsafe.Pointer(newFSName)), uintptr(unsafe.Pointer(cimFSHandle))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -338,7 +335,7 @@ func _CimDeletePath(cimFSHandle FsHandle, path *uint16) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procCimDeletePath.Addr(), 2, uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), 0) + r0, _, _ := syscall.SyscallN(procCimDeletePath.Addr(), uintptr(cimFSHandle), uintptr(unsafe.Pointer(path))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -353,7 +350,7 @@ func CimDismountImage(volumeID *g) (hr error) { if hr != nil { return } - r0, _, _ := syscall.Syscall(procCimDismountImage.Addr(), 1, uintptr(unsafe.Pointer(volumeID)), 0, 0) + r0, _, _ := syscall.SyscallN(procCimDismountImage.Addr(), uintptr(unsafe.Pointer(volumeID))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -382,7 +379,7 @@ func _CimMountImage(imagePath *uint16, fsName *uint16, flags uint32, volumeID *g if hr != nil { return } - r0, _, _ := syscall.Syscall6(procCimMountImage.Addr(), 4, uintptr(unsafe.Pointer(imagePath)), uintptr(unsafe.Pointer(fsName)), uintptr(flags), uintptr(unsafe.Pointer(volumeID)), 0, 0) + r0, _, _ := syscall.SyscallN(procCimMountImage.Addr(), uintptr(unsafe.Pointer(imagePath)), uintptr(unsafe.Pointer(fsName)), uintptr(flags), uintptr(unsafe.Pointer(volumeID))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -397,7 +394,7 @@ func CimWriteStream(cimStreamHandle StreamHandle, buffer uintptr, bufferSize uin if hr != nil { return } - r0, _, _ := syscall.Syscall(procCimWriteStream.Addr(), 3, uintptr(cimStreamHandle), uintptr(buffer), uintptr(bufferSize)) + r0, _, _ := syscall.SyscallN(procCimWriteStream.Addr(), uintptr(cimStreamHandle), uintptr(buffer), uintptr(bufferSize)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -408,7 +405,7 @@ func 
CimWriteStream(cimStreamHandle StreamHandle, buffer uintptr, bufferSize uin } func SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err error) { - r0, _, _ := syscall.Syscall(procSetJobCompartmentId.Addr(), 2, uintptr(handle), uintptr(compartmentId), 0) + r0, _, _ := syscall.SyscallN(procSetJobCompartmentId.Addr(), uintptr(handle), uintptr(compartmentId)) if r0 != 0 { win32Err = syscall.Errno(r0) } @@ -416,12 +413,12 @@ func SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err } func ClosePseudoConsole(hpc windows.Handle) { - syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(hpc), 0, 0) + syscall.SyscallN(procClosePseudoConsole.Addr(), uintptr(hpc)) return } func CopyFileW(existingFileName *uint16, newFileName *uint16, failIfExists int32) (err error) { - r1, _, e1 := syscall.Syscall(procCopyFileW.Addr(), 3, uintptr(unsafe.Pointer(existingFileName)), uintptr(unsafe.Pointer(newFileName)), uintptr(failIfExists)) + r1, _, e1 := syscall.SyscallN(procCopyFileW.Addr(), uintptr(unsafe.Pointer(existingFileName)), uintptr(unsafe.Pointer(newFileName)), uintptr(failIfExists)) if r1 == 0 { err = errnoErr(e1) } @@ -429,7 +426,7 @@ func CopyFileW(existingFileName *uint16, newFileName *uint16, failIfExists int32 } func createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) { - r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(hInput), uintptr(hOutput), uintptr(dwFlags), uintptr(unsafe.Pointer(hpcon)), 0) + r0, _, _ := syscall.SyscallN(procCreatePseudoConsole.Addr(), uintptr(size), uintptr(hInput), uintptr(hOutput), uintptr(dwFlags), uintptr(unsafe.Pointer(hpcon))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -440,7 +437,7 @@ func createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Han } func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, stackSize uint32, startAddr uintptr, parameter uintptr, creationFlags uint32, threadID *uint32) (handle windows.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateRemoteThread.Addr(), 7, uintptr(process), uintptr(unsafe.Pointer(sa)), uintptr(stackSize), uintptr(startAddr), uintptr(parameter), uintptr(creationFlags), uintptr(unsafe.Pointer(threadID)), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateRemoteThread.Addr(), uintptr(process), uintptr(unsafe.Pointer(sa)), uintptr(stackSize), uintptr(startAddr), uintptr(parameter), uintptr(creationFlags), uintptr(unsafe.Pointer(threadID))) handle = windows.Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -449,13 +446,13 @@ func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, } func GetActiveProcessorCount(groupNumber uint16) (amount uint32) { - r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) + r0, _, _ := syscall.SyscallN(procGetActiveProcessorCount.Addr(), uintptr(groupNumber)) amount = uint32(r0) return } func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) { - r1, _, e1 := syscall.Syscall(procIsProcessInJob.Addr(), 3, uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result))) + r1, _, e1 := syscall.SyscallN(procIsProcessInJob.Addr(), uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result))) if r1 == 0 { err = errnoErr(e1) } @@ -463,18 +460,18 @@ func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result } func 
LocalAlloc(flags uint32, size int) (ptr uintptr) { - r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0) + r0, _, _ := syscall.SyscallN(procLocalAlloc.Addr(), uintptr(flags), uintptr(size)) ptr = uintptr(r0) return } func LocalFree(ptr uintptr) { - syscall.Syscall(procLocalFree.Addr(), 1, uintptr(ptr), 0, 0) + syscall.SyscallN(procLocalFree.Addr(), uintptr(ptr)) return } func OpenJobObject(desiredAccess uint32, inheritHandle int32, lpName *uint16) (handle windows.Handle, err error) { - r0, _, e1 := syscall.Syscall(procOpenJobObjectW.Addr(), 3, uintptr(desiredAccess), uintptr(inheritHandle), uintptr(unsafe.Pointer(lpName))) + r0, _, e1 := syscall.SyscallN(procOpenJobObjectW.Addr(), uintptr(desiredAccess), uintptr(inheritHandle), uintptr(unsafe.Pointer(lpName))) handle = windows.Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -483,7 +480,7 @@ func OpenJobObject(desiredAccess uint32, inheritHandle int32, lpName *uint16) (h } func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0) + r1, _, e1 := syscall.SyscallN(procQueryInformationJobObject.Addr(), uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength))) if r1 == 0 { err = errnoErr(e1) } @@ -491,7 +488,7 @@ func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobOb } func QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName *uint16, ioRateControlInfo **JOBOBJECT_IO_RATE_CONTROL_INFORMATION, infoBlockCount *uint32) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall6(procQueryIoRateControlInformationJobObject.Addr(), 4, uintptr(jobHandle), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(ioRateControlInfo)), uintptr(unsafe.Pointer(infoBlockCount)), 0, 0) + r0, _, e1 := syscall.SyscallN(procQueryIoRateControlInformationJobObject.Addr(), uintptr(jobHandle), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(ioRateControlInfo)), uintptr(unsafe.Pointer(infoBlockCount))) ret = uint32(r0) if ret == 0 { err = errnoErr(e1) @@ -500,7 +497,7 @@ func QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName } func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) { - r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(hPc), uintptr(size), 0) + r0, _, _ := syscall.SyscallN(procResizePseudoConsole.Addr(), uintptr(hPc), uintptr(size)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -511,7 +508,7 @@ func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) { } func SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath *uint16) (size uint32, err error) { - r0, _, e1 := syscall.Syscall6(procSearchPathW.Addr(), 6, uintptr(unsafe.Pointer(lpPath)), uintptr(unsafe.Pointer(lpFileName)), uintptr(unsafe.Pointer(lpExtension)), uintptr(nBufferLength), uintptr(unsafe.Pointer(lpBuffer)), uintptr(unsafe.Pointer(lpFilePath))) + r0, _, e1 := syscall.SyscallN(procSearchPathW.Addr(), uintptr(unsafe.Pointer(lpPath)), uintptr(unsafe.Pointer(lpFileName)), uintptr(unsafe.Pointer(lpExtension)), 
uintptr(nBufferLength), uintptr(unsafe.Pointer(lpBuffer)), uintptr(unsafe.Pointer(lpFilePath))) size = uint32(r0) if size == 0 { err = errnoErr(e1) @@ -520,7 +517,7 @@ func SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBuffer } func SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procSetIoRateControlInformationJobObject.Addr(), 2, uintptr(jobHandle), uintptr(unsafe.Pointer(ioRateControlInfo)), 0) + r0, _, e1 := syscall.SyscallN(procSetIoRateControlInformationJobObject.Addr(), uintptr(jobHandle), uintptr(unsafe.Pointer(ioRateControlInfo))) ret = uint32(r0) if ret == 0 { err = errnoErr(e1) @@ -529,7 +526,7 @@ func SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateContro } func netLocalGroupAddMembers(serverName *uint16, groupName *uint16, level uint32, buf *byte, totalEntries uint32) (status error) { - r0, _, _ := syscall.Syscall6(procNetLocalGroupAddMembers.Addr(), 5, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(groupName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(totalEntries), 0) + r0, _, _ := syscall.SyscallN(procNetLocalGroupAddMembers.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(groupName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(totalEntries)) if r0 != 0 { status = syscall.Errno(r0) } @@ -537,7 +534,7 @@ func netLocalGroupAddMembers(serverName *uint16, groupName *uint16, level uint32 } func netLocalGroupGetInfo(serverName *uint16, groupName *uint16, level uint32, bufptr **byte) (status error) { - r0, _, _ := syscall.Syscall6(procNetLocalGroupGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(groupName)), uintptr(level), uintptr(unsafe.Pointer(bufptr)), 0, 0) + r0, _, _ := syscall.SyscallN(procNetLocalGroupGetInfo.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(groupName)), uintptr(level), uintptr(unsafe.Pointer(bufptr))) if r0 != 0 { status = syscall.Errno(r0) } @@ -545,7 +542,7 @@ func netLocalGroupGetInfo(serverName *uint16, groupName *uint16, level uint32, b } func netUserAdd(serverName *uint16, level uint32, buf *byte, parm_err *uint32) (status error) { - r0, _, _ := syscall.Syscall6(procNetUserAdd.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(parm_err)), 0, 0) + r0, _, _ := syscall.SyscallN(procNetUserAdd.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(parm_err))) if r0 != 0 { status = syscall.Errno(r0) } @@ -553,7 +550,7 @@ func netUserAdd(serverName *uint16, level uint32, buf *byte, parm_err *uint32) ( } func netUserDel(serverName *uint16, username *uint16) (status error) { - r0, _, _ := syscall.Syscall(procNetUserDel.Addr(), 2, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(username)), 0) + r0, _, _ := syscall.SyscallN(procNetUserDel.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(username))) if r0 != 0 { status = syscall.Errno(r0) } @@ -561,25 +558,25 @@ func netUserDel(serverName *uint16, username *uint16) (status error) { } func NtCreateFile(handle *uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) { - r0, _, _ := 
syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength), 0) + r0, _, _ := syscall.SyscallN(procNtCreateFile.Addr(), uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength)) status = uint32(r0) return } func NtCreateJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) { - r0, _, _ := syscall.Syscall(procNtCreateJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) + r0, _, _ := syscall.SyscallN(procNtCreateJobObject.Addr(), uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) status = uint32(r0) return } func NtOpenDirectoryObject(handle *uintptr, accessMask uint32, oa *ObjectAttributes) (status uint32) { - r0, _, _ := syscall.Syscall(procNtOpenDirectoryObject.Addr(), 3, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa))) + r0, _, _ := syscall.SyscallN(procNtOpenDirectoryObject.Addr(), uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa))) status = uint32(r0) return } func NtOpenJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) { - r0, _, _ := syscall.Syscall(procNtOpenJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) + r0, _, _ := syscall.SyscallN(procNtOpenJobObject.Addr(), uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) status = uint32(r0) return } @@ -593,31 +590,31 @@ func NtQueryDirectoryObject(handle uintptr, buffer *byte, length uint32, singleE if restartScan { _p1 = 1 } - r0, _, _ := syscall.Syscall9(procNtQueryDirectoryObject.Addr(), 7, uintptr(handle), uintptr(unsafe.Pointer(buffer)), uintptr(length), uintptr(_p0), uintptr(_p1), uintptr(unsafe.Pointer(context)), uintptr(unsafe.Pointer(returnLength)), 0, 0) + r0, _, _ := syscall.SyscallN(procNtQueryDirectoryObject.Addr(), uintptr(handle), uintptr(unsafe.Pointer(buffer)), uintptr(length), uintptr(_p0), uintptr(_p1), uintptr(unsafe.Pointer(context)), uintptr(unsafe.Pointer(returnLength))) status = uint32(r0) return } func NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) { - r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(processHandle), uintptr(processInfoClass), uintptr(processInfo), uintptr(processInfoLength), uintptr(unsafe.Pointer(returnLength)), 0) + r0, _, _ := syscall.SyscallN(procNtQueryInformationProcess.Addr(), uintptr(processHandle), uintptr(processInfoClass), uintptr(processInfo), uintptr(processInfoLength), uintptr(unsafe.Pointer(returnLength))) status = uint32(r0) return } func NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) { - r0, _, 
_ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) + r0, _, _ := syscall.SyscallN(procNtQuerySystemInformation.Addr(), uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength))) status = uint32(r0) return } func NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) { - r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class), 0) + r0, _, _ := syscall.SyscallN(procNtSetInformationFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class)) status = uint32(r0) return } func RtlNtStatusToDosError(status uint32) (winerr error) { - r0, _, _ := syscall.Syscall(procRtlNtStatusToDosError.Addr(), 1, uintptr(status), 0, 0) + r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosError.Addr(), uintptr(status)) if r0 != 0 { winerr = syscall.Errno(r0) } @@ -625,7 +622,7 @@ func RtlNtStatusToDosError(status uint32) (winerr error) { } func ORCloseHive(handle ORHKey) (win32err error) { - r0, _, _ := syscall.Syscall(procORCloseHive.Addr(), 1, uintptr(handle), 0, 0) + r0, _, _ := syscall.SyscallN(procORCloseHive.Addr(), uintptr(handle)) if r0 != 0 { win32err = syscall.Errno(r0) } @@ -633,7 +630,7 @@ func ORCloseHive(handle ORHKey) (win32err error) { } func ORCloseKey(handle ORHKey) (win32err error) { - r0, _, _ := syscall.Syscall(procORCloseKey.Addr(), 1, uintptr(handle), 0, 0) + r0, _, _ := syscall.SyscallN(procORCloseKey.Addr(), uintptr(handle)) if r0 != 0 { win32err = syscall.Errno(r0) } @@ -641,7 +638,7 @@ func ORCloseKey(handle ORHKey) (win32err error) { } func ORCreateHive(key *ORHKey) (win32err error) { - r0, _, _ := syscall.Syscall(procORCreateHive.Addr(), 1, uintptr(unsafe.Pointer(key)), 0, 0) + r0, _, _ := syscall.SyscallN(procORCreateHive.Addr(), uintptr(unsafe.Pointer(key))) if r0 != 0 { win32err = syscall.Errno(r0) } @@ -658,7 +655,7 @@ func ORCreateKey(handle ORHKey, subKey string, class uintptr, options uint32, se } func _ORCreateKey(handle ORHKey, subKey *uint16, class uintptr, options uint32, securityDescriptor uintptr, result *ORHKey, disposition *uint32) (win32err error) { - r0, _, _ := syscall.Syscall9(procORCreateKey.Addr(), 7, uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(class), uintptr(options), uintptr(securityDescriptor), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition)), 0, 0) + r0, _, _ := syscall.SyscallN(procORCreateKey.Addr(), uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(class), uintptr(options), uintptr(securityDescriptor), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) if r0 != 0 { win32err = syscall.Errno(r0) } @@ -675,7 +672,7 @@ func ORDeleteKey(handle ORHKey, subKey string) (win32err error) { } func _ORDeleteKey(handle ORHKey, subKey *uint16) (win32err error) { - r0, _, _ := syscall.Syscall(procORDeleteKey.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(subKey)), 0) + r0, _, _ := syscall.SyscallN(procORDeleteKey.Addr(), uintptr(handle), uintptr(unsafe.Pointer(subKey))) if r0 != 0 { win32err = syscall.Errno(r0) } @@ -697,7 +694,7 @@ func ORGetValue(handle ORHKey, subKey string, value string, valueType *uint32, d } func _ORGetValue(handle ORHKey, subKey *uint16, value *uint16, 
valueType *uint32, data *byte, dataLen *uint32) (win32err error) { - r0, _, _ := syscall.Syscall6(procORGetValue.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(unsafe.Pointer(value)), uintptr(unsafe.Pointer(valueType)), uintptr(unsafe.Pointer(data)), uintptr(unsafe.Pointer(dataLen))) + r0, _, _ := syscall.SyscallN(procORGetValue.Addr(), uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(unsafe.Pointer(value)), uintptr(unsafe.Pointer(valueType)), uintptr(unsafe.Pointer(data)), uintptr(unsafe.Pointer(dataLen))) if r0 != 0 { win32err = syscall.Errno(r0) } @@ -709,7 +706,7 @@ func ORMergeHives(hiveHandles []ORHKey, result *ORHKey) (win32err error) { if len(hiveHandles) > 0 { _p0 = &hiveHandles[0] } - r0, _, _ := syscall.Syscall(procORMergeHives.Addr(), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(hiveHandles)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procORMergeHives.Addr(), uintptr(unsafe.Pointer(_p0)), uintptr(len(hiveHandles)), uintptr(unsafe.Pointer(result))) if r0 != 0 { win32err = syscall.Errno(r0) } @@ -726,7 +723,7 @@ func OROpenHive(hivePath string, result *ORHKey) (win32err error) { } func _OROpenHive(hivePath *uint16, result *ORHKey) (win32err error) { - r0, _, _ := syscall.Syscall(procOROpenHive.Addr(), 2, uintptr(unsafe.Pointer(hivePath)), uintptr(unsafe.Pointer(result)), 0) + r0, _, _ := syscall.SyscallN(procOROpenHive.Addr(), uintptr(unsafe.Pointer(hivePath)), uintptr(unsafe.Pointer(result))) if r0 != 0 { win32err = syscall.Errno(r0) } @@ -743,7 +740,7 @@ func OROpenKey(handle ORHKey, subKey string, result *ORHKey) (win32err error) { } func _OROpenKey(handle ORHKey, subKey *uint16, result *ORHKey) (win32err error) { - r0, _, _ := syscall.Syscall(procOROpenKey.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.SyscallN(procOROpenKey.Addr(), uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(unsafe.Pointer(result))) if r0 != 0 { win32err = syscall.Errno(r0) } @@ -760,7 +757,7 @@ func ORSaveHive(handle ORHKey, hivePath string, osMajorVersion uint32, osMinorVe } func _ORSaveHive(handle ORHKey, hivePath *uint16, osMajorVersion uint32, osMinorVersion uint32) (win32err error) { - r0, _, _ := syscall.Syscall6(procORSaveHive.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(hivePath)), uintptr(osMajorVersion), uintptr(osMinorVersion), 0, 0) + r0, _, _ := syscall.SyscallN(procORSaveHive.Addr(), uintptr(handle), uintptr(unsafe.Pointer(hivePath)), uintptr(osMajorVersion), uintptr(osMinorVersion)) if r0 != 0 { win32err = syscall.Errno(r0) } @@ -777,7 +774,7 @@ func ORSetValue(handle ORHKey, valueName string, valueType uint32, data *byte, d } func _ORSetValue(handle ORHKey, valueName *uint16, valueType uint32, data *byte, dataLen uint32) (win32err error) { - r0, _, _ := syscall.Syscall6(procORSetValue.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(valueName)), uintptr(valueType), uintptr(unsafe.Pointer(data)), uintptr(dataLen), 0) + r0, _, _ := syscall.SyscallN(procORSetValue.Addr(), uintptr(handle), uintptr(unsafe.Pointer(valueName)), uintptr(valueType), uintptr(unsafe.Pointer(data)), uintptr(dataLen)) if r0 != 0 { win32err = syscall.Errno(r0) } diff --git a/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go index 9b619b6e6..e43d59a40 100644 --- a/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go @@ -33,9 +33,6 @@ func errnoErr(e 
syscall.Errno) error { case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) return e } @@ -46,7 +43,7 @@ var ( ) func SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) { - r0, _, _ := syscall.Syscall(procSetCurrentThreadCompartmentId.Addr(), 1, uintptr(compartmentId), 0, 0) + r0, _, _ := syscall.SyscallN(procSetCurrentThreadCompartmentId.Addr(), uintptr(compartmentId)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 54a93239e..47e3601d7 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -1079,6 +1079,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -1091,6 +1094,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -20938,6 +20944,9 @@ var awsPartition = partition{ }, "meetings-chime": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -20956,6 +20965,21 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -41977,6 +42001,62 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "kinesisvideo": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "kms": service{ Endpoints: serviceEndpoints{ endpointKey{ diff --git 
a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 31717f9b9..5c1faee5c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.54.2" +const SDKVersion = "1.54.10" diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go index 2f850dd84..a77986f7d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go @@ -69,8 +69,7 @@ func (c *AutoScaling) AttachInstancesRequest(input *AttachInstancesInput) (req * // groups attached to your Auto Scaling group, the instances are also registered // with the target groups. // -// For more information, see Attach EC2 instances to your Auto Scaling group -// (https://docs.aws.amazon.com/autoscaling/ec2/userguide/attach-instance-asg.html) +// For more information, see Detach or attach instances (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-detach-attach-instances.html) // in the Amazon EC2 Auto Scaling User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -969,7 +968,7 @@ func (c *AutoScaling) CreateLaunchConfigurationRequest(input *CreateLaunchConfig // about updating this limit, see Quotas for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-quotas.html) // in the Amazon EC2 Auto Scaling User Guide. // -// For more information, see Launch configurations (https://docs.aws.amazon.com/autoscaling/ec2/userguide/LaunchConfiguration.html) +// For more information, see Launch configurations (https://docs.aws.amazon.com/autoscaling/ec2/userguide/launch-configurations.html) // in the Amazon EC2 Auto Scaling User Guide. // // Amazon EC2 Auto Scaling configures instances launched as part of an Auto @@ -1534,7 +1533,7 @@ func (c *AutoScaling) DeletePolicyRequest(input *DeletePolicyInput) (req *reques // the underlying alarm action, but does not delete the alarm, even if it no // longer has an associated action. // -// For more information, see Deleting a scaling policy (https://docs.aws.amazon.com/autoscaling/ec2/userguide/deleting-scaling-policy.html) +// For more information, see Delete a scaling policy (https://docs.aws.amazon.com/autoscaling/ec2/userguide/deleting-scaling-policy.html) // in the Amazon EC2 Auto Scaling User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3599,8 +3598,8 @@ func (c *AutoScaling) DescribeScalingActivitiesRequest(input *DescribeScalingAct // Gets information about the scaling activities in the account and Region. // // When scaling events occur, you see a record of the scaling activity in the -// scaling activities. For more information, see Verifying a scaling activity -// for an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-verify-scaling-activity.html) +// scaling activities. For more information, see Verify a scaling activity for +// an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-verify-scaling-activity.html) // in the Amazon EC2 Auto Scaling User Guide. 
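As an aside on the DescribeScalingActivities documentation reworded above: a minimal sketch of how that call is typically made with the v1 SDK, reporting the StatusCode the doc comment refers to. The region, group name, and record count below are illustrative and not taken from this repository.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := autoscaling.New(sess)

	// List recent scaling activities for a hypothetical group and print the
	// StatusCode of each activity record.
	out, err := svc.DescribeScalingActivities(&autoscaling.DescribeScalingActivitiesInput{
		AutoScalingGroupName: aws.String("example-asg"),
		MaxRecords:           aws.Int64(10),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, a := range out.Activities {
		fmt.Printf("%s: %s\n", aws.StringValue(a.ActivityId), aws.StringValue(a.StatusCode))
	}
}
```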
// // If the scaling event succeeds, the value of the StatusCode element in the @@ -4120,8 +4119,8 @@ func (c *AutoScaling) DescribeTerminationPolicyTypesRequest(input *DescribeTermi // // Describes the termination policies supported by Amazon EC2 Auto Scaling. // -// For more information, see Work with Amazon EC2 Auto Scaling termination policies -// (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-termination-policies.html) +// For more information, see Configure termination policies for Amazon EC2 Auto +// Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-termination-policies.html) // in the Amazon EC2 Auto Scaling User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4510,8 +4509,7 @@ func (c *AutoScaling) DetachInstancesRequest(input *DetachInstancesInput) (req * // attached to the Auto Scaling group, the instances are deregistered from the // target groups. // -// For more information, see Detach EC2 instances from your Auto Scaling group -// (https://docs.aws.amazon.com/autoscaling/ec2/userguide/detach-instance-asg.html) +// For more information, see Detach or attach instances (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-detach-attach-instances.html) // in the Amazon EC2 Auto Scaling User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5521,8 +5519,8 @@ func (c *AutoScaling) PutNotificationConfigurationRequest(input *PutNotification // // This configuration overwrites any existing configuration. // -// For more information, see Getting Amazon SNS notifications when your Auto -// Scaling group scales (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ASGettingNotifications.html) +// For more information, see Amazon SNS notification options for Amazon EC2 +// Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-sns-notifications.html) // in the Amazon EC2 Auto Scaling User Guide. // // If you exceed your maximum limit of SNS topics, which is 10 per Auto Scaling @@ -5723,7 +5721,7 @@ func (c *AutoScaling) PutScheduledUpdateGroupActionRequest(input *PutScheduledUp // // Creates or updates a scheduled scaling action for an Auto Scaling group. // -// For more information, see Scheduled scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/schedule_time.html) +// For more information, see Scheduled scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-scheduled-scaling.html) // in the Amazon EC2 Auto Scaling User Guide. // // You can view the scheduled actions for an Auto Scaling group using the DescribeScheduledActions @@ -5825,20 +5823,18 @@ func (c *AutoScaling) PutWarmPoolRequest(input *PutWarmPoolInput) (req *request. // Creates or updates a warm pool for the specified Auto Scaling group. A warm // pool is a pool of pre-initialized EC2 instances that sits alongside the Auto // Scaling group. Whenever your application needs to scale out, the Auto Scaling -// group can draw on the warm pool to meet its new desired capacity. For more -// information and example configurations, see Warm pools for Amazon EC2 Auto -// Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-warm-pools.html) -// in the Amazon EC2 Auto Scaling User Guide. +// group can draw on the warm pool to meet its new desired capacity. // // This operation must be called from the Region in which the Auto Scaling group -// was created. 
This operation cannot be called on an Auto Scaling group that -// has a mixed instances policy or a launch template or launch configuration -// that requests Spot Instances. +// was created. // // You can view the instances in the warm pool using the DescribeWarmPool API // call. If you are no longer using a warm pool, you can delete it by calling // the DeleteWarmPool API. // +// For more information, see Warm pools for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-warm-pools.html) +// in the Amazon EC2 Auto Scaling User Guide. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6036,7 +6032,8 @@ func (c *AutoScaling) ResumeProcessesRequest(input *ScalingProcessQuery) (req *r // Resumes the specified suspended auto scaling processes, or all suspended // process, for the specified Auto Scaling group. // -// For more information, see Suspending and resuming scaling processes (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html) +// For more information, see Suspend and resume Amazon EC2 Auto Scaling processes +// (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html) // in the Amazon EC2 Auto Scaling User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6245,7 +6242,7 @@ func (c *AutoScaling) SetDesiredCapacityRequest(input *SetDesiredCapacityInput) // that is lower than the current size of the group, the Auto Scaling group // uses its termination policy to determine which instances to terminate. // -// For more information, see Manual scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-manual-scaling.html) +// For more information, see Manual scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-scaling-manually.html) // in the Amazon EC2 Auto Scaling User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6333,7 +6330,8 @@ func (c *AutoScaling) SetInstanceHealthRequest(input *SetInstanceHealthInput) (r // // Sets the health status of the specified instance. // -// For more information, see Health checks for Auto Scaling instances (https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html) +// For more information, see Health checks for instances in an Auto Scaling +// group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-health-checks.html) // in the Amazon EC2 Auto Scaling User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6417,9 +6415,7 @@ func (c *AutoScaling) SetInstanceProtectionRequest(input *SetInstanceProtectionI // Updates the instance protection settings of the specified instances. This // operation cannot be called on instances in a warm pool. // -// For more information about preventing instances that are part of an Auto -// Scaling group from terminating on scale in, see Using instance scale-in protection -// (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-instance-protection.html) +// For more information, see Use instance scale-in protection (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-instance-protection.html) // in the Amazon EC2 Auto Scaling User Guide. 
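For the SetInstanceProtection doc comment updated above, a minimal self-contained sketch of the call it describes; the group name and instance IDs are hypothetical.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := autoscaling.New(sess)

	// Protect two (hypothetical) instances from scale-in, as described in the
	// SetInstanceProtection documentation.
	_, err := svc.SetInstanceProtection(&autoscaling.SetInstanceProtectionInput{
		AutoScalingGroupName: aws.String("example-asg"),
		InstanceIds:          aws.StringSlice([]string{"i-0123456789abcdef0", "i-0fedcba9876543210"}),
		ProtectedFromScaleIn: aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```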
// // If you exceed your maximum limit of instance IDs, which is 50 per Auto Scaling @@ -6630,7 +6626,7 @@ func (c *AutoScaling) SuspendProcessesRequest(input *ScalingProcessQuery) (req * // // If you suspend either the Launch or Terminate process types, it can prevent // other process types from functioning properly. For more information, see -// Suspending and resuming scaling processes (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html) +// Suspend and resume Amazon EC2 Auto Scaling processes (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html) // in the Amazon EC2 Auto Scaling User Guide. // // To resume processes that have been suspended, call the ResumeProcesses API. @@ -6731,7 +6727,7 @@ func (c *AutoScaling) TerminateInstanceInAutoScalingGroupRequest(input *Terminat // Zones. If you decrement the desired capacity, your Auto Scaling group can // become unbalanced between Availability Zones. Amazon EC2 Auto Scaling tries // to rebalance the group, and rebalancing might terminate instances in other -// zones. For more information, see Rebalancing activities (https://docs.aws.amazon.com/autoscaling/ec2/userguide/auto-scaling-benefits.html#AutoScalingBehavior.InstanceUsage) +// zones. For more information, see Manual scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-scaling-manually.html) // in the Amazon EC2 Auto Scaling User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -8218,7 +8214,7 @@ type CreateAutoScalingGroupInput struct { // // The amount of time, in seconds, between one scaling activity ending and another // one starting due to simple scaling policies. For more information, see Scaling - // cooldowns for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) + // cooldowns for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-scaling-cooldowns.html) // in the Amazon EC2 Auto Scaling User Guide. // // Default: 300 seconds @@ -8256,8 +8252,8 @@ type CreateAutoScalingGroupInput struct { // The unit of measurement for the value specified for desired capacity. Amazon // EC2 Auto Scaling supports DesiredCapacityType for attribute-based instance - // type selection only. For more information, see Creating an Auto Scaling group - // using attribute-based instance type selection (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-asg-instance-type-requirements.html) + // type selection only. For more information, see Create a mixed instances group + // using attribute-based instance type selection (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-mixed-instances-group-attribute-based-instance-type-selection.html) // in the Amazon EC2 Auto Scaling User Guide. // // By default, Amazon EC2 Auto Scaling specifies units, which translates into @@ -8281,7 +8277,7 @@ type CreateAutoScalingGroupInput struct { // // The valid values are EC2, ELB, and VPC_LATTICE. EC2 is the default health // check and cannot be disabled. For more information, see Health checks for - // Auto Scaling instances (https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html) + // instances in an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-health-checks.html) // in the Amazon EC2 Auto Scaling User Guide. // // Only specify EC2 if you must clear a value that was previously set. 
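The surrounding hunks rewrite the doc comments on CreateAutoScalingGroupInput fields (DefaultCooldown, HealthCheckType, LaunchTemplate, scale-in protection, termination policies, and so on); a minimal sketch of a request that sets those fields, with all concrete names and values invented for illustration.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := autoscaling.New(sess)

	// Create a hypothetical group from a launch template, touching the fields
	// whose documentation is updated in this diff.
	input := &autoscaling.CreateAutoScalingGroupInput{
		AutoScalingGroupName: aws.String("example-asg"),
		MinSize:              aws.Int64(1),
		MaxSize:              aws.Int64(4),
		DefaultCooldown:      aws.Int64(300), // seconds between simple scaling activities
		HealthCheckType:      aws.String("ELB"),
		LaunchTemplate: &autoscaling.LaunchTemplateSpecification{
			LaunchTemplateName: aws.String("example-template"),
			Version:            aws.String("$Latest"),
		},
		MaxInstanceLifetime:              aws.Int64(86400),
		NewInstancesProtectedFromScaleIn: aws.Bool(true),
		TerminationPolicies:              aws.StringSlice([]string{"OldestInstance"}),
		VPCZoneIdentifier:                aws.String("subnet-0123456789abcdef0"),
	}
	if _, err := svc.CreateAutoScalingGroup(input); err != nil {
		log.Fatal(err)
	}
}
```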
@@ -8291,8 +8287,8 @@ type CreateAutoScalingGroupInput struct { // Amazon EC2 Auto Scaling uses the configuration values from the specified // instance to create a new launch configuration. To get the instance ID, use // the Amazon EC2 DescribeInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html) - // API operation. For more information, see Creating an Auto Scaling group using - // an EC2 instance (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-asg-from-instance.html) + // API operation. For more information, see Create an Auto Scaling group using + // parameters from an existing instance (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-asg-from-instance.html) // in the Amazon EC2 Auto Scaling User Guide. InstanceId *string `min:"1" type:"string"` @@ -8316,8 +8312,8 @@ type CreateAutoScalingGroupInput struct { // or InstanceId). // // The launch template that is specified must be configured for use with an - // Auto Scaling group. For more information, see Creating a launch template - // for an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-template.html) + // Auto Scaling group. For more information, see Create a launch template for + // an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-template.html) // in the Amazon EC2 Auto Scaling User Guide. LaunchTemplate *LaunchTemplateSpecification `type:"structure"` @@ -8333,7 +8329,7 @@ type CreateAutoScalingGroupInput struct { // The maximum amount of time, in seconds, that an instance can be in service. // The default is null. If specified, the value must be either 0 or a number // equal to or greater than 86,400 seconds (1 day). For more information, see - // Replacing Auto Scaling instances based on maximum instance lifetime (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-max-instance-lifetime.html) + // Replace Auto Scaling instances based on maximum instance lifetime (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-max-instance-lifetime.html) // in the Amazon EC2 Auto Scaling User Guide. MaxInstanceLifetime *int64 `type:"integer"` @@ -8360,7 +8356,7 @@ type CreateAutoScalingGroupInput struct { // Indicates whether newly launched instances are protected from termination // by Amazon EC2 Auto Scaling when scaling in. For more information about preventing - // instances from terminating on scale in, see Using instance scale-in protection + // instances from terminating on scale in, see Use instance scale-in protection // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-instance-protection.html) // in the Amazon EC2 Auto Scaling User Guide. NewInstancesProtectedFromScaleIn *bool `type:"boolean"` @@ -8404,7 +8400,7 @@ type CreateAutoScalingGroupInput struct { // A policy or a list of policies that are used to select the instance to terminate. // These policies are executed in the order that you list them. For more information, - // see Work with Amazon EC2 Auto Scaling termination policies (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-termination-policies.html) + // see Configure termination policies for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-termination-policies.html) // in the Amazon EC2 Auto Scaling User Guide. 
// // Valid values: Default | AllocationStrategy | ClosestToNextInstanceHour | @@ -8732,8 +8728,8 @@ type CreateLaunchConfigurationInput struct { // the option to assign a public IPv4 address on the subnet. // // If you specify true, each instance in the Auto Scaling group receives a unique - // public IPv4 address. For more information, see Launching Auto Scaling instances - // in a VPC (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html) + // public IPv4 address. For more information, see Provide network connectivity + // for your Auto Scaling instances using Amazon VPC (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html) // in the Amazon EC2 Auto Scaling User Guide. // // If you specify this property, you must specify at least one subnet for VPCZoneIdentifier @@ -8759,7 +8755,7 @@ type CreateLaunchConfigurationInput struct { // This optimization is not available with all instance types. Additional fees // are incurred when you enable EBS optimization for an instance type that is // not EBS-optimized by default. For more information, see Amazon EBS-optimized - // instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html) + // instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-optimized.html) // in the Amazon EC2 User Guide for Linux Instances. // // The default value is false. @@ -8773,7 +8769,7 @@ type CreateLaunchConfigurationInput struct { IamInstanceProfile *string `min:"1" type:"string"` // The ID of the Amazon Machine Image (AMI) that was assigned during registration. - // For more information, see Finding a Linux AMI (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html) + // For more information, see Find a Linux AMI (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html) // in the Amazon EC2 User Guide for Linux Instances. // // If you specify InstanceId, an ImageId is not required. @@ -8786,8 +8782,7 @@ type CreateLaunchConfigurationInput struct { // To create a launch configuration with a block device mapping or override // any other instance attributes, specify them as part of the same request. // - // For more information, see Creating a launch configuration using an EC2 instance - // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-lc-with-instanceID.html) + // For more information, see Create a launch configuration (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-config.html) // in the Amazon EC2 Auto Scaling User Guide. InstanceId *string `min:"1" type:"string"` @@ -8799,7 +8794,7 @@ type CreateLaunchConfigurationInput struct { // When detailed monitoring is enabled, Amazon CloudWatch generates metrics // every minute and your account is charged a fee. When you disable detailed // monitoring, CloudWatch generates metrics every 5 minutes. For more information, - // see Configure Monitoring for Auto Scaling Instances (https://docs.aws.amazon.com/autoscaling/latest/userguide/enable-as-instance-metrics.html) + // see Configure monitoring for Auto Scaling instances (https://docs.aws.amazon.com/autoscaling/latest/userguide/enable-as-instance-metrics.html) // in the Amazon EC2 Auto Scaling User Guide. InstanceMonitoring *InstanceMonitoring `type:"structure"` @@ -8818,7 +8813,7 @@ type CreateLaunchConfigurationInput struct { KernelId *string `min:"1" type:"string"` // The name of the key pair. 
For more information, see Amazon EC2 key pairs - // and Linux instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) + // and Amazon EC2 instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) // in the Amazon EC2 User Guide for Linux Instances. KeyName *string `min:"1" type:"string"` @@ -8828,8 +8823,8 @@ type CreateLaunchConfigurationInput struct { // LaunchConfigurationName is a required field LaunchConfigurationName *string `min:"1" type:"string" required:"true"` - // The metadata options for the instances. For more information, see Configuring - // the Instance Metadata Options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-config.html#launch-configurations-imds) + // The metadata options for the instances. For more information, see Configure + // the instance metadata options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-config.html#launch-configurations-imds) // in the Amazon EC2 Auto Scaling User Guide. MetadataOptions *InstanceMetadataOptions `type:"structure"` @@ -8837,9 +8832,7 @@ type CreateLaunchConfigurationInput struct { // dedicated tenancy runs on isolated, single-tenant hardware and can only be // launched into a VPC. To launch dedicated instances into a shared tenancy // VPC (a VPC with the instance placement tenancy attribute set to default), - // you must set the value of this property to dedicated. For more information, - // see Configuring instance tenancy with Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/auto-scaling-dedicated-instances.html) - // in the Amazon EC2 Auto Scaling User Guide. + // you must set the value of this property to dedicated. // // If you specify PlacementTenancy, you must specify at least one subnet for // VPCZoneIdentifier when you create your group. @@ -8855,8 +8848,8 @@ type CreateLaunchConfigurationInput struct { RamdiskId *string `min:"1" type:"string"` // A list that contains the security group IDs to assign to the instances in - // the Auto Scaling group. For more information, see Control traffic to resources - // using security groups (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html) + // the Auto Scaling group. For more information, see Control traffic to your + // Amazon Web Services resources using security groups (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html) // in the Amazon Virtual Private Cloud User Guide. SecurityGroups []*string `type:"list"` @@ -11984,7 +11977,7 @@ type DesiredConfiguration struct { // Describes the launch template and the version of the launch template that // Amazon EC2 Auto Scaling uses to launch Amazon EC2 instances. For more information - // about launch templates, see Launch templates (https://docs.aws.amazon.com/autoscaling/ec2/userguide/LaunchTemplates.html) + // about launch templates, see Launch templates (https://docs.aws.amazon.com/autoscaling/ec2/userguide/launch-templates.html) // in the Amazon EC2 Auto Scaling User Guide. LaunchTemplate *LaunchTemplateSpecification `type:"structure"` @@ -12474,7 +12467,8 @@ type DisableMetricsCollectionInput struct { // // If you omit this property, all metrics are disabled. 
// - // For more information, see Auto Scaling group metrics (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-cloudwatch-monitoring.html#as-group-metrics) + // For more information, see Amazon CloudWatch metrics for Amazon EC2 Auto Scaling + // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-metrics.html) // in the Amazon EC2 Auto Scaling User Guide. Metrics []*string `type:"list"` } @@ -12558,9 +12552,9 @@ type Ebs struct { // Specifies whether the volume should be encrypted. Encrypted EBS volumes can // only be attached to instances that support Amazon EBS encryption. For more - // information, see Supported instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances). - // If your AMI uses encrypted volumes, you can also only launch it on supported - // instance types. + // information, see Requirements for Amazon EBS encryption (https://docs.aws.amazon.com/ebs/latest/userguide/ebs-encryption-requirements.html) + // in the Amazon EBS User Guide. If your AMI uses encrypted volumes, you can + // also only launch it on supported instance types. // // If you are creating a volume from a snapshot, you cannot create an unencrypted // volume from an encrypted snapshot. Also, you cannot specify a KMS key ID @@ -12588,7 +12582,7 @@ type Ebs struct { // * io1: 100-64,000 IOPS // // For io1 volumes, we guarantee 64,000 IOPS only for Instances built on the - // Nitro System (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances). + // Amazon Web Services Nitro System (https://docs.aws.amazon.com/ec2/latest/instancetypes/ec2-nitro-instances.html). // Other instance families guarantee performance up to 32,000 IOPS. // // Iops is supported when the volume type is gp3 or io1 and required only when @@ -12619,8 +12613,8 @@ type Ebs struct { // the size of the snapshot. VolumeSize *int64 `min:"1" type:"integer"` - // The volume type. For more information, see Amazon EBS volume types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) - // in the Amazon EC2 User Guide for Linux Instances. + // The volume type. For more information, see Amazon EBS volume types (https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html) + // in the Amazon EBS User Guide. // // Valid values: standard | io1 | gp2 | st1 | sc1 | gp3 VolumeType *string `min:"1" type:"string"` @@ -12772,7 +12766,8 @@ type EnableMetricsCollectionInput struct { // If you specify Granularity and don't specify any metrics, all metrics are // enabled. // - // For more information, see Auto Scaling group metrics (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-cloudwatch-monitoring.html#as-group-metrics) + // For more information, see Amazon CloudWatch metrics for Amazon EC2 Auto Scaling + // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-metrics.html) // in the Amazon EC2 Auto Scaling User Guide. Metrics []*string `type:"list"` } @@ -12906,7 +12901,8 @@ type EnabledMetric struct { // // * GroupAndWarmPoolTotalCapacity // - // For more information, see Auto Scaling group metrics (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-cloudwatch-monitoring.html#as-group-metrics) + // For more information, see Amazon CloudWatch metrics for Amazon EC2 Auto Scaling + // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-metrics.html) // in the Amazon EC2 Auto Scaling User Guide. 
Metric *string `min:"1" type:"string"` } @@ -13060,7 +13056,7 @@ type ExecutePolicyInput struct { // complete before executing the policy. // // Valid only if the policy type is SimpleScaling. For more information, see - // Scaling cooldowns for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) + // Scaling cooldowns for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-scaling-cooldowns.html) // in the Amazon EC2 Auto Scaling User Guide. HonorCooldown *bool `type:"boolean"` @@ -13951,7 +13947,8 @@ type Instance struct { LaunchTemplate *LaunchTemplateSpecification `type:"structure"` // A description of the current lifecycle state. The Quarantined state is not - // used. For information about lifecycle states, see Instance lifecycle (https://docs.aws.amazon.com/autoscaling/ec2/userguide/AutoScalingGroupLifecycle.html) + // used. For more information, see Amazon EC2 Auto Scaling instance lifecycle + // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-lifecycle.html) // in the Amazon EC2 Auto Scaling User Guide. // // LifecycleState is a required field @@ -14080,7 +14077,7 @@ type InstanceDetails struct { LaunchTemplate *LaunchTemplateSpecification `type:"structure"` // The lifecycle state for the instance. The Quarantined state is not used. - // For information about lifecycle states, see Instance lifecycle (https://docs.aws.amazon.com/autoscaling/ec2/userguide/AutoScalingGroupLifecycle.html) + // For more information, see Amazon EC2 Auto Scaling instance lifecycle (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-lifecycle.html) // in the Amazon EC2 Auto Scaling User Guide. // // Valid values: Pending | Pending:Wait | Pending:Proceed | Quarantined | InService @@ -14255,8 +14252,8 @@ func (s *InstanceMaintenancePolicy) SetMinHealthyPercentage(v int64) *InstanceMa return s } -// The metadata options for the instances. For more information, see Configuring -// the Instance Metadata Options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-config.html#launch-configurations-imds) +// The metadata options for the instances. For more information, see Configure +// the instance metadata options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-config.html#launch-configurations-imds) // in the Amazon EC2 Auto Scaling User Guide. type InstanceMetadataOptions struct { _ struct{} `type:"structure"` @@ -14690,8 +14687,8 @@ func (s *InstanceRefreshWarmPoolProgress) SetPercentageComplete(v int64) *Instan // You must specify VCpuCount and MemoryMiB. All other attributes are optional. // Any unspecified optional attribute is set to its default. // -// For more information, see Creating an Auto Scaling group using attribute-based -// instance type selection (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-asg-instance-type-requirements.html) +// For more information, see Create a mixed instances group using attribute-based +// instance type selection (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-mixed-instances-group-attribute-based-instance-type-selection.html) // in the Amazon EC2 Auto Scaling User Guide. 
For help determining which instance // types match your attributes before you apply them to your Auto Scaling group, // see Preview instance types with specified attributes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-attribute-based-instance-type-selection.html#ec2fleet-get-instance-types-from-instance-requirements) @@ -15366,14 +15363,15 @@ type LaunchConfiguration struct { // IPv4 address on the subnet. If the instance is launched into a nondefault // subnet, the default is not to assign a public IPv4 address, unless you enabled // the option to assign a public IPv4 address on the subnet. For more information, - // see Launching Auto Scaling instances in a VPC (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html) + // see Provide network connectivity for your Auto Scaling instances using Amazon + // VPC (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html) // in the Amazon EC2 Auto Scaling User Guide. AssociatePublicIpAddress *bool `type:"boolean"` // The block device mapping entries that define the block devices to attach // to the instances at launch. By default, the block devices specified in the // block device mapping for the AMI are used. For more information, see Block - // Device Mapping (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html) + // device mappings (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html) // in the Amazon EC2 User Guide for Linux Instances. BlockDeviceMappings []*BlockDeviceMapping `type:"list"` @@ -15389,9 +15387,9 @@ type LaunchConfiguration struct { CreatedTime *time.Time `type:"timestamp" required:"true"` // Specifies whether the launch configuration is optimized for EBS I/O (true) - // or not (false). For more information, see Amazon EBS-Optimized Instances - // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html) in - // the Amazon EC2 User Guide for Linux Instances. + // or not (false). For more information, see Amazon EBS-optimized instances + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-optimized.html) + // in the Amazon EC2 User Guide for Linux Instances. EbsOptimized *bool `type:"boolean"` // The name or the Amazon Resource Name (ARN) of the instance profile associated @@ -15411,7 +15409,7 @@ type LaunchConfiguration struct { // Controls whether instances in this group are launched with detailed (true) // or basic (false) monitoring. // - // For more information, see Configure Monitoring for Auto Scaling Instances + // For more information, see Configure monitoring for Auto Scaling instances // (https://docs.aws.amazon.com/autoscaling/latest/userguide/enable-as-instance-metrics.html) // in the Amazon EC2 Auto Scaling User Guide. InstanceMonitoring *InstanceMonitoring `type:"structure"` @@ -15428,7 +15426,7 @@ type LaunchConfiguration struct { // The name of the key pair. // - // For more information, see Amazon EC2 Key Pairs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) + // For more information, see Amazon EC2 key pairs and Amazon EC2 instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) // in the Amazon EC2 User Guide for Linux Instances. KeyName *string `min:"1" type:"string"` @@ -15440,33 +15438,29 @@ type LaunchConfiguration struct { // LaunchConfigurationName is a required field LaunchConfigurationName *string `min:"1" type:"string" required:"true"` - // The metadata options for the instances. 
For more information, see Configuring - // the Instance Metadata Options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-config.html#launch-configurations-imds) + // The metadata options for the instances. For more information, see Configure + // the instance metadata options (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-config.html#launch-configurations-imds) // in the Amazon EC2 Auto Scaling User Guide. MetadataOptions *InstanceMetadataOptions `type:"structure"` // The tenancy of the instance, either default or dedicated. An instance with // dedicated tenancy runs on isolated, single-tenant hardware and can only be // launched into a VPC. - // - // For more information, see Configuring instance tenancy with Amazon EC2 Auto - // Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/auto-scaling-dedicated-instances.html) - // in the Amazon EC2 Auto Scaling User Guide. PlacementTenancy *string `min:"1" type:"string"` // The ID of the RAM disk associated with the AMI. RamdiskId *string `min:"1" type:"string"` // A list that contains the security groups to assign to the instances in the - // Auto Scaling group. For more information, see Security Groups for Your VPC - // (https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html) + // Auto Scaling group. For more information, see Control traffic to your Amazon + // Web Services resources using security groups (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html) // in the Amazon Virtual Private Cloud User Guide. SecurityGroups []*string `type:"list"` // The maximum hourly price to be paid for any Spot Instance launched to fulfill // the request. Spot Instances are launched when the price you specify exceeds // the current Spot price. For more information, see Requesting Spot Instances - // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-launch-spot-instances.html) + // for fault-tolerant and flexible applications (https://docs.aws.amazon.com/autoscaling/ec2/userguide/launch-template-spot-instances.html) // in the Amazon EC2 Auto Scaling User Guide. SpotPrice *string `min:"1" type:"string"` @@ -15722,7 +15716,7 @@ type LaunchTemplateOverrides struct { // The instance type, such as m3.xlarge. You must specify an instance type that // is supported in your requested Region and Availability Zones. For more information, // see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) - // in the Amazon Elastic Compute Cloud User Guide. + // in the Amazon EC2 User Guide for Linux Instances. // // You can specify up to 40 instance types per Auto Scaling group. InstanceType *string `min:"1" type:"string"` @@ -15748,8 +15742,8 @@ type LaunchTemplateOverrides struct { // For example, if there are two units remaining to fulfill capacity, and Amazon // EC2 Auto Scaling can only launch an instance with a WeightedCapacity of five // units, the instance is launched, and the desired capacity is exceeded by - // three units. For more information, see Configuring instance weighting for - // Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups-instance-weighting.html) + // three units. For more information, see Configure an Auto Scaling group to + // use instance weights (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-mixed-instances-groups-instance-weighting.html) // in the Amazon EC2 Auto Scaling User Guide. 
Value must be in the range of // 1–999. // @@ -15834,7 +15828,7 @@ func (s *LaunchTemplateOverrides) SetWeightedCapacity(v string) *LaunchTemplateO // Describes the launch template and the version of the launch template that // Amazon EC2 Auto Scaling uses to launch Amazon EC2 instances. For more information -// about launch templates, see Launch templates (https://docs.aws.amazon.com/autoscaling/ec2/userguide/LaunchTemplates.html) +// about launch templates, see Launch templates (https://docs.aws.amazon.com/autoscaling/ec2/userguide/launch-templates.html) // in the Amazon EC2 Auto Scaling User Guide. type LaunchTemplateSpecification struct { _ struct{} `type:"structure"` @@ -16087,7 +16081,7 @@ type LifecycleHookSpecification struct { // The ARN of the IAM role that allows the Auto Scaling group to publish to // the specified notification target. For information about creating this role, - // see Configure a notification target for a lifecycle hook (https://docs.aws.amazon.com/autoscaling/ec2/userguide/prepare-for-lifecycle-notifications.html#lifecycle-hook-notification-target) + // see Prepare to add a lifecycle hook to your Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/prepare-for-lifecycle-notifications.html) // in the Amazon EC2 Auto Scaling User Guide. // // Valid only if the notification target is an Amazon SNS topic or an Amazon @@ -17240,14 +17234,21 @@ type PredictiveScalingConfiguration struct { // // The following are possible values: // - // * HonorMaxCapacity - Amazon EC2 Auto Scaling cannot scale out capacity - // higher than the maximum capacity. The maximum capacity is enforced as - // a hard limit. + // * HonorMaxCapacity - Amazon EC2 Auto Scaling can't increase the maximum + // capacity of the group when the forecast capacity is close to or exceeds + // the maximum capacity. // - // * IncreaseMaxCapacity - Amazon EC2 Auto Scaling can scale out capacity - // higher than the maximum capacity when the forecast capacity is close to - // or exceeds the maximum capacity. The upper limit is determined by the - // forecasted capacity and the value for MaxCapacityBuffer. + // * IncreaseMaxCapacity - Amazon EC2 Auto Scaling can increase the maximum + // capacity of the group when the forecast capacity is close to or exceeds + // the maximum capacity. The upper limit is determined by the forecasted + // capacity and the value for MaxCapacityBuffer. + // + // Use caution when allowing the maximum capacity to be automatically increased. + // This can lead to more instances being launched than intended if the increased + // maximum capacity is not monitored and managed. The increased maximum capacity + // then becomes the new normal maximum capacity for the Auto Scaling group until + // you manually update it. The maximum capacity does not automatically decrease + // back to the original maximum. MaxCapacityBreachBehavior *string `type:"string" enum:"PredictiveScalingMaxCapacityBreachBehavior"` // The size of the capacity buffer to use when the forecast capacity is close @@ -17977,7 +17978,7 @@ func (s *PredictiveScalingPredefinedScalingMetric) SetResourceLabel(v string) *P // Describes a process type. // -// For more information, see Scaling processes (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html#process-types) +// For more information, see Types of processes (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html#process-types) // in the Amazon EC2 Auto Scaling User Guide. 
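The MaxCapacityBreachBehavior text above distinguishes HonorMaxCapacity from IncreaseMaxCapacity with its MaxCapacityBuffer. An illustrative predictive scaling configuration using those fields; the metric type and numeric values are assumptions for the example, not prescribed settings:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	cfg := &autoscaling.PredictiveScalingConfiguration{
		MetricSpecifications: []*autoscaling.PredictiveScalingMetricSpecification{{
			PredefinedMetricPairSpecification: &autoscaling.PredictiveScalingPredefinedMetricPair{
				PredefinedMetricType: aws.String("ASGCPUUtilization"),
			},
			TargetValue: aws.Float64(40),
		}},
		// Let forecasts raise the effective maximum, with a 10% buffer above
		// MaxCapacity, per the IncreaseMaxCapacity behavior described above.
		MaxCapacityBreachBehavior: aws.String("IncreaseMaxCapacity"),
		MaxCapacityBuffer:         aws.Int64(10),
	}
	fmt.Println(cfg)
}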
type ProcessType struct { _ struct{} `type:"structure"` @@ -18335,14 +18336,14 @@ type PutScalingPolicyInput struct { // cooldown. // // Valid only if the policy type is SimpleScaling. For more information, see - // Scaling cooldowns for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) + // Scaling cooldowns for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-scaling-cooldowns.html) // in the Amazon EC2 Auto Scaling User Guide. // // Default: None Cooldown *int64 `type:"integer"` // Indicates whether the scaling policy is enabled or disabled. The default - // is enabled. For more information, see Disabling a scaling policy for an Auto + // is enabled. For more information, see Disable a scaling policy for an Auto // Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-enable-disable-scaling-policy.html) // in the Amazon EC2 Auto Scaling User Guide. Enabled *bool `type:"boolean"` @@ -19122,7 +19123,7 @@ type RefreshPreferences struct { // number must be unique. To replace all instances in the Auto Scaling group, // the last number in the array must be 100. // - // For usage examples, see Adding checkpoints to an instance refresh (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-adding-checkpoints-instance-refresh.html) + // For usage examples, see Add checkpoints to an instance refresh (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-adding-checkpoints-instance-refresh.html) // in the Amazon EC2 Auto Scaling User Guide. CheckpointPercentages []*int64 `type:"list"` @@ -20132,9 +20133,9 @@ type SetInstanceHealthInput struct { // Set this to False, to have the call not respect the grace period associated // with the group. // - // For more information about the health check grace period, see CreateAutoScalingGroup - // (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_CreateAutoScalingGroup.html) - // in the Amazon EC2 Auto Scaling API Reference. + // For more information about the health check grace period, see Set the health + // check grace period for an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/health-check-grace-period.html) + // in the Amazon EC2 Auto Scaling User Guide. ShouldRespectGracePeriod *bool `type:"boolean"` } @@ -20590,7 +20591,7 @@ func (s SuspendProcessesOutput) GoString() string { // Describes an auto scaling process that has been suspended. // -// For more information, see Scaling processes (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html#process-types) +// For more information, see Types of processes (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html#process-types) // in the Amazon EC2 Auto Scaling User Guide. type SuspendedProcess struct { _ struct{} `type:"structure"` @@ -21442,7 +21443,7 @@ type UpdateAutoScalingGroupInput struct { // // The amount of time, in seconds, between one scaling activity ending and another // one starting due to simple scaling policies. For more information, see Scaling - // cooldowns for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) + // cooldowns for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-scaling-cooldowns.html) // in the Amazon EC2 Auto Scaling User Guide. 
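The RefreshPreferences documentation above notes that checkpoint percentages must be unique and end at 100 to replace every instance. A brief sketch using this SDK's field names; the percentages and delay are placeholder values chosen for the example:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	// Pause the instance refresh after 20% and 50% of instances are replaced;
	// the final 100 is required so that every instance is replaced.
	prefs := &autoscaling.RefreshPreferences{
		MinHealthyPercentage:  aws.Int64(90),
		CheckpointPercentages: []*int64{aws.Int64(20), aws.Int64(50), aws.Int64(100)},
		CheckpointDelay:       aws.Int64(3600), // seconds to wait at each checkpoint
	}
	fmt.Println(prefs)
}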
DefaultCooldown *int64 `type:"integer"` @@ -21474,8 +21475,8 @@ type UpdateAutoScalingGroupInput struct { // The unit of measurement for the value specified for desired capacity. Amazon // EC2 Auto Scaling supports DesiredCapacityType for attribute-based instance - // type selection only. For more information, see Creating an Auto Scaling group - // using attribute-based instance type selection (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-asg-instance-type-requirements.html) + // type selection only. For more information, see Create a mixed instances group + // using attribute-based instance type selection (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-mixed-instances-group-attribute-based-instance-type-selection.html) // in the Amazon EC2 Auto Scaling User Guide. // // By default, Amazon EC2 Auto Scaling specifies units, which translates into @@ -21497,7 +21498,7 @@ type UpdateAutoScalingGroupInput struct { // // The valid values are EC2, ELB, and VPC_LATTICE. EC2 is the default health // check and cannot be disabled. For more information, see Health checks for - // Auto Scaling instances (https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html) + // instances in an Auto Scaling group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-health-checks.html) // in the Amazon EC2 Auto Scaling User Guide. // // Only specify EC2 if you must clear a value that was previously set. @@ -21544,7 +21545,7 @@ type UpdateAutoScalingGroupInput struct { // Indicates whether newly launched instances are protected from termination // by Amazon EC2 Auto Scaling when scaling in. For more information about preventing - // instances from terminating on scale in, see Using instance scale-in protection + // instances from terminating on scale in, see Use instance scale-in protection // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-instance-protection.html) // in the Amazon EC2 Auto Scaling User Guide. NewInstancesProtectedFromScaleIn *bool `type:"boolean"` @@ -21566,7 +21567,7 @@ type UpdateAutoScalingGroupInput struct { // A policy or a list of policies that are used to select the instances to terminate. // The policies are executed in the order that you list them. For more information, - // see Work with Amazon EC2 Auto Scaling termination policies (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-termination-policies.html) + // see Configure termination policies for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-termination-policies.html) // in the Amazon EC2 Auto Scaling User Guide. // // Valid values: Default | AllocationStrategy | ClosestToNextInstanceHour | diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/doc.go b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/doc.go index 5a6d19efd..e2f78c724 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/doc.go @@ -7,7 +7,7 @@ // EC2 instances based on user-defined scaling policies, scheduled actions, // and health checks. 
// -// For more information, see the Amazon EC2 Auto Scaling User Guide (https://docs.aws.amazon.com/autoscaling/ec2/userguide/) +// For more information, see the Amazon EC2 Auto Scaling User Guide (https://docs.aws.amazon.com/autoscaling/ec2/userguide/what-is-amazon-ec2-auto-scaling.html) // and the Amazon EC2 Auto Scaling API Reference (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/Welcome.html). // // See https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01 for more information on this service. diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index dfe8301ef..b05879b01 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -196916,6 +196916,51 @@ const ( // InstanceTypeU7in32tb224xlarge is a InstanceType enum value InstanceTypeU7in32tb224xlarge = "u7in-32tb.224xlarge" + + // InstanceTypeU7ib12tb224xlarge is a InstanceType enum value + InstanceTypeU7ib12tb224xlarge = "u7ib-12tb.224xlarge" + + // InstanceTypeC7gnMetal is a InstanceType enum value + InstanceTypeC7gnMetal = "c7gn.metal" + + // InstanceTypeR8gMedium is a InstanceType enum value + InstanceTypeR8gMedium = "r8g.medium" + + // InstanceTypeR8gLarge is a InstanceType enum value + InstanceTypeR8gLarge = "r8g.large" + + // InstanceTypeR8gXlarge is a InstanceType enum value + InstanceTypeR8gXlarge = "r8g.xlarge" + + // InstanceTypeR8g2xlarge is a InstanceType enum value + InstanceTypeR8g2xlarge = "r8g.2xlarge" + + // InstanceTypeR8g4xlarge is a InstanceType enum value + InstanceTypeR8g4xlarge = "r8g.4xlarge" + + // InstanceTypeR8g8xlarge is a InstanceType enum value + InstanceTypeR8g8xlarge = "r8g.8xlarge" + + // InstanceTypeR8g12xlarge is a InstanceType enum value + InstanceTypeR8g12xlarge = "r8g.12xlarge" + + // InstanceTypeR8g16xlarge is a InstanceType enum value + InstanceTypeR8g16xlarge = "r8g.16xlarge" + + // InstanceTypeR8g24xlarge is a InstanceType enum value + InstanceTypeR8g24xlarge = "r8g.24xlarge" + + // InstanceTypeR8g48xlarge is a InstanceType enum value + InstanceTypeR8g48xlarge = "r8g.48xlarge" + + // InstanceTypeR8gMetal24xl is a InstanceType enum value + InstanceTypeR8gMetal24xl = "r8g.metal-24xl" + + // InstanceTypeR8gMetal48xl is a InstanceType enum value + InstanceTypeR8gMetal48xl = "r8g.metal-48xl" + + // InstanceTypeMac2M1ultraMetal is a InstanceType enum value + InstanceTypeMac2M1ultraMetal = "mac2-m1ultra.metal" ) // InstanceType_Values returns all elements of the InstanceType enum @@ -197726,6 +197771,21 @@ func InstanceType_Values() []string { InstanceTypeU7in16tb224xlarge, InstanceTypeU7in24tb224xlarge, InstanceTypeU7in32tb224xlarge, + InstanceTypeU7ib12tb224xlarge, + InstanceTypeC7gnMetal, + InstanceTypeR8gMedium, + InstanceTypeR8gLarge, + InstanceTypeR8gXlarge, + InstanceTypeR8g2xlarge, + InstanceTypeR8g4xlarge, + InstanceTypeR8g8xlarge, + InstanceTypeR8g12xlarge, + InstanceTypeR8g16xlarge, + InstanceTypeR8g24xlarge, + InstanceTypeR8g48xlarge, + InstanceTypeR8gMetal24xl, + InstanceTypeR8gMetal48xl, + InstanceTypeMac2M1ultraMetal, } } diff --git a/vendor/github.com/containers/image/v5/copy/compression.go b/vendor/github.com/containers/image/v5/copy/compression.go index 1706f7116..52556304f 100644 --- a/vendor/github.com/containers/image/v5/copy/compression.go +++ b/vendor/github.com/containers/image/v5/copy/compression.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "io" + "maps" internalblobinfocache 
"github.com/containers/image/v5/internal/blobinfocache" "github.com/containers/image/v5/manifest" @@ -12,7 +13,6 @@ import ( "github.com/containers/image/v5/types" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" - "golang.org/x/exp/maps" ) var ( @@ -287,7 +287,7 @@ func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCom maps.Copy(*annotations, d.uploadedAnnotations) } -// recordValidatedBlobData updates b.blobInfoCache with data about the created uploadedInfo (as returned by PutBlob) +// recordValidatedDigestData updates b.blobInfoCache with data about the created uploadedInfo (as returned by PutBlob) // and the original srcInfo (which the caller guarantees has been validated). // This must ONLY be called if all data has been validated by OUR code, and is not coming from third parties. func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInfo types.BlobInfo, srcInfo types.BlobInfo, diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go index ad1453fcb..996a4e2d7 100644 --- a/vendor/github.com/containers/image/v5/copy/copy.go +++ b/vendor/github.com/containers/image/v5/copy/copy.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "os" + "slices" "time" "github.com/containers/image/v5/docker/reference" @@ -25,7 +26,6 @@ import ( encconfig "github.com/containers/ocicrypt/config" digest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" "golang.org/x/sync/semaphore" "golang.org/x/term" ) diff --git a/vendor/github.com/containers/image/v5/copy/encryption.go b/vendor/github.com/containers/image/v5/copy/encryption.go index 1305676d7..4259d355b 100644 --- a/vendor/github.com/containers/image/v5/copy/encryption.go +++ b/vendor/github.com/containers/image/v5/copy/encryption.go @@ -2,13 +2,13 @@ package copy import ( "fmt" + "maps" + "slices" "strings" "github.com/containers/image/v5/types" "github.com/containers/ocicrypt" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" ) // isOciEncrypted returns a bool indicating if a mediatype is encrypted @@ -47,13 +47,17 @@ func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo desc := imgspecv1.Descriptor{ Annotations: stream.info.Annotations, } - reader, decryptedDigest, err := ocicrypt.DecryptLayer(ic.c.options.OciDecryptConfig, stream.reader, desc, false) + // DecryptLayer supposedly returns a digest of the decrypted stream. + // In pratice, that value is never set in the current implementation. + // And we shouldn’t use it anyway, because it is not trusted: encryption can be made to a public key, + // i.e. it doesn’t authenticate the origin of the metadata in any way. 
+ reader, _, err := ocicrypt.DecryptLayer(ic.c.options.OciDecryptConfig, stream.reader, desc, false) if err != nil { return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err) } stream.reader = reader - stream.info.Digest = decryptedDigest + stream.info.Digest = "" stream.info.Size = -1 maps.DeleteFunc(stream.info.Annotations, func(k string, _ string) bool { return strings.HasPrefix(k, "org.opencontainers.image.enc") diff --git a/vendor/github.com/containers/image/v5/copy/manifest.go b/vendor/github.com/containers/image/v5/copy/manifest.go index 60ea92aae..97837f9f2 100644 --- a/vendor/github.com/containers/image/v5/copy/manifest.go +++ b/vendor/github.com/containers/image/v5/copy/manifest.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "slices" "strings" internalManifest "github.com/containers/image/v5/internal/manifest" @@ -13,7 +14,6 @@ import ( "github.com/containers/image/v5/types" v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" ) // preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert. @@ -74,7 +74,7 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest srcType := in.srcMIMEType normalizedSrcType := manifest.NormalizedMIMEType(srcType) if srcType != normalizedSrcType { - logrus.Debugf("Source manifest MIME type %s, treating it as %s", srcType, normalizedSrcType) + logrus.Debugf("Source manifest MIME type %q, treating it as %q", srcType, normalizedSrcType) srcType = normalizedSrcType } @@ -237,7 +237,7 @@ func (c *copier) determineListConversion(currentListMIMEType string, destSupport } } - logrus.Debugf("Manifest list has MIME type %s, ordered candidate list [%s]", currentListMIMEType, strings.Join(destSupportedMIMETypes, ", ")) + logrus.Debugf("Manifest list has MIME type %q, ordered candidate list [%s]", currentListMIMEType, strings.Join(destSupportedMIMETypes, ", ")) if len(prioritizedTypes.list) == 0 { return "", nil, fmt.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes) } diff --git a/vendor/github.com/containers/image/v5/copy/multiple.go b/vendor/github.com/containers/image/v5/copy/multiple.go index a219b58b6..009a067ce 100644 --- a/vendor/github.com/containers/image/v5/copy/multiple.go +++ b/vendor/github.com/containers/image/v5/copy/multiple.go @@ -5,6 +5,8 @@ import ( "context" "errors" "fmt" + "maps" + "slices" "sort" "strings" @@ -17,8 +19,6 @@ import ( digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" ) type instanceCopyKind int diff --git a/vendor/github.com/containers/image/v5/copy/progress_bars.go b/vendor/github.com/containers/image/v5/copy/progress_bars.go index 1d092dba6..053650b8d 100644 --- a/vendor/github.com/containers/image/v5/copy/progress_bars.go +++ b/vendor/github.com/containers/image/v5/copy/progress_bars.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "math" "time" "github.com/containers/image/v5/internal/private" @@ -151,12 +152,18 @@ type blobChunkAccessorProxy struct { // The specified chunks must be not overlapping and sorted by their offset. // The readers must be fully consumed, in the order they are returned, before blocking // to read the next chunk. 
+// If the Length for the last chunk is set to math.MaxUint64, then it +// fully fetches the remaining data from the offset to the end of the blob. func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { start := time.Now() rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks) if err == nil { total := int64(0) for _, c := range chunks { + // do not update the progress bar if there is a chunk with unknown length. + if c.Length == math.MaxUint64 { + return rc, errs, err + } total += int64(c.Length) } s.bar.EwmaIncrInt64(total, time.Since(start)) diff --git a/vendor/github.com/containers/image/v5/copy/single.go b/vendor/github.com/containers/image/v5/copy/single.go index 3ecbe1ef7..9d544f561 100644 --- a/vendor/github.com/containers/image/v5/copy/single.go +++ b/vendor/github.com/containers/image/v5/copy/single.go @@ -7,6 +7,7 @@ import ( "fmt" "io" "reflect" + "slices" "strings" "sync" @@ -25,7 +26,6 @@ import ( imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" "github.com/vbauerster/mpb/v8" - "golang.org/x/exp/slices" ) // imageCopier tracks state specific to a single image (possibly an item of a manifest list) @@ -707,7 +707,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to canChangeLayerCompression := ic.src.CanChangeLayerCompression(srcInfo.MediaType) logrus.Debugf("Checking if we can reuse blob %s: general substitution = %v, compression for MIME type %q = %v", srcInfo.Digest, ic.canSubstituteBlobs, srcInfo.MediaType, canChangeLayerCompression) - canSubstitute := ic.canSubstituteBlobs && ic.src.CanChangeLayerCompression(srcInfo.MediaType) + canSubstitute := ic.canSubstituteBlobs && canChangeLayerCompression var requiredCompression *compressiontypes.Algorithm if ic.requireCompressionFormatMatch { diff --git a/vendor/github.com/containers/image/v5/directory/directory_dest.go b/vendor/github.com/containers/image/v5/directory/directory_dest.go index 3a8fdf0d8..c9b390318 100644 --- a/vendor/github.com/containers/image/v5/directory/directory_dest.go +++ b/vendor/github.com/containers/image/v5/directory/directory_dest.go @@ -15,6 +15,7 @@ import ( "github.com/containers/image/v5/internal/putblobdigest" "github.com/containers/image/v5/internal/signature" "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/fileutils" "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" ) @@ -263,7 +264,7 @@ func (d *dirImageDestination) Commit(context.Context, types.UnparsedImage) error // returns true if path exists func pathExists(path string) (bool, error) { - _, err := os.Stat(path) + err := fileutils.Exists(path) if err == nil { return true, nil } diff --git a/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go index 32ae1ae8a..69c1e0727 100644 --- a/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go +++ b/vendor/github.com/containers/image/v5/directory/explicitfilepath/path.go @@ -4,6 +4,8 @@ import ( "fmt" "os" "path/filepath" + + "github.com/containers/storage/pkg/fileutils" ) // ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path. 
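Both GetBlobAt hunks in this diff adopt math.MaxUint64 as a "read to the end of the blob" sentinel for the final chunk. The standalone sketch below is not part of the vendored code; the local chunk type stands in for the library's internal ImageSourceChunk, and the function mirrors how the docker_image_src.go hunk further down renders such a list into an HTTP Range header:

package main

import (
	"fmt"
	"math"
	"strings"
)

// chunk mirrors the shape of the library's internal ImageSourceChunk
// (byte offset plus length); the real type lives in an internal package.
type chunk struct {
	Offset uint64
	Length uint64
}

// rangeHeader renders chunks into an HTTP Range header value: a final chunk
// with Length == math.MaxUint64 becomes an open-ended "offset-" range that
// reads from the offset to the end of the blob.
func rangeHeader(chunks []chunk) string {
	vals := make([]string, 0, len(chunks))
	for _, c := range chunks {
		if c.Length == math.MaxUint64 {
			vals = append(vals, fmt.Sprintf("%d-", c.Offset))
			break // the open-ended chunk must come last; this sketch simply stops here
		}
		vals = append(vals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1))
	}
	return "bytes=" + strings.Join(vals, ",")
}

func main() {
	fmt.Println(rangeHeader([]chunk{
		{Offset: 0, Length: 1024},
		{Offset: 4096, Length: math.MaxUint64}, // everything from 4096 to EOF
	}))
	// Output: bytes=0-1023,4096-
}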
@@ -11,7 +13,7 @@ import ( // a non-existent name (but not a symlink pointing to a non-existent name) // This is intended as a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc. func ResolvePathToFullyExplicit(path string) (string, error) { - switch _, err := os.Lstat(path); { + switch err := fileutils.Lexists(path); { case err == nil: return resolveExistingPathToFullyExplicit(path) case os.IsNotExist(err): diff --git a/vendor/github.com/containers/image/v5/docker/archive/reader.go b/vendor/github.com/containers/image/v5/docker/archive/reader.go index 875a15257..70c4fbc71 100644 --- a/vendor/github.com/containers/image/v5/docker/archive/reader.go +++ b/vendor/github.com/containers/image/v5/docker/archive/reader.go @@ -78,7 +78,7 @@ func (r *Reader) List() ([][]types.ImageReference, error) { } nt, ok := parsedTag.(reference.NamedTagged) if !ok { - return nil, fmt.Errorf("Invalid tag %s (%s): does not contain a tag", tag, parsedTag.String()) + return nil, fmt.Errorf("Invalid tag %q (%s): does not contain a tag", tag, parsedTag.String()) } ref, err := newReference(r.path, nt, -1, r.archive, nil) if err != nil { diff --git a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go index 55431db13..9b880a2e7 100644 --- a/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go +++ b/vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go @@ -116,7 +116,7 @@ func imageLoad(ctx context.Context, c *client.Client, reader *io.PipeReader) err return fmt.Errorf("parsing docker load progress: %w", err) } if msg.Error != nil { - return fmt.Errorf("docker engine reported: %s", msg.Error.Message) + return fmt.Errorf("docker engine reported: %q", msg.Error.Message) } } return nil // No error reported = success diff --git a/vendor/github.com/containers/image/v5/docker/distribution_error.go b/vendor/github.com/containers/image/v5/docker/distribution_error.go index 11b42c6e0..0a0064576 100644 --- a/vendor/github.com/containers/image/v5/docker/distribution_error.go +++ b/vendor/github.com/containers/image/v5/docker/distribution_error.go @@ -21,10 +21,10 @@ import ( "fmt" "io" "net/http" + "slices" "github.com/docker/distribution/registry/api/errcode" dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge" - "golang.org/x/exp/slices" ) // errNoErrorsInBody is returned when an HTTP response body parses to an empty diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go index 737c778a0..94cbcb1d9 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -18,6 +18,7 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/internal/multierr" "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/internal/useragent" "github.com/containers/image/v5/manifest" @@ -25,6 +26,7 @@ import ( "github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/image/v5/pkg/tlsclientconfig" "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/homedir" "github.com/docker/distribution/registry/api/errcode" v2 "github.com/docker/distribution/registry/api/v2" @@ -186,7 +188,7 @@ func dockerCertDir(sys *types.SystemContext, 
hostPort string) (string, error) { } fullCertDirPath = filepath.Join(hostCertDir, hostPort) - _, err := os.Stat(fullCertDirPath) + err := fileutils.Exists(fullCertDirPath) if err == nil { break } @@ -497,8 +499,8 @@ func (c *dockerClient) resolveRequestURL(path string) (*url.URL, error) { // Checks if the auth headers in the response contain an indication of a failed // authorizdation because of an "insufficient_scope" error. If that's the case, // returns the required scope to be used for fetching a new token. -func needsRetryWithUpdatedScope(err error, res *http.Response) (bool, *authScope) { - if err == nil && res.StatusCode == http.StatusUnauthorized { +func needsRetryWithUpdatedScope(res *http.Response) (bool, *authScope) { + if res.StatusCode == http.StatusUnauthorized { challenges := parseAuthHeader(res.Header) for _, challenge := range challenges { if challenge.Scheme == "bearer" { @@ -557,6 +559,9 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method stri attempts := 0 for { res, err := c.makeRequestToResolvedURLOnce(ctx, method, requestURL, headers, stream, streamLen, auth, extraScope) + if err != nil { + return nil, err + } attempts++ // By default we use pre-defined scopes per operation. In @@ -572,27 +577,29 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method stri // We also cannot retry with a body (stream != nil) as stream // was already read if attempts == 1 && stream == nil && auth != noAuth { - if retry, newScope := needsRetryWithUpdatedScope(err, res); retry { + if retry, newScope := needsRetryWithUpdatedScope(res); retry { logrus.Debug("Detected insufficient_scope error, will retry request with updated scope") + res.Body.Close() // Note: This retry ignores extraScope. That’s, strictly speaking, incorrect, but we don’t currently // expect the insufficient_scope errors to happen for those callers. If that changes, we can add support // for more than one extra scope. 
res, err = c.makeRequestToResolvedURLOnce(ctx, method, requestURL, headers, stream, streamLen, auth, newScope) + if err != nil { + return nil, err + } extraScope = newScope } } - if res == nil || res.StatusCode != http.StatusTooManyRequests || // Only retry on StatusTooManyRequests, success or other failure is returned to caller immediately + + if res.StatusCode != http.StatusTooManyRequests || // Only retry on StatusTooManyRequests, success or other failure is returned to caller immediately stream != nil || // We can't retry with a body (which is not restartable in the general case) attempts == backoffNumIterations { - return res, err + return res, nil } // close response body before retry or context done res.Body.Close() - delay = parseRetryAfter(res, delay) - if delay > backoffMaxDelay { - delay = backoffMaxDelay - } + delay = min(parseRetryAfter(res, delay), backoffMaxDelay) logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", requestURL.Redacted(), delay.Seconds()) select { case <-ctx.Done(): @@ -671,10 +678,14 @@ func parseRegistryWarningHeader(header string) string { // warning-value = warn-code SP warn-agent SP warn-text [ SP warn-date ] // distribution-spec requires warn-code=299, warn-agent="-", warn-date missing - if !strings.HasPrefix(header, expectedPrefix) || !strings.HasSuffix(header, expectedSuffix) { + header, ok := strings.CutPrefix(header, expectedPrefix) + if !ok { + return "" + } + header, ok = strings.CutSuffix(header, expectedSuffix) + if !ok { return "" } - header = header[len(expectedPrefix) : len(header)-len(expectedSuffix)] // ”Recipients that process the value of a quoted-string MUST handle a quoted-pair // as if it were replaced by the octet following the backslash.”, so let’s do that… @@ -1009,11 +1020,7 @@ func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.R if remoteErrors == nil { return nil, 0, nil // fallback to non-external blob } - err := fmt.Errorf("failed fetching external blob from all urls: %w", remoteErrors[0]) - for _, e := range remoteErrors[1:] { - err = fmt.Errorf("%s, %w", err, e) - } - return nil, 0, err + return nil, 0, fmt.Errorf("failed fetching external blob from all urls: %w", multierr.Format("", ", ", "", remoteErrors)) } func getBlobSize(resp *http.Response) int64 { @@ -1090,6 +1097,11 @@ func isManifestUnknownError(err error) bool { if errors.As(err, &e) && e.ErrorCode() == errcode.ErrorCodeUnknown && e.Message == "Not Found" { return true } + // Harbor v2.10.2 + if errors.As(err, &e) && e.ErrorCode() == errcode.ErrorCodeUnknown && strings.Contains(strings.ToLower(e.Message), "not found") { + return true + } + // opencontainers/distribution-spec does not require the errcode.Error payloads to be used, // but specifies that the HTTP status must be 404. var unexpected *unexpectedHTTPResponseError diff --git a/vendor/github.com/containers/image/v5/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go index 4c80bb2b5..9741afc3f 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image.go @@ -14,6 +14,7 @@ import ( "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" ) // Image is a Docker-specific implementation of types.ImageCloser with a few extra methods @@ -90,6 +91,14 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types. 
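Two refactors in these hunks lean on newer standard-library helpers: strings.CutPrefix/CutSuffix replace HasPrefix checks followed by manual slicing, and the built-in min (Go 1.21) replaces the explicit backoff cap. A minimal, self-contained sketch of both patterns; the prefix/suffix constants and delay values are assumptions, and the quoted-pair handling of the real parser is deliberately omitted:

package main

import (
	"fmt"
	"strings"
	"time"
)

// parseWarning is a simplified, standalone version of the CutPrefix/CutSuffix
// rewrite shown above: each Cut call both tests for and strips the marker in
// one step, returning ok=false when the marker is missing.
func parseWarning(header string) string {
	const prefix = `299 - "`
	const suffix = `"`
	header, ok := strings.CutPrefix(header, prefix)
	if !ok {
		return ""
	}
	header, ok = strings.CutSuffix(header, suffix)
	if !ok {
		return ""
	}
	return header
}

func main() {
	fmt.Println(parseWarning(`299 - "pulling from deprecated registry"`))

	// The retry hunk above also clamps the computed delay with the built-in
	// min instead of an if statement; these durations are placeholders.
	delay, maxDelay := 90*time.Second, 60*time.Second
	fmt.Println(min(delay, maxDelay)) // 1m0s
}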
} for _, tag := range tagsHolder.Tags { if _, err := reference.WithTag(dr.ref, tag); err != nil { // Ensure the tag does not contain unexpected values + // Per https://github.com/containers/skopeo/issues/2346 , unknown versions of JFrog Artifactory, + // contrary to the tag format specified in + // https://github.com/opencontainers/distribution-spec/blob/8a871c8234977df058f1a14e299fe0a673853da2/spec.md?plain=1#L160 , + // include digests in the list. + if _, err := digest.Parse(tag); err == nil { + logrus.Debugf("Ignoring invalid tag %q matching a digest format", tag) + continue + } return nil, fmt.Errorf("registry returned invalid tag %q: %w", tag, err) } tags = append(tags, tag) diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index a29150eb7..7f59ea3fe 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -8,10 +8,12 @@ import ( "errors" "fmt" "io" + "maps" "net/http" "net/url" "os" "path/filepath" + "slices" "strings" "github.com/containers/image/v5/docker/reference" @@ -34,8 +36,6 @@ import ( "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" ) type dockerImageDestination struct { @@ -347,35 +347,24 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, } // Then try reusing blobs from other locations. - candidates := options.Cache.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, options.CanSubstitute) + candidates := options.Cache.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, blobinfocache.CandidateLocations2Options{ + CanSubstitute: options.CanSubstitute, + PossibleManifestFormats: options.PossibleManifestFormats, + RequiredCompression: options.RequiredCompression, + }) for _, candidate := range candidates { - var err error - compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName) - if err != nil { - logrus.Debugf("OperationAndAlgorithmForCompressor Failed: %v", err) - continue - } var candidateRepo reference.Named if !candidate.UnknownLocation { + var err error candidateRepo, err = parseBICLocationReference(candidate.Location) if err != nil { logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err) continue } } - if !impl.CandidateMatchesTryReusingBlobOptions(options, compressionAlgorithm) { - if !candidate.UnknownLocation { - logrus.Debugf("Ignoring candidate blob %s in %s, compression %s does not match required %s or MIME types %#v", candidate.Digest.String(), candidateRepo.Name(), - optionalCompressionName(compressionAlgorithm), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats) - } else { - logrus.Debugf("Ignoring candidate blob %s with no known location, compression %s does not match required %s or MIME types %#v", candidate.Digest.String(), - optionalCompressionName(compressionAlgorithm), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats) - } - continue - } if !candidate.UnknownLocation { - if candidate.CompressorName != blobinfocache.Uncompressed { - logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s in destination repo %s", candidate.Digest.String(), candidate.CompressorName, 
candidateRepo.Name()) + if candidate.CompressionAlgorithm != nil { + logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s in destination repo %s", candidate.Digest.String(), candidate.CompressionAlgorithm.Name(), candidateRepo.Name()) } else { logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo %s", candidate.Digest.String(), candidateRepo.Name()) } @@ -390,8 +379,8 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, continue } } else { - if candidate.CompressorName != blobinfocache.Uncompressed { - logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s with no location match, checking current repo", candidate.Digest.String(), candidate.CompressorName) + if candidate.CompressionAlgorithm != nil { + logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s with no location match, checking current repo", candidate.Digest.String(), candidate.CompressionAlgorithm.Name()) } else { logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo with no location match, checking current repo", candidate.Digest.String()) } @@ -442,8 +431,8 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, return true, private.ReusedBlob{ Digest: candidate.Digest, Size: size, - CompressionOperation: compressionOperation, - CompressionAlgorithm: compressionAlgorithm}, nil + CompressionOperation: candidate.CompressionOperation, + CompressionAlgorithm: candidate.CompressionAlgorithm}, nil } return false, private.ReusedBlob{}, nil diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go index 274cd6dd2..a2b6dbed7 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_src.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "io" + "math" "mime" "mime/multipart" "net/http" @@ -260,9 +261,15 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, } currentOffset += toSkip } + var reader io.Reader + if c.Length == math.MaxUint64 { + reader = body + } else { + reader = io.LimitReader(body, int64(c.Length)) + } s := signalCloseReader{ closed: make(chan struct{}), - stream: io.NopCloser(io.LimitReader(body, int64(c.Length))), + stream: io.NopCloser(reader), consumeStream: true, } streams <- s @@ -343,12 +350,24 @@ func parseMediaType(contentType string) (string, map[string]string, error) { // The specified chunks must be not overlapping and sorted by their offset. // The readers must be fully consumed, in the order they are returned, before blocking // to read the next chunk. +// If the Length for the last chunk is set to math.MaxUint64, then it +// fully fetches the remaining data from the offset to the end of the blob. func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { headers := make(map[string][]string) rangeVals := make([]string, 0, len(chunks)) + lastFound := false for _, c := range chunks { - rangeVals = append(rangeVals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1)) + if lastFound { + return nil, nil, fmt.Errorf("internal error: another chunk requested after an util-EOF chunk") + } + // If the Length is set to -1, then request anything after the specified offset. 
+ if c.Length == math.MaxUint64 { + lastFound = true + rangeVals = append(rangeVals, fmt.Sprintf("%d-", c.Offset)) + } else { + rangeVals = append(rangeVals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1)) + } } headers["Range"] = []string{fmt.Sprintf("bytes=%s", strings.Join(rangeVals, ","))} diff --git a/vendor/github.com/containers/image/v5/docker/docker_transport.go b/vendor/github.com/containers/image/v5/docker/docker_transport.go index 1c89302f4..c10463a43 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_transport.go +++ b/vendor/github.com/containers/image/v5/docker/docker_transport.go @@ -54,16 +54,12 @@ type dockerReference struct { // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference. func ParseReference(refString string) (types.ImageReference, error) { - if !strings.HasPrefix(refString, "//") { + refString, ok := strings.CutPrefix(refString, "//") + if !ok { return nil, fmt.Errorf("docker: image reference %s does not start with //", refString) } - // Check if ref has UnknownDigestSuffix suffixed to it - unknownDigest := false - if strings.HasSuffix(refString, UnknownDigestSuffix) { - unknownDigest = true - refString = strings.TrimSuffix(refString, UnknownDigestSuffix) - } - ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//")) + refString, unknownDigest := strings.CutSuffix(refString, UnknownDigestSuffix) + ref, err := reference.ParseNormalizedNamed(refString) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go index 6845893bf..362657596 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go @@ -231,7 +231,7 @@ func (r *Reader) openTarComponent(componentPath string) (io.ReadCloser, error) { } if !header.FileInfo().Mode().IsRegular() { - return nil, fmt.Errorf("Error reading tar archive component %s: not a regular file", header.Name) + return nil, fmt.Errorf("Error reading tar archive component %q: not a regular file", header.Name) } succeeded = true return &tarReadCloser{Reader: tarReader, backingFile: f}, nil @@ -262,7 +262,7 @@ func findTarComponent(inputFile io.Reader, componentPath string) (*tar.Reader, * func (r *Reader) readTarComponent(path string, limit int) ([]byte, error) { file, err := r.openTarComponent(path) if err != nil { - return nil, fmt.Errorf("loading tar component %s: %w", path, err) + return nil, fmt.Errorf("loading tar component %q: %w", path, err) } defer file.Close() bytes, err := iolimits.ReadAtMost(file, limit) diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go index b63b5316e..3364e6c9f 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go @@ -95,10 +95,10 @@ func (s *Source) ensureCachedDataIsPresentPrivate() error { } var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs. 
if err := json.Unmarshal(configBytes, &parsedConfig); err != nil { - return fmt.Errorf("decoding tar config %s: %w", tarManifest.Config, err) + return fmt.Errorf("decoding tar config %q: %w", tarManifest.Config, err) } if parsedConfig.RootFS == nil { - return fmt.Errorf("Invalid image config (rootFS is not set): %s", tarManifest.Config) + return fmt.Errorf("Invalid image config (rootFS is not set): %q", tarManifest.Config) } knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig) @@ -144,7 +144,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif } layerPath := path.Clean(tarManifest.Layers[i]) if _, ok := unknownLayerSizes[layerPath]; ok { - return nil, fmt.Errorf("Layer tarfile %s used for two different DiffID values", layerPath) + return nil, fmt.Errorf("Layer tarfile %q used for two different DiffID values", layerPath) } li := &layerInfo{ // A new element in each iteration path: layerPath, @@ -179,7 +179,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif // the slower method of checking if it's compressed. uncompressedStream, isCompressed, err := compression.AutoDecompress(t) if err != nil { - return nil, fmt.Errorf("auto-decompressing %s to determine its size: %w", layerPath, err) + return nil, fmt.Errorf("auto-decompressing %q to determine its size: %w", layerPath, err) } defer uncompressedStream.Close() @@ -187,7 +187,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif if isCompressed { uncompressedSize, err = io.Copy(io.Discard, uncompressedStream) if err != nil { - return nil, fmt.Errorf("reading %s to find its size: %w", layerPath, err) + return nil, fmt.Errorf("reading %q to find its size: %w", layerPath, err) } } li.size = uncompressedSize diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go index 7f6bd0e6b..883c06117 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go @@ -9,6 +9,7 @@ import ( "io" "os" "path/filepath" + "slices" "sync" "time" @@ -19,7 +20,6 @@ import ( "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" ) // Writer allows creating a (docker save)-formatted tar archive containing one or more images. @@ -164,7 +164,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De return fmt.Errorf("marshaling layer config: %w", err) } delete(layerConfig, "layer_id") - layerID := digest.Canonical.FromBytes(b).Hex() + layerID := digest.Canonical.FromBytes(b).Encoded() layerConfig["id"] = layerID configBytes, err := json.Marshal(layerConfig) @@ -309,10 +309,10 @@ func (w *Writer) Close() error { // NOTE: This is an internal implementation detail, not a format property, and can change // any time. func (w *Writer) configPath(configDigest digest.Digest) (string, error) { - if err := configDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in unexpected paths, so validate explicitly. + if err := configDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in unexpected paths, so validate explicitly. 
return "", err } - return configDigest.Hex() + ".json", nil + return configDigest.Encoded() + ".json", nil } // physicalLayerPath returns a path we choose for storing a layer with the specified digest @@ -320,15 +320,15 @@ func (w *Writer) configPath(configDigest digest.Digest) (string, error) { // NOTE: This is an internal implementation detail, not a format property, and can change // any time. func (w *Writer) physicalLayerPath(layerDigest digest.Digest) (string, error) { - if err := layerDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in unexpected paths, so validate explicitly. + if err := layerDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in unexpected paths, so validate explicitly. return "", err } - // Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way + // Note that this can't be e.g. filepath.Join(l.Digest.Encoded(), legacyLayerFileName); due to the way // writeLegacyMetadata constructs layer IDs differently from inputinfo.Digest values (as described // inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load) // tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers // in the root of the tarball. - return layerDigest.Hex() + ".tar", nil + return layerDigest.Encoded() + ".tar", nil } type tarFI struct { diff --git a/vendor/github.com/containers/image/v5/docker/registries_d.go b/vendor/github.com/containers/image/v5/docker/registries_d.go index 9d651d9bd..3619c3bae 100644 --- a/vendor/github.com/containers/image/v5/docker/registries_d.go +++ b/vendor/github.com/containers/image/v5/docker/registries_d.go @@ -12,6 +12,7 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/rootless" "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/homedir" "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" @@ -93,7 +94,7 @@ func registriesDirPathWithHomeDir(sys *types.SystemContext, homeDir string) stri return sys.RegistriesDirPath } userRegistriesDirPath := filepath.Join(homeDir, userRegistriesDir) - if _, err := os.Stat(userRegistriesDirPath); err == nil { + if err := fileutils.Exists(userRegistriesDirPath); err == nil { return userRegistriesDirPath } if sys != nil && sys.RootForImplicitAbsolutePaths != "" { @@ -139,7 +140,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) { if config.DefaultDocker != nil { if mergedConfig.DefaultDocker != nil { - return nil, fmt.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`, + return nil, fmt.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in %q and %q`, dockerDefaultMergedFrom, configPath) } mergedConfig.DefaultDocker = config.DefaultDocker @@ -148,7 +149,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) { for nsName, nsConfig := range config.Docker { // includes config.Docker == nil if _, ok := mergedConfig.Docker[nsName]; ok { - return nil, fmt.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`, + return nil, fmt.Errorf(`Error parsing signature storage configuration: "docker" namespace %q defined both in %q and %q`, nsName, nsMergedFrom[nsName], configPath) } 
mergedConfig.Docker[nsName] = nsConfig @@ -287,10 +288,10 @@ func (ns registryNamespace) signatureTopLevel(write bool) string { // base is not nil from the caller // NOTE: Keep this in sync with docs/signature-protocols.md! func lookasideStorageURL(base lookasideStorageBase, manifestDigest digest.Digest, index int) (*url.URL, error) { - if err := manifestDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in a path with ../, so validate explicitly. + if err := manifestDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in a path with ../, so validate explicitly. return nil, err } sigURL := *base - sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1) + sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Encoded(), index+1) return &sigURL, nil } diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go index 2767c3950..893aa959d 100644 --- a/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go +++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go @@ -1,8 +1,6 @@ package blobinfocache import ( - "github.com/containers/image/v5/pkg/compression" - compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" digest "github.com/opencontainers/go-digest" ) @@ -32,7 +30,7 @@ func (bic *v1OnlyBlobInfoCache) Close() { func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) { } -func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2 { +func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options CandidateLocations2Options) []BICReplacementCandidate2 { return nil } @@ -48,23 +46,3 @@ func CandidateLocationsFromV2(v2candidates []BICReplacementCandidate2) []types.B } return candidates } - -// OperationAndAlgorithmForCompressor returns CompressionOperation and CompressionAlgorithm -// values suitable for inclusion in a types.BlobInfo structure, based on the name of the -// compression algorithm, or Uncompressed, or UnknownCompression. This is typically used by -// TryReusingBlob() implementations to set values in the BlobInfo structure that they return -// upon success. 
-func OperationAndAlgorithmForCompressor(compressorName string) (types.LayerCompression, *compressiontypes.Algorithm, error) { - switch compressorName { - case Uncompressed: - return types.Decompress, nil, nil - case UnknownCompression: - return types.PreserveOriginal, nil, nil - default: - algo, err := compression.AlgorithmByName(compressorName) - if err == nil { - return types.Compress, &algo, nil - } - return types.PreserveOriginal, nil, err - } -} diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go index 4d3858ab8..c9e4aaa48 100644 --- a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go +++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go @@ -1,6 +1,7 @@ package blobinfocache import ( + compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" digest "github.com/opencontainers/go-digest" ) @@ -35,19 +36,24 @@ type BlobInfoCache2 interface { // CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) // that could possibly be reused within the specified (transport scope) (if they still // exist, which is not guaranteed). - // - // If !canSubstitute, the returned candidates will match the submitted digest exactly; if - // canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look + CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options CandidateLocations2Options) []BICReplacementCandidate2 +} + +// CandidateLocations2Options are used in CandidateLocations2. +type CandidateLocations2Options struct { + // If !CanSubstitute, the returned candidates will match the submitted digest exactly; if + // CanSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look // up variants of the blob which have the same uncompressed digest. - // - // The CompressorName fields in returned data must never be UnknownCompression. - CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2 + CanSubstitute bool + PossibleManifestFormats []string // If set, a set of possible manifest formats; at least one should support the reused layer + RequiredCompression *compressiontypes.Algorithm // If set, only reuse layers with a matching algorithm } // BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2. 
type BICReplacementCandidate2 struct { - Digest digest.Digest - CompressorName string // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression - UnknownLocation bool // is true when `Location` for this blob is not set - Location types.BICLocationReference // not set if UnknownLocation is set to `true` + Digest digest.Digest + CompressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed + CompressionAlgorithm *compressiontypes.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed + UnknownLocation bool // is true when `Location` for this blob is not set + Location types.BICLocationReference // not set if UnknownLocation is set to `true` } diff --git a/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go b/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go index c3234c377..01219e391 100644 --- a/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go +++ b/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go @@ -366,7 +366,7 @@ func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) if err := blobDigest.Validate(); err != nil { return "", err } - parts := append([]string{blobDigest.Hex()}, others...) + parts := append([]string{blobDigest.Encoded()}, others...) v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " "))) return hex.EncodeToString(v1IDHash[:]), nil } diff --git a/vendor/github.com/containers/image/v5/internal/image/manifest.go b/vendor/github.com/containers/image/v5/internal/image/manifest.go index 75e472aa7..ed57e08dd 100644 --- a/vendor/github.com/containers/image/v5/internal/image/manifest.go +++ b/vendor/github.com/containers/image/v5/internal/image/manifest.go @@ -76,7 +76,7 @@ func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src case imgspecv1.MediaTypeImageIndex: return manifestOCI1FromImageIndex(ctx, sys, src, manblob) default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values. 
- return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt) + return nil, fmt.Errorf("Unimplemented manifest MIME type %q", mt) } } diff --git a/vendor/github.com/containers/image/v5/internal/image/oci.go b/vendor/github.com/containers/image/v5/internal/image/oci.go index df0e8e417..aaef95ff3 100644 --- a/vendor/github.com/containers/image/v5/internal/image/oci.go +++ b/vendor/github.com/containers/image/v5/internal/image/oci.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "slices" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/iolimits" @@ -15,7 +16,6 @@ import ( ociencspec "github.com/containers/ocicrypt/spec" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/exp/slices" ) type manifestOCI1 struct { diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go index 553569a03..9b42cfbec 100644 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go @@ -3,40 +3,13 @@ package impl import ( "github.com/containers/image/v5/internal/manifest" "github.com/containers/image/v5/internal/private" - compression "github.com/containers/image/v5/pkg/compression/types" - "golang.org/x/exp/slices" ) -// CandidateMatchesTryReusingBlobOptions validates if compression is required by the caller while selecting a blob, if it is required -// then function performs a match against the compression requested by the caller and compression of existing blob -// (which can be nil to represent uncompressed or unknown) -func CandidateMatchesTryReusingBlobOptions(options private.TryReusingBlobOptions, candidateCompression *compression.Algorithm) bool { - if options.RequiredCompression != nil { - if options.RequiredCompression.Name() == compression.ZstdChunkedAlgorithmName { - // HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs. - // The caller must re-compress to build those annotations. - return false - } - if candidateCompression == nil || - (options.RequiredCompression.Name() != candidateCompression.Name() && options.RequiredCompression.Name() != candidateCompression.BaseVariantName()) { - return false - } - } - - // For candidateCompression == nil, we can’t tell the difference between “uncompressed” and “unknown”; - // and “uncompressed” is acceptable in all known formats (well, it seems to work in practice for schema1), - // so don’t impose any restrictions if candidateCompression == nil - if options.PossibleManifestFormats != nil && candidateCompression != nil { - if !slices.ContainsFunc(options.PossibleManifestFormats, func(mt string) bool { - return manifest.MIMETypeSupportsCompressionAlgorithm(mt, *candidateCompression) - }) { - return false - } - } - - return true -} - +// OriginalCandidateMatchesTryReusingBlobOptions returns true if the original blob passed to TryReusingBlobWithOptions +// is acceptable based on opts. 
func OriginalCandidateMatchesTryReusingBlobOptions(opts private.TryReusingBlobOptions) bool { - return CandidateMatchesTryReusingBlobOptions(opts, opts.OriginalCompression) + return manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{ + PossibleManifestFormats: opts.PossibleManifestFormats, + RequiredCompression: opts.RequiredCompression, + }, opts.OriginalCompression) } diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go b/vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go index 15aee6d42..286ae524b 100644 --- a/vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go +++ b/vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go @@ -39,6 +39,8 @@ func (stub NoGetBlobAtInitialize) SupportsGetBlobAt() bool { // The specified chunks must be not overlapping and sorted by their offset. // The readers must be fully consumed, in the order they are returned, before blocking // to read the next chunk. +// If the Length for the last chunk is set to math.MaxUint64, then it +// fully fetches the remaining data from the offset to the end of the blob. func (stub NoGetBlobAtInitialize) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { return nil, nil, fmt.Errorf("internal error: GetBlobAt is not supported by the %q transport", stub.transportName) } diff --git a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go index 7ce5bb069..f847fa9cc 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go @@ -3,13 +3,13 @@ package manifest import ( "encoding/json" "fmt" + "slices" platform "github.com/containers/image/v5/internal/pkg/platform" compression "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/exp/slices" ) // Schema2PlatformSpec describes the platform which a particular manifest is @@ -164,7 +164,7 @@ func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest. } } } - return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS) + return "", fmt.Errorf("no image found in manifest list for architecture %q, variant %q, OS %q", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS) } // Serialize returns the list in a blob format. 
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/list.go b/vendor/github.com/containers/image/v5/internal/manifest/list.go index 1d60da752..1c614d124 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/list.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/list.go @@ -129,5 +129,5 @@ func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) { case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType: return nil, fmt.Errorf("Treating single images as manifest lists is not implemented") } - return nil, fmt.Errorf("Unimplemented manifest list MIME type %s (normalized as %s)", manifestMIMEType, normalized) + return nil, fmt.Errorf("Unimplemented manifest list MIME type %q (normalized as %q)", manifestMIMEType, normalized) } diff --git a/vendor/github.com/containers/image/v5/internal/manifest/manifest.go b/vendor/github.com/containers/image/v5/internal/manifest/manifest.go index c77db7522..ee0ddc772 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/manifest.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/manifest.go @@ -2,6 +2,7 @@ package manifest import ( "encoding/json" + "slices" compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/libtrust" @@ -192,3 +193,39 @@ func MIMETypeSupportsCompressionAlgorithm(mimeType string, algo compressiontypes return false } } + +// ReuseConditions are an input to CandidateCompressionMatchesReuseConditions; +// it is a struct to allow longer and better-documented field names. +type ReuseConditions struct { + PossibleManifestFormats []string // If set, a set of possible manifest formats; at least one should support the reused layer + RequiredCompression *compressiontypes.Algorithm // If set, only reuse layers with a matching algorithm +} + +// CandidateCompressionMatchesReuseConditions returns true if a layer with candidateCompression +// (which can be nil to represent uncompressed or unknown) matches reuseConditions. +func CandidateCompressionMatchesReuseConditions(c ReuseConditions, candidateCompression *compressiontypes.Algorithm) bool { + if c.RequiredCompression != nil { + if c.RequiredCompression.Name() == compressiontypes.ZstdChunkedAlgorithmName { + // HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs. + // The caller must re-compress to build those annotations. 
+ return false + } + if candidateCompression == nil || + (c.RequiredCompression.Name() != candidateCompression.Name() && c.RequiredCompression.Name() != candidateCompression.BaseVariantName()) { + return false + } + } + + // For candidateCompression == nil, we can’t tell the difference between “uncompressed” and “unknown”; + // and “uncompressed” is acceptable in all known formats (well, it seems to work in practice for schema1), + // so don’t impose any restrictions if candidateCompression == nil + if c.PossibleManifestFormats != nil && candidateCompression != nil { + if !slices.ContainsFunc(c.PossibleManifestFormats, func(mt string) bool { + return MIMETypeSupportsCompressionAlgorithm(mt, *candidateCompression) + }) { + return false + } + } + + return true +} diff --git a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go index 829852a83..67b4cfeba 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go @@ -3,8 +3,10 @@ package manifest import ( "encoding/json" "fmt" + "maps" "math" "runtime" + "slices" platform "github.com/containers/image/v5/internal/pkg/platform" compression "github.com/containers/image/v5/pkg/compression/types" @@ -12,8 +14,6 @@ import ( "github.com/opencontainers/go-digest" imgspec "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" ) const ( @@ -260,7 +260,7 @@ func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzi if bestMatch != nil { return bestMatch.digest, nil } - return "", fmt.Errorf("no image found in image index for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS) + return "", fmt.Errorf("no image found in image index for architecture %q, variant %q, OS %q", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS) } func (index *OCI1Index) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) { diff --git a/vendor/github.com/containers/image/v5/internal/multierr/multierr.go b/vendor/github.com/containers/image/v5/internal/multierr/multierr.go new file mode 100644 index 000000000..1341925c1 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/multierr/multierr.go @@ -0,0 +1,34 @@ +package multierr + +import ( + "fmt" + "strings" +) + +// Format creates an error value from the input array (which should not be empty) +// If the input contains a single error value, it is returned as is. +// If there are multiple, they are formatted as a multi-error (with Unwrap() []error) with the provided initial, separator, and ending strings. +// +// Typical usage: +// +// var errs []error +// // … +// errs = append(errs, …) +// // … +// if errs != nil { return multierr.Format("Failures doing $FOO", "\n* ", "", errs)} +func Format(first, middle, last string, errs []error) error { + switch len(errs) { + case 0: + return fmt.Errorf("internal error: multierr.Format called with 0 errors") + case 1: + return errs[0] + default: + // We have to do this — and this function only really exists — because fmt.Errorf(format, errs...) 
is invalid: + // []error is not a valid parameter to a function expecting []any + anyErrs := make([]any, 0, len(errs)) + for _, e := range errs { + anyErrs = append(anyErrs, e) + } + return fmt.Errorf(first+"%w"+strings.Repeat(middle+"%w", len(errs)-1)+last, anyErrs...) + } +} diff --git a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go index 94002d6d4..afdce1d3d 100644 --- a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go +++ b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go @@ -21,12 +21,12 @@ import ( "fmt" "os" "runtime" + "slices" "strings" "github.com/containers/image/v5/types" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" ) // For Linux, the kernel has already detected the ABI, ISA and Features. @@ -64,8 +64,8 @@ func getCPUInfo(pattern string) (info string, err error) { return "", fmt.Errorf("getCPUInfo for pattern: %s not found", pattern) } -func getCPUVariantWindows(arch string) string { - // Windows only supports v7 for ARM32 and v8 for ARM64 and so we can use +func getCPUVariantDarwinWindows(arch string) string { + // Darwin and Windows only support v7 for ARM32 and v8 for ARM64 and so we can use // runtime.GOARCH to determine the variants var variant string switch arch { @@ -133,8 +133,8 @@ func getCPUVariantArm() string { } func getCPUVariant(os string, arch string) string { - if os == "windows" { - return getCPUVariantWindows(arch) + if os == "darwin" || os == "windows" { + return getCPUVariantDarwinWindows(arch) } if arch == "arm" || arch == "arm64" { return getCPUVariantArm() diff --git a/vendor/github.com/containers/image/v5/internal/private/private.go b/vendor/github.com/containers/image/v5/internal/private/private.go index 562adbea8..63fb9326d 100644 --- a/vendor/github.com/containers/image/v5/internal/private/private.go +++ b/vendor/github.com/containers/image/v5/internal/private/private.go @@ -143,7 +143,11 @@ type ReusedBlob struct { // ImageSourceChunk is a portion of a blob. // This API is experimental and can be changed without bumping the major version number. type ImageSourceChunk struct { + // Offset specifies the starting position of the chunk within the source blob. Offset uint64 + + // Length specifies the size of the chunk. If it is set to math.MaxUint64, + // then it refers to all the data from Offset to the end of the blob. Length uint64 } @@ -154,6 +158,8 @@ type BlobChunkAccessor interface { // The specified chunks must be not overlapping and sorted by their offset. // The readers must be fully consumed, in the order they are returned, before blocking // to read the next chunk. + // If the Length for the last chunk is set to math.MaxUint64, then it + // fully fetches the remaining data from the offset to the end of the blob. 
GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error) } diff --git a/vendor/github.com/containers/image/v5/internal/signature/sigstore.go b/vendor/github.com/containers/image/v5/internal/signature/sigstore.go index b8a9b366c..8025cd270 100644 --- a/vendor/github.com/containers/image/v5/internal/signature/sigstore.go +++ b/vendor/github.com/containers/image/v5/internal/signature/sigstore.go @@ -1,10 +1,9 @@ package signature import ( + "bytes" "encoding/json" - - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" + "maps" ) const ( @@ -45,7 +44,7 @@ type sigstoreJSONRepresentation struct { func SigstoreFromComponents(untrustedMimeType string, untrustedPayload []byte, untrustedAnnotations map[string]string) Sigstore { return Sigstore{ untrustedMIMEType: untrustedMimeType, - untrustedPayload: slices.Clone(untrustedPayload), + untrustedPayload: bytes.Clone(untrustedPayload), untrustedAnnotations: maps.Clone(untrustedAnnotations), } } @@ -79,7 +78,7 @@ func (s Sigstore) UntrustedMIMEType() string { return s.untrustedMIMEType } func (s Sigstore) UntrustedPayload() []byte { - return slices.Clone(s.untrustedPayload) + return bytes.Clone(s.untrustedPayload) } func (s Sigstore) UntrustedAnnotations() map[string]string { diff --git a/vendor/github.com/containers/image/v5/internal/signature/simple.go b/vendor/github.com/containers/image/v5/internal/signature/simple.go index c09370406..76f270b48 100644 --- a/vendor/github.com/containers/image/v5/internal/signature/simple.go +++ b/vendor/github.com/containers/image/v5/internal/signature/simple.go @@ -1,6 +1,6 @@ package signature -import "golang.org/x/exp/slices" +import "bytes" // SimpleSigning is a “simple signing” signature. type SimpleSigning struct { @@ -10,7 +10,7 @@ type SimpleSigning struct { // SimpleSigningFromBlob converts a “simple signing” signature into a SimpleSigning object. func SimpleSigningFromBlob(blobChunk []byte) SimpleSigning { return SimpleSigning{ - untrustedSignature: slices.Clone(blobChunk), + untrustedSignature: bytes.Clone(blobChunk), } } @@ -21,9 +21,9 @@ func (s SimpleSigning) FormatID() FormatID { // blobChunk returns a representation of signature as a []byte, suitable for long-term storage. // Almost everyone should use signature.Blob() instead. 
func (s SimpleSigning) blobChunk() ([]byte, error) { - return slices.Clone(s.untrustedSignature), nil + return bytes.Clone(s.untrustedSignature), nil } func (s SimpleSigning) UntrustedSignature() []byte { - return slices.Clone(s.untrustedSignature) + return bytes.Clone(s.untrustedSignature) } diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go index de4628115..8d9d5795f 100644 --- a/vendor/github.com/containers/image/v5/manifest/common.go +++ b/vendor/github.com/containers/image/v5/manifest/common.go @@ -67,15 +67,15 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)} } if name != mtsUncompressed { - return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %s", name, mimeType)} + return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %q", name, mimeType)} } // We can't very well say “the idea of no compression is unknown” return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)} } if algorithm != nil { - return "", fmt.Errorf("unsupported MIME type for compression: %s", mimeType) + return "", fmt.Errorf("unsupported MIME type for compression: %q", mimeType) } - return "", fmt.Errorf("unsupported MIME type for decompression: %s", mimeType) + return "", fmt.Errorf("unsupported MIME type for decompression: %q", mimeType) } // updatedMIMEType returns the result of applying edits in updated (MediaType, CompressionOperation) to diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go index 762815570..222aa896e 100644 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go @@ -4,6 +4,7 @@ import ( "encoding/json" "errors" "fmt" + "slices" "strings" "time" @@ -15,7 +16,6 @@ import ( "github.com/containers/storage/pkg/regexp" "github.com/docker/docker/api/types/versions" "github.com/opencontainers/go-digest" - "golang.org/x/exp/slices" ) // Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1. @@ -221,7 +221,7 @@ func (m *Schema1) fixManifestLayers() error { m.History = slices.Delete(m.History, i, i+1) m.ExtractedV1Compatibility = slices.Delete(m.ExtractedV1Compatibility, i, i+1) } else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID { - return fmt.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent) + return fmt.Errorf("Invalid parent ID. 
Expected %v, got %q", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent) } } return nil @@ -342,5 +342,5 @@ func (m *Schema1) ImageID(diffIDs []digest.Digest) (string, error) { if err != nil { return "", err } - return digest.FromBytes(image).Hex(), nil + return digest.FromBytes(image).Encoded(), nil } diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go index 20b721f4c..818166834 100644 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go @@ -54,9 +54,10 @@ type Schema2HealthConfig struct { Test []string `json:",omitempty"` // Zero means to inherit. Durations are expressed as integer nanoseconds. - StartPeriod time.Duration `json:",omitempty"` // StartPeriod is the time to wait after starting before running the first check. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + StartPeriod time.Duration `json:",omitempty"` // StartPeriod is the time to wait after starting before running the first check. + StartInterval time.Duration `json:",omitempty"` // StartInterval is the time to wait between checks during the start period. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. // Retries is the number of consecutive failures needed to consider a container as unhealthy. // Zero means inherit. @@ -294,7 +295,7 @@ func (m *Schema2) ImageID([]digest.Digest) (string, error) { if err := m.ConfigDescriptor.Digest.Validate(); err != nil { return "", err } - return m.ConfigDescriptor.Digest.Hex(), nil + return m.ConfigDescriptor.Digest.Encoded(), nil } // CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image diff --git a/vendor/github.com/containers/image/v5/manifest/manifest.go b/vendor/github.com/containers/image/v5/manifest/manifest.go index 828b8da0b..d8f37eb45 100644 --- a/vendor/github.com/containers/image/v5/manifest/manifest.go +++ b/vendor/github.com/containers/image/v5/manifest/manifest.go @@ -166,5 +166,5 @@ func FromBlob(manblob []byte, mt string) (Manifest, error) { return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented") } // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. 
- return nil, fmt.Errorf("Unimplemented manifest MIME type %s (normalized as %s)", mt, nmt) + return nil, fmt.Errorf("Unimplemented manifest MIME type %q (normalized as %q)", mt, nmt) } diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go index 548994ffa..497cf476e 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci.go +++ b/vendor/github.com/containers/image/v5/manifest/oci.go @@ -3,6 +3,7 @@ package manifest import ( "encoding/json" "fmt" + "slices" "strings" "github.com/containers/image/v5/internal/manifest" @@ -12,7 +13,6 @@ import ( "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/exp/slices" ) // BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor. @@ -167,7 +167,7 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { // an error if the mediatype does not support encryption func getEncryptedMediaType(mediatype string) (string, error) { if slices.Contains(strings.Split(mediatype, "+")[1:], "encrypted") { - return "", fmt.Errorf("unsupported mediaType: %v already encrypted", mediatype) + return "", fmt.Errorf("unsupported mediaType: %q already encrypted", mediatype) } unsuffixedMediatype := strings.Split(mediatype, "+")[0] switch unsuffixedMediatype { @@ -176,17 +176,18 @@ func getEncryptedMediaType(mediatype string) (string, error) { return mediatype + "+encrypted", nil } - return "", fmt.Errorf("unsupported mediaType to encrypt: %v", mediatype) + return "", fmt.Errorf("unsupported mediaType to encrypt: %q", mediatype) } -// getEncryptedMediaType will return the mediatype to its encrypted counterpart and return +// getDecryptedMediaType will return the mediatype to its encrypted counterpart and return // an error if the mediatype does not support decryption func getDecryptedMediaType(mediatype string) (string, error) { - if !strings.HasSuffix(mediatype, "+encrypted") { - return "", fmt.Errorf("unsupported mediaType to decrypt: %v", mediatype) + res, ok := strings.CutSuffix(mediatype, "+encrypted") + if !ok { + return "", fmt.Errorf("unsupported mediaType to decrypt: %q", mediatype) } - return strings.TrimSuffix(mediatype, "+encrypted"), nil + return res, nil } // Serialize returns the manifest in a blob format. @@ -259,7 +260,7 @@ func (m *OCI1) ImageID(diffIDs []digest.Digest) (string, error) { if err := m.Config.Digest.Validate(); err != nil { return "", err } - return m.Config.Digest.Hex(), nil + return m.Config.Digest.Encoded(), nil } // CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_src.go b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go index ee8409896..d68814a17 100644 --- a/vendor/github.com/containers/image/v5/oci/archive/oci_src.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_src.go @@ -149,6 +149,8 @@ func (s *ociArchiveImageSource) SupportsGetBlobAt() bool { // The specified chunks must be not overlapping and sorted by their offset. // The readers must be fully consumed, in the order they are returned, before blocking // to read the next chunk. +// If the Length for the last chunk is set to math.MaxUint64, then it +// fully fetches the remaining data from the offset to the end of the blob. 
func (s *ociArchiveImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { return s.unpackedSrc.GetBlobAt(ctx, info, chunks) } diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go b/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go index 8dd54f255..bcf257df6 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_delete.go @@ -6,13 +6,13 @@ import ( "fmt" "io/fs" "os" + "slices" "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/types" digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" ) // DeleteImage deletes the named image from the directory, if supported. diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go index 305d8c9c7..a096afe0f 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_dest.go @@ -6,9 +6,11 @@ import ( "errors" "fmt" "io" + "io/fs" "os" "path/filepath" "runtime" + "slices" "github.com/containers/image/v5/internal/imagedestination/impl" "github.com/containers/image/v5/internal/imagedestination/stubs" @@ -16,10 +18,10 @@ import ( "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/putblobdigest" "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/fileutils" digest "github.com/opencontainers/go-digest" imgspec "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/exp/slices" ) type ociImageDestination struct { @@ -301,7 +303,7 @@ func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error } func ensureDirectoryExists(path string) error { - if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { + if err := fileutils.Exists(path); err != nil && errors.Is(err, fs.ErrNotExist) { if err := os.MkdirAll(path, 0755); err != nil { return err } @@ -317,7 +319,7 @@ func ensureParentDirectoryExists(path string) error { // indexExists checks whether the index location specified in the OCI reference exists. 
// The implementation is opinionated, since in case of unexpected errors false is returned func indexExists(ref ociReference) bool { - _, err := os.Stat(ref.indexPath()) + err := fileutils.Exists(ref.indexPath()) if err == nil { return true } diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go index f5f1debc9..adfd20644 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_src.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_src.go @@ -182,19 +182,19 @@ func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io hasSupportedURL = true req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil) if err != nil { - errWrap = fmt.Errorf("fetching %s failed %s: %w", u, err.Error(), errWrap) + errWrap = fmt.Errorf("fetching %q failed %s: %w", u, err.Error(), errWrap) continue } resp, err := s.client.Do(req) if err != nil { - errWrap = fmt.Errorf("fetching %s failed %s: %w", u, err.Error(), errWrap) + errWrap = fmt.Errorf("fetching %q failed %s: %w", u, err.Error(), errWrap) continue } if resp.StatusCode != http.StatusOK { resp.Body.Close() - errWrap = fmt.Errorf("fetching %s failed, response code not 200: %w", u, errWrap) + errWrap = fmt.Errorf("fetching %q failed, response code not 200: %w", u, errWrap) continue } diff --git a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go index 1e26dc524..816dfa7a1 100644 --- a/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go +++ b/vendor/github.com/containers/image/v5/oci/layout/oci_transport.go @@ -256,5 +256,5 @@ func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (st } else { blobDir = filepath.Join(ref.dir, imgspecv1.ImageBlobsDir) } - return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil + return filepath.Join(blobDir, digest.Algorithm().String(), digest.Encoded()), nil } diff --git a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go index c6498f6ca..fff586bee 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift-copies.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift-copies.go @@ -14,13 +14,14 @@ import ( "path" "path/filepath" "reflect" + "slices" "strings" "time" "dario.cat/mergo" + "github.com/containers/image/v5/internal/multierr" "github.com/containers/storage/pkg/homedir" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" "gopkg.in/yaml.v3" ) @@ -459,12 +460,6 @@ func (config *directClientConfig) getCluster() clientcmdCluster { return mergedClusterInfo } -// aggregateErr is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate. -// This helper implements the error and Errors interfaces. Keeping it private -// prevents people from making an aggregate of 0 errors, which is not -// an error, but does satisfy the error interface. -type aggregateErr []error - // newAggregate is a modified copy of k8s.io/apimachinery/pkg/util/errors.NewAggregate. // NewAggregate converts a slice of errors into an Aggregate interface, which // is itself an implementation of the error interface. 
If the slice is empty, @@ -485,29 +480,9 @@ func newAggregate(errlist []error) error { if len(errs) == 0 { return nil } - return aggregateErr(errs) + return multierr.Format("[", ", ", "]", errs) } -// Error is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.Error. -// Error is part of the error interface. -func (agg aggregateErr) Error() string { - if len(agg) == 0 { - // This should never happen, really. - return "" - } - if len(agg) == 1 { - return agg[0].Error() - } - result := fmt.Sprintf("[%s", agg[0].Error()) - for i := 1; i < len(agg); i++ { - result += fmt.Sprintf(", %s", agg[i].Error()) - } - result += "]" - return result -} - -// REMOVED: aggregateErr.Errors - // errConfigurationInvalid is a modified? copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.errConfigurationInvalid. // errConfigurationInvalid is a set of errors indicating the configuration is invalid. type errConfigurationInvalid []error @@ -578,7 +553,7 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) { continue } if err != nil { - errlist = append(errlist, fmt.Errorf("loading config file \"%s\": %w", filename, err)) + errlist = append(errlist, fmt.Errorf("loading config file %q: %w", filename, err)) continue } diff --git a/vendor/github.com/containers/image/v5/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go index 2c69afbe9..63ca8371e 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift.go @@ -152,7 +152,7 @@ func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName str func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) { _, repo, gotRepo := strings.Cut(ref, "/") if !gotRepo { - return "", fmt.Errorf("Invalid format of docker reference %s: missing '/'", ref) + return "", fmt.Errorf("Invalid format of docker reference %q: missing '/'", ref) } return reference.Domain(c.ref.dockerReference) + "/" + repo, nil } diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go index 656f4518d..4170d6e20 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go @@ -9,6 +9,7 @@ import ( "fmt" "io" "net/http" + "slices" "github.com/containers/image/v5/docker" "github.com/containers/image/v5/docker/reference" @@ -21,7 +22,6 @@ import ( "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" - "golang.org/x/exp/slices" ) type openshiftImageDestination struct { diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go index 37387bfdc..951b5d098 100644 --- a/vendor/github.com/containers/image/v5/ostree/ostree_dest.go +++ b/vendor/github.com/containers/image/v5/ostree/ostree_dest.go @@ -11,6 +11,7 @@ import ( "errors" "fmt" "io" + "io/fs" "os" "os/exec" "path/filepath" @@ -29,6 +30,7 @@ import ( "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/fileutils" "github.com/klauspost/pgzip" "github.com/opencontainers/go-digest" selinux "github.com/opencontainers/selinux/go-selinux" @@ -162,7 +164,7 @@ func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream return 
private.UploadedBlob{}, err } - hash := blobDigest.Hex() + hash := blobDigest.Encoded() d.blobs[hash] = &blobToImport{Size: size, Digest: blobDigest, BlobPath: blobPath} return private.UploadedBlob{Digest: blobDigest, Size: size}, nil } @@ -280,8 +282,8 @@ func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest, func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle, repo *otbuiltin.Repo, blob *blobToImport) error { // TODO: This can take quite some time, and should ideally be cancellable using a context.Context. - ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex()) - destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root") + ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Encoded()) + destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Encoded(), "root") if err := ensureDirectoryExists(destinationPath); err != nil { return err } @@ -321,7 +323,7 @@ func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle, } func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobToImport) error { - ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex()) + ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Encoded()) destinationPath := filepath.Dir(blob.BlobPath) return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)}) @@ -346,10 +348,10 @@ func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context, d.repo = repo } - if err := info.Digest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, so validate explicitly. + if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly. return false, private.ReusedBlob{}, err } - branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex()) + branch := fmt.Sprintf("ociimage/%s", info.Digest.Encoded()) found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest") if err != nil || !found { @@ -477,7 +479,7 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er if err := layer.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly. return err } - hash := layer.Digest.Hex() + hash := layer.Digest.Encoded() if err = checkLayer(hash); err != nil { return err } @@ -486,7 +488,7 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er if err := layer.BlobSum.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly. 
return err } - hash := layer.BlobSum.Hex() + hash := layer.BlobSum.Encoded() if err = checkLayer(hash); err != nil { return err } @@ -514,7 +516,7 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er } func ensureDirectoryExists(path string) error { - if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { + if err := fileutils.Exists(path); err != nil && errors.Is(err, fs.ErrNotExist) { if err := os.MkdirAll(path, 0755); err != nil { return err } diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_src.go b/vendor/github.com/containers/image/v5/ostree/ostree_src.go index a9568c2d3..85a89f253 100644 --- a/vendor/github.com/containers/image/v5/ostree/ostree_src.go +++ b/vendor/github.com/containers/image/v5/ostree/ostree_src.go @@ -190,7 +190,7 @@ func (o ostreeReader) Read(p []byte) (int, error) { if count == 0 { return 0, io.EOF } - data := (*[1 << 30]byte)(unsafe.Pointer(C.g_bytes_get_data(b, nil)))[:count:count] + data := unsafe.Slice((*byte)(C.g_bytes_get_data(b, nil)), count) copy(p, data) return count, nil } @@ -289,7 +289,7 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly. return nil, -1, err } - blob := info.Digest.Hex() + blob := info.Digest.Encoded() // Ensure s.compressed is initialized. It is build by LayerInfosForCopy. if s.compressed == nil { @@ -301,7 +301,7 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca } compressedBlob, isCompressed := s.compressed[info.Digest] if isCompressed { - blob = compressedBlob.Hex() + blob = compressedBlob.Encoded() } branch := fmt.Sprintf("ociimage/%s", blob) @@ -424,7 +424,7 @@ func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context, instanceDiges layerBlobs := man.LayerInfos() for _, layerBlob := range layerBlobs { - branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Hex()) + branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Encoded()) found, uncompressedDigestStr, err := readMetadata(s.repo, branch, "docker.uncompressed_digest") if err != nil || !found { return nil, err @@ -439,7 +439,10 @@ func (s *ostreeImageSource) LayerInfosForCopy(ctx context.Context, instanceDiges if err != nil { return nil, err } - uncompressedDigest := digest.Digest(uncompressedDigestStr) + uncompressedDigest, err := digest.Parse(uncompressedDigestStr) + if err != nil { + return nil, err + } blobInfo := types.BlobInfo{ Digest: uncompressedDigest, Size: uncompressedSize, diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go index 470fca0c1..9cd9c8f7d 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go @@ -1,13 +1,18 @@ -// Package prioritize provides utilities for prioritizing locations in +// Package prioritize provides utilities for filtering and prioritizing locations in // types.BlobInfoCache.CandidateLocations. 
package prioritize import ( - "sort" + "cmp" + "slices" "time" "github.com/containers/image/v5/internal/blobinfocache" + "github.com/containers/image/v5/internal/manifest" + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/types" "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" ) // replacementAttempts is the number of blob replacement candidates with known location returned by destructivelyPrioritizeReplacementCandidates, @@ -20,28 +25,67 @@ const replacementAttempts = 5 // This is a heuristic/guess, and could well use a different value. const replacementUnknownLocationAttempts = 2 +// CandidateCompression returns (true, compressionOp, compressionAlgo) if a blob +// with compressionName (which can be Uncompressed or UnknownCompression) is acceptable for a CandidateLocations* call with v2Options. +// +// v2Options can be set to nil if the call is CandidateLocations (i.e. compression is not required to be known); +// if not nil, the call is assumed to be CandidateLocations2. +// +// The (compressionOp, compressionAlgo) values are suitable for BICReplacementCandidate2 +func CandidateCompression(v2Options *blobinfocache.CandidateLocations2Options, digest digest.Digest, compressorName string) (bool, types.LayerCompression, *compression.Algorithm) { + if v2Options == nil { + return true, types.PreserveOriginal, nil // Anything goes. The (compressionOp, compressionAlgo) values are not used. + } + + var op types.LayerCompression + var algo *compression.Algorithm + switch compressorName { + case blobinfocache.Uncompressed: + op = types.Decompress + algo = nil + case blobinfocache.UnknownCompression: + logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unknown compression", digest.String()) + return false, types.PreserveOriginal, nil // Not allowed with CandidateLocations2 + default: + op = types.Compress + algo_, err := compression.AlgorithmByName(compressorName) + if err != nil { + logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unrecognized compression %q: %v", + digest.String(), compressorName, err) + return false, types.PreserveOriginal, nil // The BICReplacementCandidate2.CompressionAlgorithm field is required + } + algo = &algo_ + } + if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{ + PossibleManifestFormats: v2Options.PossibleManifestFormats, + RequiredCompression: v2Options.RequiredCompression, + }, algo) { + requiredCompresssion := "nil" + if v2Options.RequiredCompression != nil { + requiredCompresssion = v2Options.RequiredCompression.Name() + } + logrus.Debugf("Ignoring BlobInfoCache record of digest %q, compression %q does not match required %s or MIME types %#v", + digest.String(), compressorName, requiredCompresssion, v2Options.PossibleManifestFormats) + return false, types.PreserveOriginal, nil + } + + return true, op, algo +} + // CandidateWithTime is the input to types.BICReplacementCandidate prioritization. 
type CandidateWithTime struct { Candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate LastSeen time.Time // Time the candidate was last known to exist (either read or written) (not set for Candidate.UnknownLocation) } -// candidateSortState is a local state implementing sort.Interface on candidates to prioritize, -// along with the specially-treated digest values for the implementation of sort.Interface.Less +// candidateSortState is a closure for a comparison used by slices.SortFunc on candidates to prioritize, +// along with the specially-treated digest values relevant to the ordering. type candidateSortState struct { - cs []CandidateWithTime // The entries to sort - primaryDigest digest.Digest // The digest the user actually asked for - uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest + primaryDigest digest.Digest // The digest the user actually asked for + uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest } -func (css *candidateSortState) Len() int { - return len(css.cs) -} - -func (css *candidateSortState) Less(i, j int) bool { - xi := css.cs[i] - xj := css.cs[j] - +func (css *candidateSortState) compare(xi, xj CandidateWithTime) int { // primaryDigest entries come first, more recent first. // uncompressedDigest entries, if uncompressedDigest is set and != primaryDigest, come last, more recent entry first. // Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order) @@ -50,43 +94,32 @@ func (css *candidateSortState) Less(i, j int) bool { if xi.Candidate.Digest != xj.Candidate.Digest { // - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter if xi.Candidate.Digest == css.primaryDigest { - return true + return -1 } if xj.Candidate.Digest == css.primaryDigest { - return false + return 1 } if css.uncompressedDigest != "" { if xi.Candidate.Digest == css.uncompressedDigest { - return false + return 1 } if xj.Candidate.Digest == css.uncompressedDigest { - return true + return -1 } } } else { // xi.Candidate.Digest == xj.Candidate.Digest // The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time if xi.Candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.Candidate.Digest == css.uncompressedDigest) { - return xi.LastSeen.After(xj.LastSeen) + return -xi.LastSeen.Compare(xj.LastSeen) } } // Neither of the digests are primaryDigest/uncompressedDigest: - if !xi.LastSeen.Equal(xj.LastSeen) { // Order primarily by time - return xi.LastSeen.After(xj.LastSeen) + if cmp := xi.LastSeen.Compare(xj.LastSeen); cmp != 0 { // Order primarily by time + return -cmp } // Fall back to digest, if timestamps end up _exactly_ the same (how?!) 
- return xi.Candidate.Digest < xj.Candidate.Digest -} - -func (css *candidateSortState) Swap(i, j int) { - css.cs[i], css.cs[j] = css.cs[j], css.cs[i] -} - -func min(a, b int) int { - if a < b { - return a - } - return b + return cmp.Compare(xi.Candidate.Digest, xj.Candidate.Digest) } // destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the @@ -100,12 +133,10 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, var unknownLocationCandidates []CandidateWithTime // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should // compare equal. - // FIXME: Use slices.SortFunc after we update to Go 1.20 (Go 1.21?) and Time.Compare and cmp.Compare are available. - sort.Sort(&candidateSortState{ - cs: cs, + slices.SortFunc(cs, (&candidateSortState{ primaryDigest: primaryDigest, uncompressedDigest: uncompressedDigest, - }) + }).compare) for _, candidate := range cs { if candidate.Candidate.UnknownLocation { unknownLocationCandidates = append(unknownLocationCandidates, candidate) @@ -116,7 +147,7 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, knownLocationCandidatesUsed := min(len(knownLocationCandidates), totalLimit) remainingCapacity := totalLimit - knownLocationCandidatesUsed - unknownLocationCandidatesUsed := min(noLocationLimit, min(remainingCapacity, len(unknownLocationCandidates))) + unknownLocationCandidatesUsed := min(noLocationLimit, remainingCapacity, len(unknownLocationCandidates)) res := make([]blobinfocache.BICReplacementCandidate2, knownLocationCandidatesUsed) for i := 0; i < knownLocationCandidatesUsed; i++ { res[i] = knownLocationCandidates[i].Candidate diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go index 16193db95..1185e9d45 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go @@ -135,14 +135,17 @@ func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compresso // appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in memory // with corresponding compression info from mem.compressors, and returns the result of appending -// them to candidates. v2Output allows including candidates with unknown location, and filters out -// candidates with unknown compression. -func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Output bool) []prioritize.CandidateWithTime { +// them to candidates. +// v2Options is not nil if the caller is CandidateLocations2: this allows including candidates with unknown location, and filters out candidates +// with unknown compression. 
+func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, + v2Options *blobinfocache.CandidateLocations2Options) []prioritize.CandidateWithTime { compressorName := blobinfocache.UnknownCompression if v, ok := mem.compressors[digest]; ok { compressorName = v } - if compressorName == blobinfocache.UnknownCompression && v2Output { + ok, compressionOp, compressionAlgo := prioritize.CandidateCompression(v2Options, digest, compressorName) + if !ok { return candidates } locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present @@ -150,20 +153,22 @@ func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateW for l, t := range locations { candidates = append(candidates, prioritize.CandidateWithTime{ Candidate: blobinfocache.BICReplacementCandidate2{ - Digest: digest, - CompressorName: compressorName, - Location: l, + Digest: digest, + CompressionOperation: compressionOp, + CompressionAlgorithm: compressionAlgo, + Location: l, }, LastSeen: t, }) } - } else if v2Output { + } else if v2Options != nil { candidates = append(candidates, prioritize.CandidateWithTime{ Candidate: blobinfocache.BICReplacementCandidate2{ - Digest: digest, - CompressorName: compressorName, - UnknownLocation: true, - Location: types.BICLocationReference{Opaque: ""}, + Digest: digest, + CompressionOperation: compressionOp, + CompressionAlgorithm: compressionAlgo, + UnknownLocation: true, + Location: types.BICLocationReference{Opaque: ""}, }, LastSeen: time.Time{}, }) @@ -178,24 +183,24 @@ func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateW // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same // uncompressed digest. func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { - return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, false)) + return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, nil)) } -// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) that could possibly be reused -// within the specified (transport scope) (if they still exist, which is not guaranteed). -// -// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, -// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same -// uncompressed digest. -func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 { - return mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, true) +// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) +// that could possibly be reused within the specified (transport scope) (if they still +// exist, which is not guaranteed). 
+func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, options blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 { + return mem.candidateLocations(transport, scope, primaryDigest, options.CanSubstitute, &options) } -func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 { +// candidateLocations implements CandidateLocations / CandidateLocations2. +// v2Options is not nil if the caller is CandidateLocations2. +func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool, + v2Options *blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 { mem.mutex.Lock() defer mem.mutex.Unlock() res := []prioritize.CandidateWithTime{} - res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, v2Output) + res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, v2Options) var uncompressedDigest digest.Digest // = "" if canSubstitute { if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" { @@ -203,12 +208,12 @@ func (mem *cache) candidateLocations(transport types.ImageTransport, scope types if otherDigests != nil { for _, d := range otherDigests.Values() { if d != primaryDigest && d != uncompressedDigest { - res = mem.appendReplacementCandidates(res, transport, scope, d, v2Output) + res = mem.appendReplacementCandidates(res, transport, scope, d, v2Options) } } } if uncompressedDigest != primaryDigest { - res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, v2Output) + res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, v2Options) } } } diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go index d8bde2fa0..a5be85a65 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go @@ -428,88 +428,86 @@ func (sqc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressor } // appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), -// and returns the result of appending them to candidates. v2Output allows including candidates with unknown -// location, and filters out candidates with unknown compression. -func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Output bool) ([]prioritize.CandidateWithTime, error) { - var rows *sql.Rows - var err error - if v2Output { - rows, err = tx.Query("SELECT location, time, compressor FROM KnownLocations JOIN DigestCompressors "+ - "ON KnownLocations.digest = DigestCompressors.digest "+ - "WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?", - transport.Name(), scope.Opaque, digest.String()) - } else { - rows, err = tx.Query("SELECT location, time, IFNULL(compressor, ?) FROM KnownLocations "+ - "LEFT JOIN DigestCompressors ON KnownLocations.digest = DigestCompressors.digest "+ - "WHERE transport = ? AND scope = ? 
AND KnownLocations.digest = ?", - blobinfocache.UnknownCompression, - transport.Name(), scope.Opaque, digest.String()) - } - if err != nil { - return nil, fmt.Errorf("looking up candidate locations: %w", err) - } - defer rows.Close() - - res := []prioritize.CandidateWithTime{} - for rows.Next() { - var location string - var time time.Time - var compressorName string - if err := rows.Scan(&location, &time, &compressorName); err != nil { - return nil, fmt.Errorf("scanning candidate: %w", err) - } - res = append(res, prioritize.CandidateWithTime{ - Candidate: blobinfocache.BICReplacementCandidate2{ - Digest: digest, - CompressorName: compressorName, - Location: types.BICLocationReference{Opaque: location}, - }, - LastSeen: time, - }) - } - if err := rows.Err(); err != nil { - return nil, fmt.Errorf("iterating through locations: %w", err) - } - - if len(res) == 0 && v2Output { +// and returns the result of appending them to candidates. +// v2Options is not nil if the caller is CandidateLocations2: this allows including candidates with unknown location, and filters out candidates +// with unknown compression. +func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, + v2Options *blobinfocache.CandidateLocations2Options) ([]prioritize.CandidateWithTime, error) { + compressorName := blobinfocache.UnknownCompression + if v2Options != nil { compressor, found, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", digest.String()) if err != nil { return nil, fmt.Errorf("scanning compressorName: %w", err) } if found { - res = append(res, prioritize.CandidateWithTime{ - Candidate: blobinfocache.BICReplacementCandidate2{ - Digest: digest, - CompressorName: compressor, - UnknownLocation: true, - Location: types.BICLocationReference{Opaque: ""}, - }, - LastSeen: time.Time{}, - }) + compressorName = compressor } } - candidates = append(candidates, res...) + ok, compressionOp, compressionAlgo := prioritize.CandidateCompression(v2Options, digest, compressorName) + if !ok { + return candidates, nil + } + + rows, err := tx.Query("SELECT location, time FROM KnownLocations "+ + "WHERE transport = ? AND scope = ? 
AND KnownLocations.digest = ?", + transport.Name(), scope.Opaque, digest.String()) + if err != nil { + return nil, fmt.Errorf("looking up candidate locations: %w", err) + } + defer rows.Close() + + rowAdded := false + for rows.Next() { + var location string + var time time.Time + if err := rows.Scan(&location, &time); err != nil { + return nil, fmt.Errorf("scanning candidate: %w", err) + } + candidates = append(candidates, prioritize.CandidateWithTime{ + Candidate: blobinfocache.BICReplacementCandidate2{ + Digest: digest, + CompressionOperation: compressionOp, + CompressionAlgorithm: compressionAlgo, + Location: types.BICLocationReference{Opaque: location}, + }, + LastSeen: time, + }) + rowAdded = true + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("iterating through locations: %w", err) + } + + if !rowAdded && v2Options != nil { + candidates = append(candidates, prioritize.CandidateWithTime{ + Candidate: blobinfocache.BICReplacementCandidate2{ + Digest: digest, + CompressionOperation: compressionOp, + CompressionAlgorithm: compressionAlgo, + UnknownLocation: true, + Location: types.BICLocationReference{Opaque: ""}, + }, + LastSeen: time.Time{}, + }) + } return candidates, nil } // CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) // that could possibly be reused within the specified (transport scope) (if they still // exist, which is not guaranteed). -// -// If !canSubstitute, the returned candidates will match the submitted digest exactly; if -// canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look -// up variants of the blob which have the same uncompressed digest. -// -// The CompressorName fields in returned data must never be UnknownCompression. -func (sqc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 { - return sqc.candidateLocations(transport, scope, digest, canSubstitute, true) +func (sqc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 { + return sqc.candidateLocations(transport, scope, digest, options.CanSubstitute, &options) } -func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 { +// candidateLocations implements CandidateLocations / CandidateLocations2. +// v2Options is not nil if the caller is CandidateLocations2. 
+func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool, + v2Options *blobinfocache.CandidateLocations2Options) []blobinfocache.BICReplacementCandidate2 { var uncompressedDigest digest.Digest // = "" res, err := transaction(sqc, func(tx *sql.Tx) ([]prioritize.CandidateWithTime, error) { res := []prioritize.CandidateWithTime{} - res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, v2Output) + res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, v2Options) if err != nil { return nil, err } @@ -538,7 +536,7 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types return nil, err } if otherDigest != primaryDigest && otherDigest != uncompressedDigest { - res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Output) + res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Options) if err != nil { return nil, err } @@ -549,7 +547,7 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types } if uncompressedDigest != primaryDigest { - res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Output) + res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Options) if err != nil { return nil, err } @@ -571,5 +569,5 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same // uncompressed digest. func (sqc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { - return blobinfocache.CandidateLocationsFromV2(sqc.candidateLocations(transport, scope, digest, canSubstitute, false)) + return blobinfocache.CandidateLocationsFromV2(sqc.candidateLocations(transport, scope, digest, canSubstitute, nil)) } diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go index c61065cb0..da2238a0b 100644 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go @@ -13,14 +13,15 @@ import ( "strings" "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/multierr" "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/ioutils" helperclient "github.com/docker/docker-credential-helpers/client" "github.com/docker/docker-credential-helpers/credentials" - "github.com/hashicorp/go-multierror" "github.com/sirupsen/logrus" ) @@ -231,7 +232,7 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (t return types.DockerAuthConfig{}, err } - var multiErr error + var multiErr []error for _, helper := range helpers { var ( creds types.DockerAuthConfig @@ -253,7 +254,7 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (t } if err != nil { logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", helperKey, helper, err) - multiErr = 
multierror.Append(multiErr, err) + multiErr = append(multiErr, err) continue } if creds != (types.DockerAuthConfig{}) { @@ -266,7 +267,7 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (t } } if multiErr != nil { - return types.DockerAuthConfig{}, multiErr + return types.DockerAuthConfig{}, multierr.Format("errors looking up credentials:\n\t* ", "\nt* ", "\n", multiErr) } logrus.Debugf("No credentials for %s found", key) @@ -313,7 +314,7 @@ func SetCredentials(sys *types.SystemContext, key, username, password string) (s } // Make sure to collect all errors. - var multiErr error + var multiErr []error for _, helper := range helpers { var desc string var err error @@ -345,14 +346,14 @@ func SetCredentials(sys *types.SystemContext, key, username, password string) (s } } if err != nil { - multiErr = multierror.Append(multiErr, err) + multiErr = append(multiErr, err) logrus.Debugf("Error storing credentials for %s in credential helper %s: %v", key, helper, err) continue } logrus.Debugf("Stored credentials for %s in credential helper %s", key, helper) return desc, nil } - return "", multiErr + return "", multierr.Format("Errors storing credentials\n\t* ", "\n\t* ", "\n", multiErr) } func unsupportedNamespaceErr(helper string) error { @@ -376,53 +377,56 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error { return err } - var multiErr error isLoggedIn := false - removeFromCredHelper := func(helper string) { + removeFromCredHelper := func(helper string) error { if isNamespaced { logrus.Debugf("Not removing credentials because namespaced keys are not supported for the credential helper: %s", helper) - return + return nil } err := deleteCredsFromCredHelper(helper, key) if err == nil { logrus.Debugf("Credentials for %q were deleted from credential helper %s", key, helper) isLoggedIn = true - return + return nil } if credentials.IsErrCredentialsNotFoundMessage(err.Error()) { logrus.Debugf("Not logged in to %s with credential helper %s", key, helper) - return + return nil } - multiErr = multierror.Append(multiErr, fmt.Errorf("removing credentials for %s from credential helper %s: %w", key, helper, err)) + return fmt.Errorf("removing credentials for %s from credential helper %s: %w", key, helper, err) } + var multiErr []error for _, helper := range helpers { var err error switch helper { // Special-case the built-in helper for auth files. case sysregistriesv2.AuthenticationFileHelper: _, err = jsonEditor(sys, func(fileContents *dockerConfigFile) (bool, string, error) { + var helperErr error if innerHelper, exists := fileContents.CredHelpers[key]; exists { - removeFromCredHelper(innerHelper) + helperErr = removeFromCredHelper(innerHelper) } if _, ok := fileContents.AuthConfigs[key]; ok { isLoggedIn = true delete(fileContents.AuthConfigs, key) } - return true, "", multiErr + return true, "", helperErr }) if err != nil { - multiErr = multierror.Append(multiErr, err) + multiErr = append(multiErr, err) } // External helpers. 
default: - removeFromCredHelper(helper) + if err := removeFromCredHelper(helper); err != nil { + multiErr = append(multiErr, err) + } } } if multiErr != nil { - return multiErr + return multierr.Format("errors removing credentials\n\t* ", "\n\t*", "\n", multiErr) } if !isLoggedIn { return ErrNotLoggedIn @@ -439,7 +443,7 @@ func RemoveAllAuthentication(sys *types.SystemContext) error { return err } - var multiErr error + var multiErr []error for _, helper := range helpers { var err error switch helper { @@ -479,13 +483,16 @@ func RemoveAllAuthentication(sys *types.SystemContext) error { } if err != nil { logrus.Debugf("Error removing credentials from credential helper %s: %v", helper, err) - multiErr = multierror.Append(multiErr, err) + multiErr = append(multiErr, err) continue } logrus.Debugf("All credentials removed from credential helper %s", helper) } - return multiErr + if multiErr != nil { + return multierr.Format("errors removing all credentials:\n\t* ", "\n\t* ", "\n", multiErr) + } + return nil } // prepareForEdit processes sys and key (if keyRelevant) to return: @@ -570,9 +577,9 @@ func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (authPath, bool, runtimeDir := os.Getenv("XDG_RUNTIME_DIR") if runtimeDir != "" { // This function does not in general need to separately check that the returned path exists; that’s racy, and callers will fail accessing the file anyway. - // We are checking for os.IsNotExist here only to give the user better guidance what to do in this special case. - _, err := os.Stat(runtimeDir) - if os.IsNotExist(err) { + // We are checking for fs.ErrNotExist here only to give the user better guidance what to do in this special case. + err := fileutils.Exists(runtimeDir) + if errors.Is(err, fs.ErrNotExist) { // This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory // or made a typo while setting the environment variable, // so return an error referring to $XDG_RUNTIME_DIR instead of xdgRuntimeDirPath inside. 
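The config.go hunks above replace github.com/hashicorp/go-multierror with a plain []error slice that is rendered by the internal multierr.Format(first, middle, last, errs) helper; the same helper appears in shortnames.go below. The sketch that follows is inferred only from those call sites and is not the vendored implementation; it simply joins the messages with the given prefix, separator, and suffix.

package main

import (
	"errors"
	"fmt"
	"strings"
)

// formatErrs joins errs into one error whose message is
// first + errs[0] + middle + errs[1] + ... + last.
// Sketch only: the vendored internal multierr.Format may differ in detail.
func formatErrs(first, middle, last string, errs []error) error {
	if len(errs) == 0 {
		return errors.New("internal error: formatErrs called with no errors")
	}
	msgs := make([]string, len(errs))
	for i, err := range errs {
		msgs[i] = err.Error()
	}
	return errors.New(first + strings.Join(msgs, middle) + last)
}

func main() {
	errs := []error{
		errors.New("helper A: credentials not found"),
		errors.New("helper B: connection refused"),
	}
	fmt.Println(formatErrs("errors looking up credentials:\n\t* ", "\n\t* ", "\n", errs))
}

A call like this yields one bulleted line per underlying error; a variant that also needs errors.Is / errors.As matching could wrap errors.Join(errs...) instead of errors.New.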
diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go index 3a11542c6..71f5bc837 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go @@ -2,6 +2,7 @@ package sysregistriesv2 import ( "fmt" + "maps" "os" "path/filepath" "reflect" @@ -9,12 +10,12 @@ import ( "github.com/BurntSushi/toml" "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/internal/multierr" "github.com/containers/image/v5/internal/rootless" "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/lockfile" "github.com/sirupsen/logrus" - "golang.org/x/exp/maps" ) // defaultShortNameMode is the default mode of registries.conf files if the @@ -297,11 +298,7 @@ func newShortNameAliasCache(path string, conf *shortNameAliasConf) (*shortNameAl } } if len(errs) > 0 { - err := errs[0] - for i := 1; i < len(errs); i++ { - err = fmt.Errorf("%v\n: %w", errs[i], err) - } - return nil, err + return nil, multierr.Format("", "\n", "", errs) } return &res, nil } diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go index f45fd9de1..45427a350 100644 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go +++ b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go @@ -13,6 +13,7 @@ import ( "github.com/BurntSushi/toml" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/regexp" "github.com/sirupsen/logrus" @@ -564,7 +565,7 @@ func newConfigWrapperWithHomeDir(ctx *types.SystemContext, homeDir string) confi // decide configPath using per-user path or system file if ctx != nil && ctx.SystemRegistriesConfPath != "" { wrapper.configPath = ctx.SystemRegistriesConfPath - } else if _, err := os.Stat(userRegistriesFilePath); err == nil { + } else if err := fileutils.Exists(userRegistriesFilePath); err == nil { // per-user registries.conf exists, not reading system dir // return config dirs from ctx or per-user one wrapper.configPath = userRegistriesFilePath diff --git a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go index c6ec84bd5..f6c0576e0 100644 --- a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go +++ b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go @@ -8,11 +8,11 @@ import ( "net/http" "os" "path/filepath" + "slices" "strings" "time" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" ) // SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc @@ -55,9 +55,9 @@ func SetupCertificates(dir string, tlsc *tls.Config) error { } tlsc.RootCAs.AppendCertsFromPEM(data) } - if strings.HasSuffix(f.Name(), ".cert") { + if base, ok := strings.CutSuffix(f.Name(), ".cert"); ok { certName := f.Name() - keyName := certName[:len(certName)-5] + ".key" + keyName := base + ".key" logrus.Debugf(" cert: %s", fullPath) if !hasFile(fs, keyName) { return fmt.Errorf("missing key %s for 
client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName) @@ -68,9 +68,9 @@ func SetupCertificates(dir string, tlsc *tls.Config) error { } tlsc.Certificates = append(slices.Clone(tlsc.Certificates), cert) } - if strings.HasSuffix(f.Name(), ".key") { + if base, ok := strings.CutSuffix(f.Name(), ".key"); ok { keyName := f.Name() - certName := keyName[:len(keyName)-4] + ".cert" + certName := base + ".cert" logrus.Debugf(" key: %s", fullPath) if !hasFile(fs, certName) { return fmt.Errorf("missing client certificate %s for key %s", certName, keyName) diff --git a/vendor/github.com/containers/image/v5/sif/src.go b/vendor/github.com/containers/image/v5/sif/src.go index 261cfbe77..f8bf31034 100644 --- a/vendor/github.com/containers/image/v5/sif/src.go +++ b/vendor/github.com/containers/image/v5/sif/src.go @@ -111,7 +111,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifRefere History: []imgspecv1.History{ { Created: &created, - CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", layerDigest.Hex(), os.PathSeparator), + CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", layerDigest.Encoded(), os.PathSeparator), Comment: "imported from SIF, uuid: " + sifImg.ID(), }, { diff --git a/vendor/github.com/containers/image/v5/signature/docker.go b/vendor/github.com/containers/image/v5/signature/docker.go index d6075f811..b313231a8 100644 --- a/vendor/github.com/containers/image/v5/signature/docker.go +++ b/vendor/github.com/containers/image/v5/signature/docker.go @@ -5,13 +5,13 @@ package signature import ( "errors" "fmt" + "slices" "strings" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/signature/internal" "github.com/opencontainers/go-digest" - "golang.org/x/exp/slices" ) // SignOptions includes optional parameters for signing container images. @@ -76,10 +76,10 @@ func VerifyImageManifestSignatureUsingKeyIdentityList(unverifiedSignature, unver validateSignedDockerReference: func(signedDockerReference string) error { signedRef, err := reference.ParseNormalizedNamed(signedDockerReference) if err != nil { - return internal.NewInvalidSignatureError(fmt.Sprintf("Invalid docker reference %s in signature", signedDockerReference)) + return internal.NewInvalidSignatureError(fmt.Sprintf("Invalid docker reference %q in signature", signedDockerReference)) } if signedRef.String() != expectedRef.String() { - return internal.NewInvalidSignatureError(fmt.Sprintf("Docker reference %s does not match %s", + return internal.NewInvalidSignatureError(fmt.Sprintf("Docker reference %q does not match %q", signedDockerReference, expectedDockerReference)) } return nil diff --git a/vendor/github.com/containers/image/v5/signature/fulcio_cert.go b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go index c11fa46a9..4e9986422 100644 --- a/vendor/github.com/containers/image/v5/signature/fulcio_cert.go +++ b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go @@ -10,12 +10,12 @@ import ( "encoding/asn1" "errors" "fmt" + "slices" "time" "github.com/containers/image/v5/signature/internal" "github.com/sigstore/fulcio/pkg/certificate" "github.com/sigstore/sigstore/pkg/cryptoutils" - "golang.org/x/exp/slices" ) // fulcioTrustRoot contains policy allow validating Fulcio-issued certificates. 
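The tlsclientconfig.go hunks above swap strings.HasSuffix plus manual slicing for strings.CutSuffix (standard library since Go 1.20), in the same spirit as the move from golang.org/x/exp/slices to the stdlib slices package. A small, self-contained illustration of the CutSuffix pairing follows; the file names are made up.

package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, name := range []string{"client.cert", "client.key", "ca.crt"} {
		if base, ok := strings.CutSuffix(name, ".cert"); ok {
			fmt.Printf("%s expects key %s\n", name, base+".key") // client.cert expects key client.key
		}
		if base, ok := strings.CutSuffix(name, ".key"); ok {
			fmt.Printf("%s expects certificate %s\n", name, base+".cert") // client.key expects certificate client.cert
		}
	}
}

CutSuffix returns the name with the suffix removed plus a boolean reporting whether the suffix was present, so the .cert/.key counterpart can be derived without index arithmetic.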
@@ -178,7 +178,7 @@ func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time, // == Validate the OIDC subject if !slices.Contains(untrustedCertificate.EmailAddresses, f.subjectEmail) { - return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Required email %s not found (got %#v)", + return nil, internal.NewInvalidSignatureError(fmt.Sprintf("Required email %q not found (got %q)", f.subjectEmail, untrustedCertificate.EmailAddresses)) } diff --git a/vendor/github.com/containers/image/v5/signature/internal/json.go b/vendor/github.com/containers/image/v5/signature/internal/json.go index a9d127e65..f9efafb8e 100644 --- a/vendor/github.com/containers/image/v5/signature/internal/json.go +++ b/vendor/github.com/containers/image/v5/signature/internal/json.go @@ -31,7 +31,7 @@ func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) any) er return JSONFormatError(err.Error()) } if t != json.Delim('{') { - return JSONFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t)) + return JSONFormatError(fmt.Sprintf("JSON object expected, got %#v", t)) } for { t, err := dec.Token() @@ -45,16 +45,16 @@ func ParanoidUnmarshalJSONObject(data []byte, fieldResolver func(string) any) er key, ok := t.(string) if !ok { // Coverage: This should never happen, dec.Token() rejects non-string-literals in this state. - return JSONFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t)) + return JSONFormatError(fmt.Sprintf("Key string literal expected, got %#v", t)) } if seenKeys.Contains(key) { - return JSONFormatError(fmt.Sprintf("Duplicate key \"%s\"", key)) + return JSONFormatError(fmt.Sprintf("Duplicate key %q", key)) } seenKeys.Add(key) valuePtr := fieldResolver(key) if valuePtr == nil { - return JSONFormatError(fmt.Sprintf("Unknown key \"%s\"", key)) + return JSONFormatError(fmt.Sprintf("Unknown key %q", key)) } // This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value. if err := dec.Decode(valuePtr); err != nil { @@ -83,7 +83,7 @@ func ParanoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string] } for key := range exactFields { if !seenKeys.Contains(key) { - return JSONFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key)) + return JSONFormatError(fmt.Sprintf(`Key %q missing in a JSON object`, key)) } } return nil diff --git a/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go index 50243da33..e79c91cf9 100644 --- a/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go +++ b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go @@ -220,7 +220,7 @@ func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unver return time.Time{}, NewInvalidSignatureError(`Missing "data.hash.algorithm" field in hashedrekord`) } // FIXME: Rekor 1.3.5 has added SHA-386 and SHA-512 as recognized values. - // Eventually we should support them as well; doing that cleanly would require updqating to Rekor 1.3.5, which requires Go 1.21. + // Eventually we should support them as well. // Short-term, Cosign (as of 2024-02 and Cosign 2.2.3) only produces and accepts SHA-256, so right now that’s not a compatibility // issue. 
if *hashedRekordV001.Data.Hash.Algorithm != models.HashedrekordV001SchemaDataHashAlgorithmSha256 { diff --git a/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go index f8ec66564..a2609c954 100644 --- a/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go +++ b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go @@ -150,7 +150,11 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error { }); err != nil { return err } - s.untrustedDockerManifestDigest = digest.Digest(digestString) + digestValue, err := digest.Parse(digestString) + if err != nil { + return NewInvalidSignatureError(fmt.Sprintf(`invalid docker-manifest-digest value %q: %v`, digestString, err)) + } + s.untrustedDockerManifestDigest = digestValue return ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{ "docker-reference": &s.untrustedDockerReference, diff --git a/vendor/github.com/containers/image/v5/signature/policy_config.go b/vendor/github.com/containers/image/v5/signature/policy_config.go index 7eb5cab7d..8e7665c4b 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_config.go +++ b/vendor/github.com/containers/image/v5/signature/policy_config.go @@ -24,6 +24,7 @@ import ( "github.com/containers/image/v5/signature/internal" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/regexp" ) @@ -65,7 +66,7 @@ func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string) stri return sys.SignaturePolicyPath } userPolicyFilePath := filepath.Join(homeDir, userPolicyFile) - if _, err := os.Stat(userPolicyFilePath); err == nil { + if err := fileutils.Exists(userPolicyFilePath); err == nil { return userPolicyFilePath } if sys != nil && sys.RootForImplicitAbsolutePaths != "" { @@ -246,7 +247,7 @@ func newPolicyRequirementFromJSON(data []byte) (PolicyRequirement, error) { case prTypeSigstoreSigned: res = &prSigstoreSigned{} default: - return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type \"%s\"", typeField.Type)) + return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type %q", typeField.Type)) } if err := json.Unmarshal(data, &res); err != nil { return nil, err @@ -278,7 +279,7 @@ func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error { } if tmp.Type != prTypeInsecureAcceptAnything { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) } *pr = *newPRInsecureAcceptAnything() return nil @@ -308,7 +309,7 @@ func (pr *prReject) UnmarshalJSON(data []byte) error { } if tmp.Type != prTypeReject { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) } *pr = *newPRReject() return nil @@ -317,7 +318,7 @@ func (pr *prReject) UnmarshalJSON(data []byte) error { // newPRSignedBy returns a new prSignedBy if parameters are valid. 
func newPRSignedBy(keyType sbKeyType, keyPath string, keyPaths []string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) { if !keyType.IsValid() { - return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType \"%s\"", keyType)) + return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType %q", keyType)) } keySources := 0 if keyPath != "" { @@ -409,7 +410,7 @@ func (pr *prSignedBy) UnmarshalJSON(data []byte) error { } if tmp.Type != prTypeSignedBy { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) } if signedIdentity == nil { tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact() @@ -465,7 +466,7 @@ func (kt *sbKeyType) UnmarshalJSON(data []byte) error { return err } if !sbKeyType(s).IsValid() { - return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value \"%s\"", s)) + return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value %q", s)) } *kt = sbKeyType(s) return nil @@ -503,7 +504,7 @@ func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error { } if tmp.Type != prTypeSignedBaseLayer { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) } bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity) if err != nil { @@ -539,7 +540,7 @@ func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error) case prmTypeRemapIdentity: res = &prmRemapIdentity{} default: - return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type \"%s\"", typeField.Type)) + return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type %q", typeField.Type)) } if err := json.Unmarshal(data, &res); err != nil { return nil, err @@ -571,7 +572,7 @@ func (prm *prmMatchExact) UnmarshalJSON(data []byte) error { } if tmp.Type != prmTypeMatchExact { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) } *prm = *newPRMMatchExact() return nil @@ -601,7 +602,7 @@ func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error { } if tmp.Type != prmTypeMatchRepoDigestOrExact { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) } *prm = *newPRMMatchRepoDigestOrExact() return nil @@ -631,7 +632,7 @@ func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error { } if tmp.Type != prmTypeMatchRepository { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) } *prm = *newPRMMatchRepository() return nil @@ -641,10 +642,10 @@ func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error { func newPRMExactReference(dockerReference string) (*prmExactReference, error) { ref, err := reference.ParseNormalizedNamed(dockerReference) if err != nil { - return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %s: %s", dockerReference, err.Error())) + return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %q: %s", 
dockerReference, err.Error())) } if reference.IsNameOnly(ref) { - return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %s contains neither a tag nor digest", dockerReference)) + return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %q contains neither a tag nor digest", dockerReference)) } return &prmExactReference{ prmCommon: prmCommon{Type: prmTypeExactReference}, @@ -672,7 +673,7 @@ func (prm *prmExactReference) UnmarshalJSON(data []byte) error { } if tmp.Type != prmTypeExactReference { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) } res, err := newPRMExactReference(tmp.DockerReference) @@ -686,7 +687,7 @@ func (prm *prmExactReference) UnmarshalJSON(data []byte) error { // newPRMExactRepository is NewPRMExactRepository, except it returns the private type. func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) { if _, err := reference.ParseNormalizedNamed(dockerRepository); err != nil { - return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %s: %s", dockerRepository, err.Error())) + return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %q: %s", dockerRepository, err.Error())) } return &prmExactRepository{ prmCommon: prmCommon{Type: prmTypeExactRepository}, @@ -714,7 +715,7 @@ func (prm *prmExactRepository) UnmarshalJSON(data []byte) error { } if tmp.Type != prmTypeExactRepository { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) } res, err := newPRMExactRepository(tmp.DockerRepository) @@ -787,7 +788,7 @@ func (prm *prmRemapIdentity) UnmarshalJSON(data []byte) error { } if tmp.Type != prmTypeRemapIdentity { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) } res, err := newPRMRemapIdentity(tmp.Prefix, tmp.SignedPrefix) diff --git a/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go b/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go index d8c6a97f1..beb5d0673 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go +++ b/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go @@ -176,7 +176,7 @@ func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error { } if tmp.Type != prTypeSigstoreSigned { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) + return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type %q", tmp.Type)) } if signedIdentity == nil { tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact() diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval.go b/vendor/github.com/containers/image/v5/signature/policy_eval.go index 4f8d0da38..ab6b89c26 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_eval.go +++ b/vendor/github.com/containers/image/v5/signature/policy_eval.go @@ -94,10 +94,10 @@ const ( pcDestroyed policyContextState = "Destroyed" ) -// changeContextState changes pc.state, or fails if the state is unexpected +// changeState changes pc.state, or fails if the state is unexpected func (pc *PolicyContext) 
changeState(expected, new policyContextState) error { if pc.state != expected { - return fmt.Errorf(`Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state) + return fmt.Errorf(`Invalid PolicyContext state, expected %q, found %q`, expected, pc.state) } pc.state = new return nil @@ -140,21 +140,21 @@ func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) Polic // Look for a full match. identity := ref.PolicyConfigurationIdentity() if req, ok := transportScopes[identity]; ok { - logrus.Debugf(` Using transport "%s" policy section %s`, transportName, identity) + logrus.Debugf(` Using transport %q policy section %q`, transportName, identity) return req } // Look for a match of the possible parent namespaces. for _, name := range ref.PolicyConfigurationNamespaces() { if req, ok := transportScopes[name]; ok { - logrus.Debugf(` Using transport "%s" specific policy section %s`, transportName, name) + logrus.Debugf(` Using transport %q specific policy section %q`, transportName, name) return req } } // Look for a default match for the transport. if req, ok := transportScopes[""]; ok { - logrus.Debugf(` Using transport "%s" policy section ""`, transportName) + logrus.Debugf(` Using transport %q policy section ""`, transportName) return req } } diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go index a4187735b..896ca5a60 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go @@ -7,12 +7,12 @@ import ( "errors" "fmt" "os" - "strings" + "slices" + "github.com/containers/image/v5/internal/multierr" "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/manifest" digest "github.com/opencontainers/go-digest" - "golang.org/x/exp/slices" ) func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image private.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { @@ -20,10 +20,10 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image priva case SBKeyTypeGPGKeys: case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs: // FIXME? Reject this at policy parsing time already? - return sarRejected, nil, fmt.Errorf(`Unimplemented "keyType" value "%s"`, string(pr.KeyType)) + return sarRejected, nil, fmt.Errorf(`Unimplemented "keyType" value %q`, string(pr.KeyType)) default: // This should never happen, newPRSignedBy ensures KeyType.IsValid() - return sarRejected, nil, fmt.Errorf(`Unknown "keyType" value "%s"`, string(pr.KeyType)) + return sarRejected, nil, fmt.Errorf(`Unknown "keyType" value %q`, string(pr.KeyType)) } // FIXME: move this to per-context initialization @@ -77,7 +77,7 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image priva }, validateSignedDockerReference: func(ref string) error { if !pr.SignedIdentity.matchesDockerReference(image, ref) { - return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref)) + return PolicyRequirementError(fmt.Sprintf("Signature for identity %q is not accepted", ref)) } return nil }, @@ -123,7 +123,7 @@ func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image private.U // Huh?! This should not happen at all; treat it as any other invalid value. 
fallthrough default: - reason = fmt.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res)) + reason = fmt.Errorf(`Internal error: Unexpected signature verification result %q`, string(res)) } rejections = append(rejections, reason) } @@ -134,12 +134,7 @@ func (pr *prSignedBy) isRunningImageAllowed(ctx context.Context, image private.U case 1: summary = rejections[0] default: - var msgs []string - for _, e := range rejections { - msgs = append(msgs, e.Error()) - } - summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s", - strings.Join(msgs, "; "))) + summary = PolicyRequirementError(multierr.Format("None of the signatures were accepted, reasons: ", "; ", "", rejections).Error()) } return false, summary } diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go b/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go index dcf5592a8..485165077 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go @@ -10,8 +10,8 @@ import ( "errors" "fmt" "os" - "strings" + "github.com/containers/image/v5/internal/multierr" "github.com/containers/image/v5/internal/private" "github.com/containers/image/v5/internal/signature" "github.com/containers/image/v5/manifest" @@ -194,7 +194,7 @@ func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image priva signature, err := internal.VerifySigstorePayload(publicKey, untrustedPayload, untrustedBase64Signature, internal.SigstorePayloadAcceptanceRules{ ValidateSignedDockerReference: func(ref string) error { if !pr.SignedIdentity.matchesDockerReference(image, ref) { - return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref)) + return PolicyRequirementError(fmt.Sprintf("Signature for identity %q is not accepted", ref)) } return nil }, @@ -253,7 +253,7 @@ func (pr *prSigstoreSigned) isRunningImageAllowed(ctx context.Context, image pri // Huh?! This should not happen at all; treat it as any other invalid value. 
fallthrough default: - reason = fmt.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res)) + reason = fmt.Errorf(`Internal error: Unexpected signature verification result %q`, string(res)) } rejections = append(rejections, reason) } @@ -270,12 +270,7 @@ func (pr *prSigstoreSigned) isRunningImageAllowed(ctx context.Context, image pri case 1: summary = rejections[0] default: - var msgs []string - for _, e := range rejections { - msgs = append(msgs, e.Error()) - } - summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s", - strings.Join(msgs, "; "))) + summary = PolicyRequirementError(multierr.Format("None of the signatures were accepted, reasons: ", "; ", "", rejections).Error()) } return false, summary } diff --git a/vendor/github.com/containers/image/v5/signature/policy_reference_match.go b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go index 4e70c0f2e..48dbfbbde 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_reference_match.go +++ b/vendor/github.com/containers/image/v5/signature/policy_reference_match.go @@ -136,7 +136,7 @@ func (prm *prmRemapIdentity) remapReferencePrefix(ref reference.Named) (referenc newNamedRef := strings.Replace(refString, prm.Prefix, prm.SignedPrefix, 1) newParsedRef, err := reference.ParseNamed(newNamedRef) if err != nil { - return nil, fmt.Errorf(`error rewriting reference from "%s" to "%s": %v`, refString, newNamedRef, err) + return nil, fmt.Errorf(`error rewriting reference from %q to %q: %v`, refString, newNamedRef, err) } return newParsedRef, nil } diff --git a/vendor/github.com/containers/image/v5/signature/simple.go b/vendor/github.com/containers/image/v5/signature/simple.go index 56b222eda..30df997d8 100644 --- a/vendor/github.com/containers/image/v5/signature/simple.go +++ b/vendor/github.com/containers/image/v5/signature/simple.go @@ -173,7 +173,11 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error { }); err != nil { return err } - s.untrustedDockerManifestDigest = digest.Digest(digestString) + digestValue, err := digest.Parse(digestString) + if err != nil { + return internal.NewInvalidSignatureError(fmt.Sprintf(`invalid docker-manifest-digest value %q: %v`, digestString, err)) + } + s.untrustedDockerManifestDigest = digestValue return internal.ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{ "docker-reference": &s.untrustedDockerReference, diff --git a/vendor/github.com/containers/image/v5/storage/storage_dest.go b/vendor/github.com/containers/image/v5/storage/storage_dest.go index bc9790ca3..a0b347410 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_dest.go +++ b/vendor/github.com/containers/image/v5/storage/storage_dest.go @@ -12,6 +12,7 @@ import ( "io" "os" "path/filepath" + "slices" "sync" "sync/atomic" @@ -34,7 +35,6 @@ import ( digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" ) var ( @@ -59,7 +59,7 @@ type storageImageDestination struct { nextTempFileID atomic.Int32 // A counter that we use for computing filenames to assign to blobs manifest []byte // Manifest contents, temporary manifestDigest digest.Digest // Valid if len(manifest) != 0 - untrustedDiffIDValues []digest.Digest // From config’s RootFS.DiffIDs, valid if not nil + untrustedDiffIDValues []digest.Digest // From config’s RootFS.DiffIDs (not even validated to be valid digest.Digest!); or nil if not read 
yet signatures []byte // Signature contents, temporary signatureses map[digest.Digest][]byte // Instance signature contents, temporary metadata storageImageMetadata // Metadata contents being built @@ -94,11 +94,11 @@ type storageImageDestinationLockProtected struct { blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs indexToTOCDigest map[int]digest.Digest // Mapping from layer index to a TOC Digest, IFF the layer was created/found/reused by TOC digest - // Layer data: Before commitLayer is called, either at least one of (diffOutputs, blobAdditionalLayer, filenames) + // Layer data: Before commitLayer is called, either at least one of (diffOutputs, indexToAdditionalLayer, filenames) // should be available; or indexToTOCDigest/blobDiffIDs should be enough to locate an existing c/storage layer. // They are looked up in the order they are mentioned above. - diffOutputs map[int]*graphdriver.DriverWithDifferOutput // Mapping from layer index to a partially-pulled layer intermediate data - blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer + diffOutputs map[int]*graphdriver.DriverWithDifferOutput // Mapping from layer index to a partially-pulled layer intermediate data + indexToAdditionalLayer map[int]storage.AdditionalLayer // Mapping from layer index to their corresponding additional layer // Mapping from layer blobsums to names of files we used to hold them. If set, fileSizes and blobDiffIDs must also be set. filenames map[digest.Digest]string // Mapping from layer blobsums to their sizes. If set, filenames and blobDiffIDs must also be set. @@ -145,13 +145,13 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (* }, indexToStorageID: make(map[int]string), lockProtected: storageImageDestinationLockProtected{ - indexToAddedLayerInfo: make(map[int]addedLayerInfo), - blobDiffIDs: make(map[digest.Digest]digest.Digest), - indexToTOCDigest: make(map[int]digest.Digest), - diffOutputs: make(map[int]*graphdriver.DriverWithDifferOutput), - blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer), - filenames: make(map[digest.Digest]string), - fileSizes: make(map[digest.Digest]int64), + indexToAddedLayerInfo: make(map[int]addedLayerInfo), + blobDiffIDs: make(map[digest.Digest]digest.Digest), + indexToTOCDigest: make(map[int]digest.Digest), + diffOutputs: make(map[int]*graphdriver.DriverWithDifferOutput), + indexToAdditionalLayer: make(map[int]storage.AdditionalLayer), + filenames: make(map[digest.Digest]string), + fileSizes: make(map[digest.Digest]int64), }, } dest.Compat = impl.AddCompat(dest) @@ -167,13 +167,11 @@ func (s *storageImageDestination) Reference() types.ImageReference { // Close cleans up the temporary directory and additional layer store handlers. func (s *storageImageDestination) Close() error { // This is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock. 
- for _, al := range s.lockProtected.blobAdditionalLayer { + for _, al := range s.lockProtected.indexToAdditionalLayer { al.Release() } for _, v := range s.lockProtected.diffOutputs { - if v.Target != "" { - _ = s.imageRef.transport.store.CleanupStagedLayer(v) - } + _ = s.imageRef.transport.store.CleanupStagedLayer(v) } return os.RemoveAll(s.directory) } @@ -310,6 +308,12 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces if err != nil { return private.UploadedBlob{}, err } + succeeded := false + defer func() { + if !succeeded { + _ = s.imageRef.transport.store.CleanupStagedLayer(out) + } + }() if out.TOCDigest == "" && out.UncompressedDigest == "" { return private.UploadedBlob{}, errors.New("internal error: ApplyDiffWithDiffer succeeded with neither TOCDigest nor UncompressedDigest set") @@ -332,6 +336,7 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces s.lockProtected.diffOutputs[options.LayerIndex] = out s.lock.Unlock() + succeeded = true return private.UploadedBlob{ Digest: blobDigest, Size: srcInfo.Size, @@ -377,14 +382,24 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige s.lock.Lock() defer s.lock.Unlock() - if options.SrcRef != nil { + if options.SrcRef != nil && options.TOCDigest != "" && options.LayerIndex != nil { // Check if we have the layer in the underlying additional layer store. - aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobDigest, options.SrcRef.String()) + aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(options.TOCDigest, options.SrcRef.String()) if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, blobDigest, err) } else if err == nil { - s.lockProtected.blobDiffIDs[blobDigest] = aLayer.UncompressedDigest() - s.lockProtected.blobAdditionalLayer[blobDigest] = aLayer + alsTOCDigest := aLayer.TOCDigest() + if alsTOCDigest != options.TOCDigest { + // FIXME: If alsTOCDigest is "", the Additional Layer Store FUSE server is probably just too old, and we could + // probably go on reading the layer from other sources. + // + // Currently it should not be possible for alsTOCDigest to be set and not the expected value, but there’s + // not that much benefit to checking for equality — we trust the FUSE server to validate the digest either way. + return false, private.ReusedBlob{}, fmt.Errorf("additional layer for TOCDigest %q reports unexpected TOCDigest %q", + options.TOCDigest, alsTOCDigest) + } + s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest + s.lockProtected.indexToAdditionalLayer[*options.LayerIndex] = aLayer return true, private.ReusedBlob{ Digest: blobDigest, Size: aLayer.CompressedSize(), @@ -564,7 +579,7 @@ func (s *storageImageDestination) computeID(m manifest.Manifest) string { } // ordinaryImageID is a digest of a config, which is a JSON value. // To avoid the risk of collisions, start the input with @ so that the input is not a valid JSON. 
- tocImageID := digest.FromString("@With TOC:" + tocIDInput).Hex() + tocImageID := digest.FromString("@With TOC:" + tocIDInput).Encoded() logrus.Debugf("Ordinary storage image ID %s; a layer was looked up by TOC, so using image ID %s", ordinaryImageID, tocImageID) return tocImageID } @@ -651,11 +666,11 @@ func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDig defer s.lock.Unlock() if d, found := s.lockProtected.indexToTOCDigest[layerIndex]; found { - return "@TOC=" + d.Hex(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous. + return "@TOC=" + d.Encoded(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous. } if d, found := s.lockProtected.blobDiffIDs[blobDigest]; found { - return d.Hex(), true // This looks like chain IDs, and it uses the traditional value. + return d.Encoded(), true // This looks like chain IDs, and it uses the traditional value. } return "", false } @@ -731,7 +746,7 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si id := layerIDComponent if !layerIDComponentStandalone || parentLayer != "" { - id = digest.Canonical.FromString(parentLayer + "+" + layerIDComponent).Hex() + id = digest.Canonical.FromString(parentLayer + "+" + layerIDComponent).Encoded() } if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil { // There's already a layer that should have the right contents, just reuse it. @@ -767,7 +782,13 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D logrus.Debugf("Skipping commit for layer %q, manifest not yet available", newLayerID) return nil, nil } + untrustedUncompressedDigest = d + // While the contents of the digest are untrusted, make sure at least the _format_ is valid, + // because we are going to write it to durable storage in expectedLayerDiffIDFlag . + if err := untrustedUncompressedDigest.Validate(); err != nil { + return nil, err + } } flags := make(map[string]interface{}) @@ -793,7 +814,7 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D } s.lock.Lock() - al, ok := s.lockProtected.blobAdditionalLayer[layerDigest] + al, ok := s.lockProtected.indexToAdditionalLayer[index] s.lock.Unlock() if ok { layer, err := al.PutAs(newLayerID, parentLayer, nil) @@ -930,7 +951,6 @@ func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.D // nothing is writing to s.manifest yet, or PutManifest has been called and s.manifest != nil. // Either way this function does not need the protection of s.lock. 
if s.manifest == nil { - logrus.Debugf("Skipping commit for layer %d, manifest not yet available", layerIndex) return "", nil } @@ -1201,7 +1221,7 @@ func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob if err != nil { return err } - s.manifest = slices.Clone(manifestBlob) + s.manifest = bytes.Clone(manifestBlob) s.manifestDigest = digest return nil } diff --git a/vendor/github.com/containers/image/v5/storage/storage_reference.go b/vendor/github.com/containers/image/v5/storage/storage_reference.go index 6b7565fd8..2a1099f67 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_reference.go +++ b/vendor/github.com/containers/image/v5/storage/storage_reference.go @@ -6,6 +6,7 @@ package storage import ( "context" "fmt" + "slices" "strings" "github.com/containers/image/v5/docker/reference" @@ -15,7 +16,6 @@ import ( "github.com/containers/storage" digest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" - "golang.org/x/exp/slices" ) // A storageReference holds an arbitrary name and/or an ID, which is a 32-byte diff --git a/vendor/github.com/containers/image/v5/storage/storage_src.go b/vendor/github.com/containers/image/v5/storage/storage_src.go index 27febe1d3..4f501fc22 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_src.go +++ b/vendor/github.com/containers/image/v5/storage/storage_src.go @@ -107,12 +107,11 @@ func (s *storageImageSource) Close() error { // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. -func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) { +func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { // We need a valid digest value. digest := info.Digest - err = digest.Validate() - if err != nil { + if err := digest.Validate(); err != nil { return nil, 0, err } @@ -154,7 +153,7 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c // NOTE: the blob is first written to a temporary file and subsequently // closed. The intention is to keep the time we own the storage lock // as short as possible to allow other processes to access the storage. 
- rc, n, _, err = s.getBlobAndLayerID(digest, layers) + rc, n, _, err := s.getBlobAndLayerID(digest, layers) if err != nil { return nil, 0, err } @@ -177,7 +176,7 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c // On Unix and modern Windows (2022 at least) we can eagerly unlink the file to ensure it's automatically // cleaned up on process termination (or if the caller forgets to invoke Close()) // On older versions of Windows we will have to fallback to relying on the caller to invoke Close() - if err := os.Remove(tmpFile.Name()); err != nil { + if err := os.Remove(tmpFile.Name()); err == nil { tmpFileRemovePending = false } @@ -308,9 +307,6 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDige if err != nil { return nil, fmt.Errorf("reading layer %q in image %q: %w", layerID, s.image.ID, err) } - if layer.UncompressedSize < 0 { - return nil, fmt.Errorf("uncompressed size for layer %q is unknown", layerID) - } blobDigest := layer.UncompressedDigest if blobDigest == "" { @@ -332,12 +328,16 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDige return nil, fmt.Errorf("parsing expected diffID %q for layer %q: %w", expectedDigest, layerID, err) } } + size := layer.UncompressedSize + if size < 0 { + size = -1 + } s.getBlobMutex.Lock() s.getBlobMutexProtected.digestToLayerID[blobDigest] = layer.ID s.getBlobMutex.Unlock() blobInfo := types.BlobInfo{ Digest: blobDigest, - Size: layer.UncompressedSize, + Size: size, MediaType: uncompressedLayerType, } physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...) @@ -453,10 +453,16 @@ func (s *storageImageSource) getSize() (int64, error) { if err != nil { return -1, err } - if (layer.TOCDigest == "" && layer.UncompressedDigest == "") || layer.UncompressedSize < 0 { + if (layer.TOCDigest == "" && layer.UncompressedDigest == "") || (layer.TOCDigest == "" && layer.UncompressedSize < 0) { return -1, fmt.Errorf("size for layer %q is unknown, failing getSize()", layerID) } - sum += layer.UncompressedSize + // FIXME: We allow layer.UncompressedSize < 0 above, because currently images in an Additional Layer Store don’t provide that value. + // Right now, various callers in Podman (and, also, newImage in this package) don’t expect the size computation to fail. + // Should we update the callers, or do we need to continue returning inaccurate information here? Or should we pay the cost + // to compute the size from the diff? 
+ if layer.UncompressedSize >= 0 { + sum += layer.UncompressedSize + } if layer.Parent == "" { break } diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_reference.go b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go index d5578d9e8..4e7ef3bcf 100644 --- a/vendor/github.com/containers/image/v5/tarball/tarball_reference.go +++ b/vendor/github.com/containers/image/v5/tarball/tarball_reference.go @@ -3,6 +3,7 @@ package tarball import ( "context" "fmt" + "maps" "os" "strings" @@ -10,7 +11,6 @@ import ( "github.com/containers/image/v5/internal/image" "github.com/containers/image/v5/types" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/exp/maps" ) // ConfigUpdater is an interface that ImageReferences for "tarball" images also diff --git a/vendor/github.com/containers/image/v5/tarball/tarball_src.go b/vendor/github.com/containers/image/v5/tarball/tarball_src.go index 6f9bfaf75..18d4cc2d2 100644 --- a/vendor/github.com/containers/image/v5/tarball/tarball_src.go +++ b/vendor/github.com/containers/image/v5/tarball/tarball_src.go @@ -6,6 +6,7 @@ import ( "encoding/json" "fmt" "io" + "maps" "os" "runtime" "strings" @@ -18,7 +19,6 @@ import ( digest "github.com/opencontainers/go-digest" imgspecs "github.com/opencontainers/image-spec/specs-go" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/exp/maps" ) type tarballImageSource struct { @@ -117,7 +117,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System history = append(history, imgspecv1.History{ Created: &blobTime, - CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffID.Hex(), os.PathSeparator), + CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffID.Encoded(), os.PathSeparator), Comment: comment, }) // Use the mtime of the most recently modified file as the image's creation time. diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go index a8f1c13ad..0f4b7e329 100644 --- a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go +++ b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go @@ -28,11 +28,11 @@ func ParseImageName(imgName string) (types.ImageReference, error) { // Keep this in sync with TransportFromImageName! 
transportName, withinTransport, valid := strings.Cut(imgName, ":") if !valid { - return nil, fmt.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName) + return nil, fmt.Errorf(`Invalid image name %q, expected colon-separated transport:reference`, imgName) } transport := transports.Get(transportName) if transport == nil { - return nil, fmt.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, transportName) + return nil, fmt.Errorf(`Invalid image name %q, unknown transport %q`, imgName, transportName) } return transport.ParseReference(withinTransport) } diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index 2be054158..9e0338158 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -6,7 +6,7 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 30 + VersionMinor = 31 // VersionPatch is for backwards-compatible bug fixes VersionPatch = 1 diff --git a/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go b/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go index cd2241cbc..24e1d619d 100644 --- a/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go +++ b/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go @@ -123,9 +123,24 @@ func addPubKeys(joseRecipients *[]jose.Recipient, pubKeys [][]byte) error { } alg := jose.RSA_OAEP - switch key.(type) { + switch key := key.(type) { case *ecdsa.PublicKey: alg = jose.ECDH_ES_A256KW + case *jose.JSONWebKey: + if key.Algorithm != "" { + alg = jose.KeyAlgorithm(key.Algorithm) + switch alg { + /* accepted algorithms */ + case jose.RSA_OAEP: + case jose.RSA_OAEP_256: + case jose.ECDH_ES_A128KW: + case jose.ECDH_ES_A192KW: + case jose.ECDH_ES_A256KW: + /* all others are rejected */ + default: + return fmt.Errorf("%s is an unsupported JWE key algorithm", alg) + } + } } *joseRecipients = append(*joseRecipients, jose.Recipient{ diff --git a/vendor/github.com/containers/ocicrypt/utils/testing.go b/vendor/github.com/containers/ocicrypt/utils/testing.go index 69bb9d12f..050aa885e 100644 --- a/vendor/github.com/containers/ocicrypt/utils/testing.go +++ b/vendor/github.com/containers/ocicrypt/utils/testing.go @@ -38,6 +38,15 @@ func CreateRSAKey(bits int) (*rsa.PrivateKey, error) { return key, nil } +// CreateECDSAKey creates an elliptic curve key for the given curve +func CreateECDSAKey(curve elliptic.Curve) (*ecdsa.PrivateKey, error) { + key, err := ecdsa.GenerateKey(curve, rand.Reader) + if err != nil { + return nil, fmt.Errorf("ecdsa.GenerateKey failed: %w", err) + } + return key, nil +} + // CreateRSATestKey creates an RSA key of the given size and returns // the public and private key in PEM or DER format func CreateRSATestKey(bits int, password []byte, pemencode bool) ([]byte, []byte, error) { @@ -85,9 +94,9 @@ func CreateRSATestKey(bits int, password []byte, pemencode bool) ([]byte, []byte // CreateECDSATestKey creates and elliptic curve key for the given curve and returns // the public and private key in DER format func CreateECDSATestKey(curve elliptic.Curve) ([]byte, []byte, error) { - key, err := ecdsa.GenerateKey(curve, rand.Reader) + key, err := CreateECDSAKey(curve) if err != nil { - return nil, nil, fmt.Errorf("ecdsa.GenerateKey failed: %w", err) + return nil, nil, err } 
pubData, err := x509.MarshalPKIXPublicKey(&key.PublicKey) diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml index 13bc20e7e..c2474c7f2 100644 --- a/vendor/github.com/containers/storage/.cirrus.yml +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -23,7 +23,7 @@ env: # GCE project where images live IMAGE_PROJECT: "libpod-218412" # VM Image built in containers/automation_images - IMAGE_SUFFIX: "c20240102t155643z-f39f38d13" + IMAGE_SUFFIX: "c20240513t140131z-f40f39d13" FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}" @@ -116,6 +116,7 @@ debian_testing_task: &debian_testing lint_task: + alias: lint env: CIRRUS_WORKING_DIR: "/go/src/github.com/containers/storage" container: @@ -125,7 +126,7 @@ lint_task: folder: $GOPATH/pkg/mod build_script: | apt-get update - apt-get install -y libbtrfs-dev libdevmapper-dev + apt-get install -y libbtrfs-dev test_script: | make TAGS=regex_precompile local-validate make lint @@ -134,6 +135,7 @@ lint_task: # Update metadata on VM images referenced by this repository state meta_task: + alias: meta container: image: "quay.io/libpod/imgts:latest" @@ -156,6 +158,7 @@ meta_task: vendor_task: + alias: vendor container: image: golang modules_cache: @@ -166,13 +169,20 @@ vendor_task: cross_task: + alias: cross container: - image: golang:1.20 + image: golang:1.21 build_script: make cross -# Represent overall pass/fail status from required dependent tasks +# Status aggregator for all tests. This task simply ensures a defined +# set of tasks all passed, and allows confirming that based on the status +# of this task. success_task: + alias: success + # N/B: The prow merge-bot (tide) is sensitized to this exact name, DO NOT CHANGE IT. 
+ # Ref: https://github.com/openshift/release/pull/49820 + name: "Total Success" depends_on: - lint - fedora_testing @@ -181,6 +191,6 @@ success_task: - vendor - cross container: - image: golang:1.20 + image: golang:1.21 clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed script: /bin/true diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile index 8461c0901..7ee2642fc 100644 --- a/vendor/github.com/containers/storage/Makefile +++ b/vendor/github.com/containers/storage/Makefile @@ -27,7 +27,7 @@ vendor-in-container NATIVETAGS := -AUTOTAGS := $(shell ./hack/btrfs_tag.sh) $(shell ./hack/libdm_tag.sh) $(shell ./hack/libsubid_tag.sh) +AUTOTAGS := $(shell ./hack/btrfs_tag.sh) $(shell ./hack/libsubid_tag.sh) BUILDFLAGS := -tags "$(AUTOTAGS) $(TAGS)" $(FLAGS) GO ?= go TESTFLAGS := $(shell $(GO) test -race $(BUILDFLAGS) ./pkg/stringutils 2>&1 > /dev/null && echo -race) diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index 3f4830156..b7921ae87 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.53.0 +1.54.0 diff --git a/vendor/github.com/containers/storage/deprecated.go b/vendor/github.com/containers/storage/deprecated.go index 04972d838..76ae6328b 100644 --- a/vendor/github.com/containers/storage/deprecated.go +++ b/vendor/github.com/containers/storage/deprecated.go @@ -208,8 +208,6 @@ type LayerStore interface { ParentOwners(id string) (uids, gids []int, err error) ApplyDiff(to string, diff io.Reader) (int64, error) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) - CleanupStagingDirectory(stagingDirectory string) error - ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error DifferTarget(id string) (string, error) LoadLocked() error PutAdditionalLayer(id string, parentLayer *Layer, names []string, aLayer drivers.AdditionalLayer) (layer *Layer, err error) diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go index 0b1766210..e00314d3f 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go +++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go @@ -41,6 +41,7 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/locker" mountpk "github.com/containers/storage/pkg/mount" @@ -243,7 +244,7 @@ func (a *Driver) Metadata(id string) (map[string]string, error) { // Exists returns true if the given id is registered with // this driver func (a *Driver) Exists(id string) bool { - if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { + if err := fileutils.Lexists(path.Join(a.rootPath(), "layers", id)); err != nil { return false } return true @@ -431,7 +432,7 @@ func atomicRemove(source string) error { case err == nil, os.IsNotExist(err): case os.IsExist(err): // Got error saying the target dir already exists, maybe the source doesn't exist due to a previous (failed) remove - if _, e := os.Stat(source); !os.IsNotExist(e) { + if e := fileutils.Exists(source); !os.IsNotExist(e) { return fmt.Errorf("target rename 
dir '%s' exists but should not, this needs to be manually cleaned up: %w", target, err) } default: diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go index fd93d4e84..11ae56364 100644 --- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go +++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go @@ -32,6 +32,7 @@ import ( graphdriver "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/parsers" @@ -589,11 +590,11 @@ func (d *Driver) setStorageSize(dir string, driver *Driver) error { // Remove the filesystem with given id. func (d *Driver) Remove(id string) error { dir := d.subvolumesDirID(id) - if _, err := os.Stat(dir); err != nil { + if err := fileutils.Exists(dir); err != nil { return err } quotasDir := d.quotasDirID(id) - if _, err := os.Stat(quotasDir); err == nil { + if err := fileutils.Exists(quotasDir); err == nil { if err := os.Remove(quotasDir); err != nil { return err } @@ -669,7 +670,7 @@ func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { // Exists checks if the id exists in the filesystem. func (d *Driver) Exists(id string) bool { dir := d.subvolumesDirID(id) - _, err := os.Stat(dir) + err := fileutils.Exists(dir) return err == nil } diff --git a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go deleted file mode 100644 index 388602b63..000000000 --- a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go +++ /dev/null @@ -1,254 +0,0 @@ -//go:build linux && cgo -// +build linux,cgo - -package devmapper - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "os" - "os/exec" - "path/filepath" - "strings" - - "github.com/sirupsen/logrus" -) - -type directLVMConfig struct { - Device string - ThinpPercent uint64 - ThinpMetaPercent uint64 - AutoExtendPercent uint64 - AutoExtendThreshold uint64 - MetaDataSize string -} - -var ( - errThinpPercentMissing = errors.New("must set both `dm.thinp_percent` and `dm.thinp_metapercent` if either is specified") - errThinpPercentTooBig = errors.New("combined `dm.thinp_percent` and `dm.thinp_metapercent` must not be greater than 100") - errMissingSetupDevice = errors.New("must provide device path in `dm.directlvm_device` in order to configure direct-lvm") -) - -func validateLVMConfig(cfg directLVMConfig) error { - if cfg.Device == "" { - return errMissingSetupDevice - } - if (cfg.ThinpPercent > 0 && cfg.ThinpMetaPercent == 0) || cfg.ThinpMetaPercent > 0 && cfg.ThinpPercent == 0 { - return errThinpPercentMissing - } - - if cfg.ThinpPercent+cfg.ThinpMetaPercent > 100 { - return errThinpPercentTooBig - } - return nil -} - -func checkDevAvailable(dev string) error { - lvmScan, err := exec.LookPath("lvmdiskscan") - if err != nil { - logrus.Debugf("could not find lvmdiskscan: %v", err) - return nil - } - - out, err := exec.Command(lvmScan).CombinedOutput() - if err != nil { - logrus.WithError(err).Error(string(out)) - return nil - } - - if !bytes.Contains(out, []byte(dev)) { - return fmt.Errorf("%s is not available for use with devicemapper", dev) - } - return nil -} - -func checkDevInVG(dev string) error { - pvDisplay, err := exec.LookPath("pvdisplay") - if err != nil { - logrus.Debugf("could not find pvdisplay: %v", 
err) - return nil - } - - out, err := exec.Command(pvDisplay, dev).CombinedOutput() - if err != nil { - logrus.WithError(err).Error(string(out)) - return nil - } - - scanner := bufio.NewScanner(bytes.NewReader(bytes.TrimSpace(out))) - for scanner.Scan() { - fields := strings.SplitAfter(strings.TrimSpace(scanner.Text()), "VG Name") - if len(fields) > 1 { - // got "VG Name" line" - vg := strings.TrimSpace(fields[1]) - if len(vg) > 0 { - return fmt.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg) - } - logrus.Error(fields) - break - } - } - return nil -} - -func checkDevHasFS(dev string) error { - blkid, err := exec.LookPath("blkid") - if err != nil { - logrus.Debugf("could not find blkid %v", err) - return nil - } - - out, err := exec.Command(blkid, dev).CombinedOutput() - if err != nil { - logrus.WithError(err).Error(string(out)) - return nil - } - - fields := bytes.Fields(out) - for _, f := range fields { - kv := bytes.Split(f, []byte{'='}) - if bytes.Equal(kv[0], []byte("TYPE")) { - v := bytes.Trim(kv[1], "\"") - if len(v) > 0 { - return fmt.Errorf("%s has a filesystem already, use dm.directlvm_device_force=true if you want to wipe the device", dev) - } - return nil - } - } - return nil -} - -func verifyBlockDevice(dev string, force bool) error { - absPath, err := filepath.Abs(dev) - if err != nil { - return fmt.Errorf("unable to get absolute path for %s: %s", dev, err) - } - realPath, err := filepath.EvalSymlinks(absPath) - if err != nil { - return fmt.Errorf("failed to canonicalise path for %s: %s", dev, err) - } - if err := checkDevAvailable(absPath); err != nil { - logrus.Infof("block device '%s' not available, checking '%s'", absPath, realPath) - if err := checkDevAvailable(realPath); err != nil { - return fmt.Errorf("neither '%s' nor '%s' are in the output of lvmdiskscan, can't use device", absPath, realPath) - } - } - if err := checkDevInVG(realPath); err != nil { - return err - } - - if force { - return nil - } - - if err := checkDevHasFS(realPath); err != nil { - return err - } - return nil -} - -func readLVMConfig(root string) (directLVMConfig, error) { - var cfg directLVMConfig - - p := filepath.Join(root, "setup-config.json") - b, err := os.ReadFile(p) - if err != nil { - if os.IsNotExist(err) { - return cfg, nil - } - return cfg, fmt.Errorf("reading existing setup config: %w", err) - } - - // check if this is just an empty file, no need to produce a json error later if so - if len(b) == 0 { - return cfg, nil - } - if err := json.Unmarshal(b, &cfg); err != nil { - return cfg, fmt.Errorf("unmarshaling previous device setup config: %w", err) - } - return cfg, nil -} - -func writeLVMConfig(root string, cfg directLVMConfig) error { - p := filepath.Join(root, "setup-config.json") - b, err := json.Marshal(cfg) - if err != nil { - return fmt.Errorf("marshalling direct lvm config: %w", err) - } - if err := os.WriteFile(p, b, 0o600); err != nil { - return fmt.Errorf("writing direct lvm config to file: %w", err) - } - return nil -} - -func setupDirectLVM(cfg directLVMConfig) error { - lvmProfileDir := "/etc/lvm/profile" - binaries := []string{"pvcreate", "vgcreate", "lvcreate", "lvconvert", "lvchange", "thin_check"} - - for _, bin := range binaries { - if _, err := exec.LookPath(bin); err != nil { - return fmt.Errorf("looking up command `"+bin+"` while setting up direct lvm: %w", err) - } - } - - err := os.MkdirAll(lvmProfileDir, 0o755) - if err != nil { - return fmt.Errorf("creating lvm profile 
directory: %w", err) - } - - if cfg.AutoExtendPercent == 0 { - cfg.AutoExtendPercent = 20 - } - - if cfg.AutoExtendThreshold == 0 { - cfg.AutoExtendThreshold = 80 - } - - if cfg.ThinpPercent == 0 { - cfg.ThinpPercent = 95 - } - if cfg.ThinpMetaPercent == 0 { - cfg.ThinpMetaPercent = 1 - } - if cfg.MetaDataSize == "" { - cfg.MetaDataSize = "128k" - } - - out, err := exec.Command("pvcreate", "--metadatasize", cfg.MetaDataSize, "-f", cfg.Device).CombinedOutput() - if err != nil { - return fmt.Errorf("%v: %w", string(out), err) - } - - out, err = exec.Command("vgcreate", "storage", cfg.Device).CombinedOutput() - if err != nil { - return fmt.Errorf("%v: %w", string(out), err) - } - - out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpool", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpPercent)).CombinedOutput() - if err != nil { - return fmt.Errorf("%v: %w", string(out), err) - } - out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpoolmeta", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpMetaPercent)).CombinedOutput() - if err != nil { - return fmt.Errorf("%v: %w", string(out), err) - } - - out, err = exec.Command("lvconvert", "-y", "--zero", "n", "-c", "512K", "--thinpool", "storage/thinpool", "--poolmetadata", "storage/thinpoolmeta").CombinedOutput() - if err != nil { - return fmt.Errorf("%v: %w", string(out), err) - } - - profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent) - err = os.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0o600) - if err != nil { - return fmt.Errorf("writing storage thinp autoextend profile: %w", err) - } - - out, err = exec.Command("lvchange", "--metadataprofile", "storage-thinpool", "storage/thinpool").CombinedOutput() - if err != nil { - return fmt.Errorf("%s: %w", string(out), err) - } - return nil -} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go deleted file mode 100644 index 5d8df8a78..000000000 --- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go +++ /dev/null @@ -1,2888 +0,0 @@ -//go:build linux && cgo -// +build linux,cgo - -package devmapper - -import ( - "bufio" - "errors" - "fmt" - "io" - "io/fs" - "os" - "os/exec" - "path" - "path/filepath" - "reflect" - "strconv" - "strings" - "sync" - "time" - - graphdriver "github.com/containers/storage/drivers" - "github.com/containers/storage/pkg/devicemapper" - "github.com/containers/storage/pkg/dmesg" - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/loopback" - "github.com/containers/storage/pkg/mount" - "github.com/containers/storage/pkg/parsers" - "github.com/containers/storage/pkg/parsers/kernel" - units "github.com/docker/go-units" - "github.com/opencontainers/selinux/go-selinux/label" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -var ( - defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 - defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 - defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 - defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors - defaultUdevSyncOverride = false - maxDeviceID = 0xffffff // 24 bit, pool limit - deviceIDMapSz = (maxDeviceID + 1) / 8 - driverDeferredRemovalSupport = false - enableDeferredRemoval = false - enableDeferredDeletion = false - userBaseSize = false - defaultMinFreeSpacePercent uint32 = 10 
- lvmSetupConfigForce bool -) - -const ( - deviceSetMetaFile = "deviceset-metadata" - transactionMetaFile = "transaction-metadata" - xfs = "xfs" - ext4 = "ext4" - base = "base" -) - -type transaction struct { - OpenTransactionID uint64 `json:"open_transaction_id"` - DeviceIDHash string `json:"device_hash"` - DeviceID int `json:"device_id"` -} - -type devInfo struct { - Hash string `json:"-"` - DeviceID int `json:"device_id"` - Size uint64 `json:"size"` - TransactionID uint64 `json:"transaction_id"` - Initialized bool `json:"initialized"` - Deleted bool `json:"deleted"` - devices *DeviceSet - - // The global DeviceSet lock guarantees that we serialize all - // the calls to libdevmapper (which is not threadsafe), but we - // sometimes release that lock while sleeping. In that case - // this per-device lock is still held, protecting against - // other accesses to the device that we're doing the wait on. - // - // WARNING: In order to avoid AB-BA deadlocks when releasing - // the global lock while holding the per-device locks all - // device locks must be acquired *before* the device lock, and - // multiple device locks should be acquired parent before child. - lock sync.Mutex -} - -type metaData struct { - Devices map[string]*devInfo `json:"Devices"` -} - -// DeviceSet holds information about list of devices -type DeviceSet struct { - metaData `json:"-"` - sync.Mutex `json:"-"` // Protects all fields of DeviceSet and serializes calls into libdevmapper - root string - devicePrefix string - TransactionID uint64 `json:"-"` - NextDeviceID int `json:"next_device_id"` - deviceIDMap []byte - - // Options - dataLoopbackSize int64 - metaDataSize string - metaDataLoopbackSize int64 - baseFsSize uint64 - filesystem string - mountOptions string - mkfsArgs []string - dataDevice string // block or loop dev - dataLoopFile string // loopback file, if used - metadataDevice string // block or loop dev - metadataLoopFile string // loopback file, if used - doBlkDiscard bool - thinpBlockSize uint32 - thinPoolDevice string - transaction `json:"-"` - overrideUdevSyncCheck bool - deferredRemove bool // use deferred removal - deferredDelete bool // use deferred deletion - BaseDeviceUUID string // save UUID of base device - BaseDeviceFilesystem string // save filesystem of base device - nrDeletedDevices uint // number of deleted devices - deletionWorkerTicker *time.Ticker - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - minFreeSpacePercent uint32 // min free space percentage in thinpool - xfsNospaceRetries string // max retries when xfs receives ENOSPC - lvmSetupConfig directLVMConfig -} - -// DiskUsage contains information about disk usage and is used when reporting Status of a device. -type DiskUsage struct { - // Used bytes on the disk. - Used uint64 - // Total bytes on the disk. - Total uint64 - // Available bytes on the disk. - Available uint64 -} - -// Status returns the information about the device. -type Status struct { - // PoolName is the name of the data pool. - PoolName string - // DataFile is the actual block device for data. - DataFile string - // DataLoopback loopback file, if used. - DataLoopback string - // MetadataFile is the actual block device for metadata. - MetadataFile string - // MetadataLoopback is the loopback file, if used. - MetadataLoopback string - // Data is the disk used for data. - Data DiskUsage - // Metadata is the disk used for meta data. - Metadata DiskUsage - // BaseDeviceSize is base size of container and image - BaseDeviceSize uint64 - // BaseDeviceFS is backing filesystem. 
- BaseDeviceFS string - // SectorSize size of the vector. - SectorSize uint64 - // UdevSyncSupported is true if sync is supported. - UdevSyncSupported bool - // DeferredRemoveEnabled is true then the device is not unmounted. - DeferredRemoveEnabled bool - // True if deferred deletion is enabled. This is different from - // deferred removal. "removal" means that device mapper device is - // deactivated. Thin device is still in thin pool and can be activated - // again. But "deletion" means that thin device will be deleted from - // thin pool and it can't be activated again. - DeferredDeleteEnabled bool - DeferredDeletedDeviceCount uint - MinFreeSpace uint64 -} - -// Structure used to export image/container metadata in inspect. -type deviceMetadata struct { - deviceID int - deviceSize uint64 // size in bytes - deviceName string // Device name as used during activation -} - -// DevStatus returns information about device mounted containing its id, size and sector information. -type DevStatus struct { - // DeviceID is the id of the device. - DeviceID int - // Size is the size of the filesystem. - Size uint64 - // TransactionID is a unique integer per device set used to identify an operation on the file system, this number is incremental. - TransactionID uint64 - // SizeInSectors indicates the size of the sectors allocated. - SizeInSectors uint64 - // MappedSectors indicates number of mapped sectors. - MappedSectors uint64 - // HighestMappedSector is the pointer to the highest mapped sector. - HighestMappedSector uint64 -} - -func getDevName(name string) string { - return "/dev/mapper/" + name -} - -func (info *devInfo) Name() string { - hash := info.Hash - if hash == "" { - hash = base - } - return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash) -} - -func (info *devInfo) DevName() string { - return getDevName(info.Name()) -} - -func (devices *DeviceSet) loopbackDir() string { - return path.Join(devices.root, "devicemapper") -} - -func (devices *DeviceSet) metadataDir() string { - return path.Join(devices.root, "metadata") -} - -func (devices *DeviceSet) metadataFile(info *devInfo) string { - file := info.Hash - if file == "" { - file = base - } - return path.Join(devices.metadataDir(), file) -} - -func (devices *DeviceSet) transactionMetaFile() string { - return path.Join(devices.metadataDir(), transactionMetaFile) -} - -func (devices *DeviceSet) deviceSetMetaFile() string { - return path.Join(devices.metadataDir(), deviceSetMetaFile) -} - -func (devices *DeviceSet) oldMetadataFile() string { - return path.Join(devices.loopbackDir(), "json") -} - -func (devices *DeviceSet) getPoolName() string { - if devices.thinPoolDevice == "" { - return devices.devicePrefix + "-pool" - } - return devices.thinPoolDevice -} - -func (devices *DeviceSet) getPoolDevName() string { - return getDevName(devices.getPoolName()) -} - -func (devices *DeviceSet) hasImage(name string) bool { - dirname := devices.loopbackDir() - filename := path.Join(dirname, name) - - _, err := os.Stat(filename) - return err == nil -} - -// ensureImage creates a sparse file of bytes at the path -// /devicemapper/. -// If the file already exists and new size is larger than its current size, it grows to the new size. -// Either way it returns the full path. 
-func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { - dirname := devices.loopbackDir() - filename := path.Join(dirname, name) - - uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) - if err != nil { - return "", err - } - if err := idtools.MkdirAllAs(dirname, 0o700, uid, gid); err != nil { - return "", err - } - - if fi, err := os.Stat(filename); err != nil { - if !os.IsNotExist(err) { - return "", err - } - logrus.Debugf("devmapper: Creating loopback file %s for device-manage use", filename) - file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0o600) - if err != nil { - return "", err - } - defer file.Close() - - if err := file.Truncate(size); err != nil { - return "", err - } - } else { - if fi.Size() < size { - file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0o600) - if err != nil { - return "", err - } - defer file.Close() - if err := file.Truncate(size); err != nil { - return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %w", filename, err) - } - } else if fi.Size() > size { - logrus.Warnf("devmapper: Can't shrink loopback file %s", filename) - } - } - return filename, nil -} - -func (devices *DeviceSet) allocateTransactionID() uint64 { - devices.OpenTransactionID = devices.TransactionID + 1 - return devices.OpenTransactionID -} - -func (devices *DeviceSet) updatePoolTransactionID() error { - if err := devicemapper.SetTransactionID(devices.getPoolDevName(), devices.TransactionID, devices.OpenTransactionID); err != nil { - return fmt.Errorf("devmapper: Error setting devmapper transaction ID: %s", err) - } - devices.TransactionID = devices.OpenTransactionID - return nil -} - -func (devices *DeviceSet) removeMetadata(info *devInfo) error { - if err := os.RemoveAll(devices.metadataFile(info)); err != nil { - return fmt.Errorf("devmapper: Error removing metadata file %s: %s", devices.metadataFile(info), err) - } - return nil -} - -// Given json data and file path, write it to disk -func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error { - tmpFile, err := os.CreateTemp(devices.metadataDir(), ".tmp") - if err != nil { - return fmt.Errorf("devmapper: Error creating metadata file: %s", err) - } - - n, err := tmpFile.Write(jsonData) - if err != nil { - return fmt.Errorf("devmapper: Error writing metadata to %s: %s", tmpFile.Name(), err) - } - if n < len(jsonData) { - return io.ErrShortWrite - } - if err := tmpFile.Sync(); err != nil { - return fmt.Errorf("devmapper: Error syncing metadata file %s: %s", tmpFile.Name(), err) - } - if err := tmpFile.Close(); err != nil { - return fmt.Errorf("devmapper: Error closing metadata file %s: %s", tmpFile.Name(), err) - } - if err := os.Rename(tmpFile.Name(), filePath); err != nil { - return fmt.Errorf("devmapper: Error committing metadata file %s: %s", tmpFile.Name(), err) - } - - return nil -} - -func (devices *DeviceSet) saveMetadata(info *devInfo) error { - jsonData, err := json.Marshal(info) - if err != nil { - return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) - } - if err := devices.writeMetaFile(jsonData, devices.metadataFile(info)); err != nil { - return err - } - return nil -} - -func (devices *DeviceSet) markDeviceIDUsed(deviceID int) { - var mask byte - i := deviceID % 8 - mask = 1 << uint(i) - devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] | mask -} - -func (devices *DeviceSet) markDeviceIDFree(deviceID int) { - var mask byte - i := deviceID % 8 - mask = ^(1 << uint(i)) - 
devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] & mask -} - -func (devices *DeviceSet) isDeviceIDFree(deviceID int) bool { - var mask byte - i := deviceID % 8 - mask = (1 << uint(i)) - return (devices.deviceIDMap[deviceID/8] & mask) == 0 -} - -// Should be called with devices.Lock() held. -func (devices *DeviceSet) lookupDevice(hash string) (*devInfo, error) { - info := devices.Devices[hash] - if info == nil { - info = devices.loadMetadata(hash) - if info == nil { - return nil, fmt.Errorf("devmapper: Unknown device %s", hash) - } - - devices.Devices[hash] = info - } - return info, nil -} - -func (devices *DeviceSet) lookupDeviceWithLock(hash string) (*devInfo, error) { - devices.Lock() - defer devices.Unlock() - info, err := devices.lookupDevice(hash) - return info, err -} - -// This function relies on that device hash map has been loaded in advance. -// Should be called with devices.Lock() held. -func (devices *DeviceSet) constructDeviceIDMap() { - logrus.Debug("devmapper: constructDeviceIDMap()") - defer logrus.Debug("devmapper: constructDeviceIDMap() END") - - for _, info := range devices.Devices { - devices.markDeviceIDUsed(info.DeviceID) - logrus.Debugf("devmapper: Added deviceId=%d to DeviceIdMap", info.DeviceID) - } -} - -func (devices *DeviceSet) deviceFileWalkFunction(path string, name string) error { - // Skip some of the meta files which are not device files. - if strings.HasSuffix(name, ".migrated") { - logrus.Debugf("devmapper: Skipping file %s", path) - return nil - } - - if strings.HasPrefix(name, ".") { - logrus.Debugf("devmapper: Skipping file %s", path) - return nil - } - - if name == deviceSetMetaFile { - logrus.Debugf("devmapper: Skipping file %s", path) - return nil - } - - if name == transactionMetaFile { - logrus.Debugf("devmapper: Skipping file %s", path) - return nil - } - - logrus.Debugf("devmapper: Loading data for file %s", path) - - // Include deleted devices also as cleanup delete device logic - // will go through it and see if there are any deleted devices. - if _, err := devices.lookupDevice(name); err != nil { - return fmt.Errorf("devmapper: Error looking up device %s:%w", name, err) - } - - return nil -} - -func (devices *DeviceSet) loadDeviceFilesOnStart() error { - logrus.Debug("devmapper: loadDeviceFilesOnStart()") - defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END") - - scan := func(path string, d fs.DirEntry, err error) error { - if err != nil { - logrus.Debugf("devmapper: Can't walk the file %s: %v", path, err) - return nil - } - - // Skip any directories - if d.IsDir() { - return nil - } - - return devices.deviceFileWalkFunction(path, d.Name()) - } - - return filepath.WalkDir(devices.metadataDir(), scan) -} - -// Should be called with devices.Lock() held. -func (devices *DeviceSet) unregisterDevice(hash string) error { - logrus.Debugf("devmapper: unregisterDevice(%v)", hash) - info := &devInfo{ - Hash: hash, - } - - delete(devices.Devices, hash) - - if err := devices.removeMetadata(info); err != nil { - logrus.Debugf("devmapper: Error removing metadata: %s", err) - return err - } - - return nil -} - -// Should be called with devices.Lock() held. 
-func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, transactionID uint64) (*devInfo, error) { - logrus.Debugf("devmapper: registerDevice(%v, %v)", id, hash) - info := &devInfo{ - Hash: hash, - DeviceID: id, - Size: size, - TransactionID: transactionID, - Initialized: false, - devices: devices, - } - - devices.Devices[hash] = info - - if err := devices.saveMetadata(info); err != nil { - // Try to remove unused device - delete(devices.Devices, hash) - return nil, err - } - - return info, nil -} - -func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bool) error { - logrus.Debugf("devmapper: activateDeviceIfNeeded(%v)", info.Hash) - - if info.Deleted && !ignoreDeleted { - return fmt.Errorf("devmapper: Can't activate device %v as it is marked for deletion", info.Hash) - } - - // Make sure deferred removal on device is canceled, if one was - // scheduled. - if err := devices.cancelDeferredRemovalIfNeeded(info); err != nil { - return fmt.Errorf("devmapper: Device Deferred Removal Cancellation Failed: %s", err) - } - - if devinfo, _ := devicemapper.GetInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { - return nil - } - - return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size) -} - -// xfsSupported checks if xfs is supported, returns nil if it is, otherwise an error -func xfsSupported() error { - // Make sure mkfs.xfs is available - if _, err := exec.LookPath("mkfs.xfs"); err != nil { - return err // error text is descriptive enough - } - - // Check if kernel supports xfs filesystem or not. - exec.Command("modprobe", xfs).Run() - - f, err := os.Open("/proc/filesystems") - if err != nil { - return fmt.Errorf("checking for xfs support: %w", err) - } - defer f.Close() - - s := bufio.NewScanner(f) - for s.Scan() { - if strings.HasSuffix(s.Text(), "\txfs") { - return nil - } - } - - if err := s.Err(); err != nil { - return fmt.Errorf("checking for xfs support: %w", err) - } - - return errors.New(`kernel does not support xfs, or "modprobe xfs" failed`) -} - -func determineDefaultFS() string { - err := xfsSupported() - if err == nil { - return xfs - } - - logrus.Warnf("devmapper: XFS is not supported in your system (%v). Defaulting to %s filesystem", ext4, err) - return ext4 -} - -// mkfsOptions tries to figure out whether some additional mkfs options are required -func mkfsOptions(fs string) []string { - if fs == xfs && !kernel.CheckKernelVersion(3, 16, 0) { - // For kernels earlier than 3.16 (and newer xfsutils), - // some xfs features need to be explicitly disabled. - return []string{"-m", "crc=0,finobt=0"} - } - - return []string{} -} - -func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { - devname := info.DevName() - - if devices.filesystem == "" { - devices.filesystem = determineDefaultFS() - } - if err := devices.saveBaseDeviceFilesystem(devices.filesystem); err != nil { - return err - } - - args := mkfsOptions(devices.filesystem) - args = append(args, devices.mkfsArgs...) 
- args = append(args, devname) - - logrus.Infof("devmapper: Creating filesystem %s on device %s, mkfs args: %v", devices.filesystem, info.Name(), args) - defer func() { - if err != nil { - logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err) - } else { - logrus.Infof("devmapper: Successfully created filesystem %s on device %s", devices.filesystem, info.Name()) - } - }() - - switch devices.filesystem { - case xfs: - err = exec.Command("mkfs.xfs", args...).Run() - case ext4: - err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run() - if err != nil { - err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run() - } - if err != nil { - return err - } - err = exec.Command("tune2fs", append([]string{"-c", "-1", "-i", "0"}, devname)...).Run() - default: - err = fmt.Errorf("devmapper: Unsupported filesystem type %s", devices.filesystem) - } - return -} - -func (devices *DeviceSet) migrateOldMetaData() error { - // Migrate old metadata file - jsonData, err := os.ReadFile(devices.oldMetadataFile()) - if err != nil && !os.IsNotExist(err) { - return err - } - - if jsonData != nil { - m := metaData{Devices: make(map[string]*devInfo)} - - if err := json.Unmarshal(jsonData, &m); err != nil { - return err - } - - for hash, info := range m.Devices { - info.Hash = hash - devices.saveMetadata(info) - } - if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil { - return err - } - - } - - return nil -} - -// Cleanup deleted devices. It assumes that all the devices have been -// loaded in the hash table. -func (devices *DeviceSet) cleanupDeletedDevices() error { - devices.Lock() - - // If there are no deleted devices, there is nothing to do. - if devices.nrDeletedDevices == 0 { - devices.Unlock() - return nil - } - - var deletedDevices []*devInfo - - for _, info := range devices.Devices { - if !info.Deleted { - continue - } - logrus.Debugf("devmapper: Found deleted device %s.", info.Hash) - deletedDevices = append(deletedDevices, info) - } - - // Delete the deleted devices. DeleteDevice() first takes the info lock - // and then devices.Lock(). So drop it to avoid deadlock. - devices.Unlock() - - for _, info := range deletedDevices { - // This will again try deferred deletion. - if err := devices.DeleteDevice(info.Hash, false); err != nil { - logrus.Warnf("devmapper: Deletion of device %s, device_id=%v failed:%v", info.Hash, info.DeviceID, err) - } - } - - return nil -} - -func (devices *DeviceSet) countDeletedDevices() { - for _, info := range devices.Devices { - if !info.Deleted { - continue - } - devices.nrDeletedDevices++ - } -} - -func (devices *DeviceSet) startDeviceDeletionWorker() { - // Deferred deletion is not enabled. Don't do anything. - if !devices.deferredDelete { - return - } - - // Cleanup right away if there are any leaked devices. 
Note this - // could cause some slowdown for process startup, if there were - // Leaked devices - devices.cleanupDeletedDevices() - logrus.Debug("devmapper: Worker to cleanup deleted devices started") - for range devices.deletionWorkerTicker.C { - devices.cleanupDeletedDevices() - } -} - -func (devices *DeviceSet) initMetaData() error { - devices.Lock() - defer devices.Unlock() - - if err := devices.migrateOldMetaData(); err != nil { - return err - } - - _, transactionID, _, _, _, _, err := devices.poolStatus() - if err != nil { - return err - } - - devices.TransactionID = transactionID - - if err := devices.loadDeviceFilesOnStart(); err != nil { - return fmt.Errorf("devmapper: Failed to load device files:%w", err) - } - - devices.constructDeviceIDMap() - devices.countDeletedDevices() - - if err := devices.processPendingTransaction(); err != nil { - return err - } - - // Start a goroutine to cleanup Deleted Devices - go devices.startDeviceDeletionWorker() - return nil -} - -func (devices *DeviceSet) incNextDeviceID() { - // IDs are 24bit, so wrap around - devices.NextDeviceID = (devices.NextDeviceID + 1) & maxDeviceID -} - -func (devices *DeviceSet) getNextFreeDeviceID() (int, error) { - devices.incNextDeviceID() - for i := 0; i <= maxDeviceID; i++ { - if devices.isDeviceIDFree(devices.NextDeviceID) { - devices.markDeviceIDUsed(devices.NextDeviceID) - return devices.NextDeviceID, nil - } - devices.incNextDeviceID() - } - - return 0, fmt.Errorf("devmapper: Unable to find a free device ID") -} - -func (devices *DeviceSet) poolHasFreeSpace() error { - if devices.minFreeSpacePercent == 0 { - return nil - } - - _, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() - if err != nil { - return err - } - - minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100 - if minFreeData < 1 { - minFreeData = 1 - } - dataFree := dataTotal - dataUsed - if dataFree < minFreeData { - return fmt.Errorf("devmapper: Thin Pool has %v free data blocks which is less than minimum required %v free data blocks. Create more free space in thin pool or use dm.min_free_space option to change behavior", (dataTotal - dataUsed), minFreeData) - } - - minFreeMetadata := (metadataTotal * uint64(devices.minFreeSpacePercent)) / 100 - if minFreeMetadata < 1 { - minFreeMetadata = 1 - } - - metadataFree := metadataTotal - metadataUsed - if metadataFree < minFreeMetadata { - return fmt.Errorf("devmapper: Thin Pool has %v free metadata blocks which is less than minimum required %v free metadata blocks. Create more free metadata space in thin pool or use dm.min_free_space option to change behavior", (metadataTotal - metadataUsed), minFreeMetadata) - } - - return nil -} - -func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) { - devices.Lock() - defer devices.Unlock() - - deviceID, err := devices.getNextFreeDeviceID() - if err != nil { - return nil, err - } - - if err := devices.openTransaction(hash, deviceID); err != nil { - logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) - devices.markDeviceIDFree(deviceID) - return nil, err - } - - for { - if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceID); err != nil { - if devicemapper.DeviceIDExists(err) { - // Device ID already exists. This should not - // happen. Now we have a mechanism to find - // a free device ID. So something is not right. - // Give a warning and continue. 
- logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) - deviceID, err = devices.getNextFreeDeviceID() - if err != nil { - return nil, err - } - // Save new device id into transaction - devices.refreshTransaction(deviceID) - continue - } - logrus.Debugf("devmapper: Error creating device: %s", err) - devices.markDeviceIDFree(deviceID) - return nil, err - } - break - } - - logrus.Debugf("devmapper: Registering device (id %v) with FS size %v", deviceID, devices.baseFsSize) - info, err := devices.registerDevice(deviceID, hash, devices.baseFsSize, devices.OpenTransactionID) - if err != nil { - _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) - devices.markDeviceIDFree(deviceID) - return nil, err - } - - if err := devices.closeTransaction(); err != nil { - devices.unregisterDevice(hash) - devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) - devices.markDeviceIDFree(deviceID) - return nil, err - } - return info, nil -} - -func (devices *DeviceSet) takeSnapshot(hash string, baseInfo *devInfo, size uint64) error { - var ( - devinfo *devicemapper.Info - err error - ) - - if err = devices.poolHasFreeSpace(); err != nil { - return err - } - - if devices.deferredRemove { - devinfo, err = devicemapper.GetInfoWithDeferred(baseInfo.Name()) - if err != nil { - return err - } - if devinfo != nil && devinfo.DeferredRemove != 0 { - err = devices.cancelDeferredRemoval(baseInfo) - if err != nil { - // If Error is ErrEnxio. Device is probably already gone. Continue. - if !errors.Is(err, devicemapper.ErrEnxio) { - return err - } - devinfo = nil - } else { - defer devices.deactivateDevice(baseInfo) - } - } - } else { - devinfo, err = devicemapper.GetInfo(baseInfo.Name()) - if err != nil { - return err - } - } - - doSuspend := devinfo != nil && devinfo.Exists != 0 - - if doSuspend { - if err = devicemapper.SuspendDevice(baseInfo.Name()); err != nil { - return err - } - defer devicemapper.ResumeDevice(baseInfo.Name()) - } - - if err = devices.createRegisterSnapDevice(hash, baseInfo, size); err != nil { - return err - } - - return nil -} - -func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo, size uint64) error { - deviceID, err := devices.getNextFreeDeviceID() - if err != nil { - return err - } - - if err := devices.openTransaction(hash, deviceID); err != nil { - logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) - devices.markDeviceIDFree(deviceID) - return err - } - - for { - if err := devicemapper.CreateSnapDeviceRaw(devices.getPoolDevName(), deviceID, baseInfo.DeviceID); err != nil { - if devicemapper.DeviceIDExists(err) { - // Device ID already exists. This should not - // happen. Now we have a mechanism to find - // a free device ID. So something is not right. - // Give a warning and continue. 
- logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) - deviceID, err = devices.getNextFreeDeviceID() - if err != nil { - return err - } - // Save new device id into transaction - devices.refreshTransaction(deviceID) - continue - } - logrus.Debugf("devmapper: Error creating snap device: %s", err) - devices.markDeviceIDFree(deviceID) - return err - } - break - } - - if _, err := devices.registerDevice(deviceID, hash, size, devices.OpenTransactionID); err != nil { - devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) - devices.markDeviceIDFree(deviceID) - logrus.Debugf("devmapper: Error registering device: %s", err) - return err - } - - if err := devices.closeTransaction(); err != nil { - devices.unregisterDevice(hash) - devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) - devices.markDeviceIDFree(deviceID) - return err - } - return nil -} - -func (devices *DeviceSet) loadMetadata(hash string) *devInfo { - info := &devInfo{Hash: hash, devices: devices} - - jsonData, err := os.ReadFile(devices.metadataFile(info)) - if err != nil { - logrus.Debugf("devmapper: Failed to read %s with err: %v", devices.metadataFile(info), err) - return nil - } - - if err := json.Unmarshal(jsonData, &info); err != nil { - logrus.Debugf("devmapper: Failed to unmarshal devInfo from %s with err: %v", devices.metadataFile(info), err) - return nil - } - - if info.DeviceID > maxDeviceID { - logrus.Errorf("devmapper: Ignoring Invalid DeviceId=%d", info.DeviceID) - return nil - } - - return info -} - -func getDeviceUUID(device string) (string, error) { - out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", device).Output() - if err != nil { - return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%w", device, err) - } - - uuid := strings.TrimSuffix(string(out), "\n") - uuid = strings.TrimSpace(uuid) - logrus.Debugf("devmapper: UUID for device: %s is:%s", device, uuid) - return uuid, nil -} - -func (devices *DeviceSet) getBaseDeviceSize() uint64 { - info, _ := devices.lookupDevice("") - if info == nil { - return 0 - } - return info.Size -} - -func (devices *DeviceSet) getBaseDeviceFS() string { - return devices.BaseDeviceFilesystem -} - -func (devices *DeviceSet) verifyBaseDeviceUUIDFS(baseInfo *devInfo) error { - devices.Lock() - defer devices.Unlock() - - if devices.filesystem == "" { - devices.filesystem = determineDefaultFS() - } - - if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { - return err - } - defer devices.deactivateDevice(baseInfo) - - uuid, err := getDeviceUUID(baseInfo.DevName()) - if err != nil { - return err - } - - if devices.BaseDeviceUUID != uuid { - return fmt.Errorf("devmapper: Current Base Device UUID:%s does not match with stored UUID:%s. Possibly using a different thin pool than last invocation", uuid, devices.BaseDeviceUUID) - } - - if devices.BaseDeviceFilesystem == "" { - fsType, err := ProbeFsType(baseInfo.DevName()) - if err != nil { - return err - } - if err := devices.saveBaseDeviceFilesystem(fsType); err != nil { - return err - } - } - - // If user specified a filesystem using dm.fs option and current - // file system of base image is not same, warn user that dm.fs - // will be ignored. - if devices.BaseDeviceFilesystem != devices.filesystem { - logrus.Warnf("devmapper: Base device already exists and has filesystem %s on it. 
User specified filesystem %s will be ignored.", devices.BaseDeviceFilesystem, devices.filesystem) - devices.filesystem = devices.BaseDeviceFilesystem - } - return nil -} - -func (devices *DeviceSet) saveBaseDeviceFilesystem(fs string) error { - devices.BaseDeviceFilesystem = fs - return devices.saveDeviceSetMetaData() -} - -func (devices *DeviceSet) saveBaseDeviceUUID(baseInfo *devInfo) error { - devices.Lock() - defer devices.Unlock() - - if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { - return err - } - defer devices.deactivateDevice(baseInfo) - - uuid, err := getDeviceUUID(baseInfo.DevName()) - if err != nil { - return err - } - - devices.BaseDeviceUUID = uuid - return devices.saveDeviceSetMetaData() -} - -func (devices *DeviceSet) createBaseImage() error { - logrus.Debug("devmapper: Initializing base device-mapper thin volume") - - // Create initial device - info, err := devices.createRegisterDevice("") - if err != nil { - return err - } - - logrus.Debug("devmapper: Creating filesystem on base device-mapper thin volume") - - if err := devices.activateDeviceIfNeeded(info, false); err != nil { - return err - } - - if err := devices.createFilesystem(info); err != nil { - return err - } - - info.Initialized = true - if err := devices.saveMetadata(info); err != nil { - info.Initialized = false - return err - } - - if err := devices.saveBaseDeviceUUID(info); err != nil { - return fmt.Errorf("devmapper: Could not query and save base device UUID:%w", err) - } - - return nil -} - -// Returns if thin pool device exists or not. If device exists, also makes -// sure it is a thin pool device and not some other type of device. -func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) { - logrus.Debugf("devmapper: Checking for existence of the pool %s", thinPoolDevice) - - info, err := devicemapper.GetInfo(thinPoolDevice) - if err != nil { - return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %w", thinPoolDevice, err) - } - - // Device does not exist. - if info.Exists == 0 { - return false, nil - } - - _, _, deviceType, _, err := devicemapper.GetStatus(thinPoolDevice) - if err != nil { - return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %w", thinPoolDevice, err) - } - - if deviceType != "thin-pool" { - return false, fmt.Errorf("devmapper: Device %s is not a thin pool", thinPoolDevice) - } - - return true, nil -} - -func (devices *DeviceSet) checkThinPool() error { - _, transactionID, dataUsed, _, _, _, err := devices.poolStatus() - if err != nil { - return err - } - if dataUsed != 0 { - return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) that already has used data blocks", - devices.thinPoolDevice) - } - if transactionID != 0 { - return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) with non-zero transaction ID", - devices.thinPoolDevice) - } - return nil -} - -// Base image is initialized properly. Either save UUID for first time (for -// upgrade case or verify UUID. -func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error { - // If BaseDeviceUUID is nil (upgrade case), save it and return success. 
- if devices.BaseDeviceUUID == "" { - if err := devices.saveBaseDeviceUUID(baseInfo); err != nil { - return fmt.Errorf("devmapper: Could not query and save base device UUID:%w", err) - } - return nil - } - - if err := devices.verifyBaseDeviceUUIDFS(baseInfo); err != nil { - return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %w", err) - } - - return nil -} - -func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error { - if !userBaseSize { - return nil - } - - if devices.baseFsSize < devices.getBaseDeviceSize() { - return fmt.Errorf("devmapper: Base device size cannot be smaller than %s", units.HumanSize(float64(devices.getBaseDeviceSize()))) - } - - if devices.baseFsSize == devices.getBaseDeviceSize() { - return nil - } - - info.lock.Lock() - defer info.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - info.Size = devices.baseFsSize - - if err := devices.saveMetadata(info); err != nil { - // Try to remove unused device - delete(devices.Devices, info.Hash) - return err - } - - return devices.growFS(info) -} - -func (devices *DeviceSet) growFS(info *devInfo) error { - if err := devices.activateDeviceIfNeeded(info, false); err != nil { - return fmt.Errorf("activating devmapper device: %s", err) - } - - defer devices.deactivateDevice(info) - - fsMountPoint := "/run/containers/storage/mnt" - if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) { - if err := os.MkdirAll(fsMountPoint, 0o700); err != nil { - return err - } - defer os.RemoveAll(fsMountPoint) - } - - options := "" - if devices.BaseDeviceFilesystem == xfs { - // XFS needs nouuid or it can't mount filesystems with the same fs - options = joinMountOptions(options, "nouuid") - } - options = joinMountOptions(options, devices.mountOptions) - - if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil { - return fmt.Errorf("failed to mount; dmesg: %s: %w", string(dmesg.Dmesg(256)), err) - } - - defer func() { - if err := mount.Unmount(fsMountPoint); err != nil { - logrus.Warnf("devmapper.growFS cleanup error: %v", err) - } - }() - - switch devices.BaseDeviceFilesystem { - case ext4: - if out, err := exec.Command("resize2fs", info.DevName()).CombinedOutput(); err != nil { - return fmt.Errorf("failed to grow rootfs:%s:%w", string(out), err) - } - case xfs: - if out, err := exec.Command("xfs_growfs", info.DevName()).CombinedOutput(); err != nil { - return fmt.Errorf("failed to grow rootfs:%s:%w", string(out), err) - } - default: - return fmt.Errorf("unsupported filesystem type %s", devices.BaseDeviceFilesystem) - } - return nil -} - -func (devices *DeviceSet) setupBaseImage() error { - oldInfo, _ := devices.lookupDeviceWithLock("") - - // base image already exists. If it is initialized properly, do UUID - // verification and return. Otherwise remove image and set it up - // fresh. - - if oldInfo != nil { - if oldInfo.Initialized && !oldInfo.Deleted { - if err := devices.setupVerifyBaseImageUUIDFS(oldInfo); err != nil { - return err - } - - if err := devices.checkGrowBaseDeviceFS(oldInfo); err != nil { - return err - } - - return nil - } - - logrus.Debug("devmapper: Removing uninitialized base image") - // If previous base device is in deferred delete state, - // that needs to be cleaned up first. So don't try - // deferred deletion. - if err := devices.DeleteDevice("", true); err != nil { - return err - } - } - - // If we are setting up base image for the first time, make sure - // thin pool is empty. 
- if devices.thinPoolDevice != "" && oldInfo == nil { - if err := devices.checkThinPool(); err != nil { - return err - } - } - - // Create new base image device - if err := devices.createBaseImage(); err != nil { - return err - } - - return nil -} - -func setCloseOnExec(name string) { - fileEntries, _ := os.ReadDir("/proc/self/fd") - for _, e := range fileEntries { - link, _ := os.Readlink(filepath.Join("/proc/self/fd", e.Name())) - if link == name { - fd, err := strconv.Atoi(e.Name()) - if err == nil { - unix.CloseOnExec(fd) - } - } - } -} - -func major(device uint64) uint64 { - return (device >> 8) & 0xfff -} - -func minor(device uint64) uint64 { - return (device & 0xff) | ((device >> 12) & 0xfff00) -} - -// ResizePool increases the size of the pool. -func (devices *DeviceSet) ResizePool(size int64) error { - dirname := devices.loopbackDir() - datafilename := path.Join(dirname, "data") - if len(devices.dataDevice) > 0 { - datafilename = devices.dataDevice - } - metadatafilename := path.Join(dirname, "metadata") - if len(devices.metadataDevice) > 0 { - metadatafilename = devices.metadataDevice - } - - datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0) - if datafile == nil { - return err - } - defer datafile.Close() - - fi, err := datafile.Stat() - if fi == nil { - return err - } - - if fi.Size() > size { - return fmt.Errorf("devmapper: Can't shrink file") - } - - dataloopback := loopback.FindLoopDeviceFor(datafile) - if dataloopback == nil { - return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", datafilename) - } - defer dataloopback.Close() - - metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0) - if metadatafile == nil { - return err - } - defer metadatafile.Close() - - metadataloopback := loopback.FindLoopDeviceFor(metadatafile) - if metadataloopback == nil { - return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", metadatafilename) - } - defer metadataloopback.Close() - - // Grow loopback file - if err := datafile.Truncate(size); err != nil { - return fmt.Errorf("devmapper: Unable to grow loopback file: %s", err) - } - - // Reload size for loopback device - if err := loopback.SetCapacity(dataloopback); err != nil { - return fmt.Errorf("unable to update loopback capacity: %s", err) - } - - // Suspend the pool - if err := devicemapper.SuspendDevice(devices.getPoolName()); err != nil { - return fmt.Errorf("devmapper: Unable to suspend pool: %s", err) - } - - // Reload with the new block sizes - if err := devicemapper.ReloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { - return fmt.Errorf("devmapper: Unable to reload pool: %s", err) - } - - // Resume the pool - if err := devicemapper.ResumeDevice(devices.getPoolName()); err != nil { - return fmt.Errorf("devmapper: Unable to resume pool: %s", err) - } - - return nil -} - -func (devices *DeviceSet) loadTransactionMetaData() error { - jsonData, err := os.ReadFile(devices.transactionMetaFile()) - if err != nil { - // There is no active transaction. This will be the case - // during upgrade. 
- if os.IsNotExist(err) { - devices.OpenTransactionID = devices.TransactionID - return nil - } - return err - } - - json.Unmarshal(jsonData, &devices.transaction) - return nil -} - -func (devices *DeviceSet) saveTransactionMetaData() error { - jsonData, err := json.Marshal(&devices.transaction) - if err != nil { - return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) - } - - return devices.writeMetaFile(jsonData, devices.transactionMetaFile()) -} - -func (devices *DeviceSet) removeTransactionMetaData() error { - return os.RemoveAll(devices.transactionMetaFile()) -} - -func (devices *DeviceSet) rollbackTransaction() error { - logrus.Debugf("devmapper: Rolling back open transaction: TransactionID=%d hash=%s device_id=%d", devices.OpenTransactionID, devices.DeviceIDHash, devices.DeviceID) - - // A device id might have already been deleted before transaction - // closed. In that case this call will fail. Just leave a message - // in case of failure. - if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceID); err != nil { - logrus.Errorf("devmapper: Unable to delete device: %s", err) - } - - dinfo := &devInfo{Hash: devices.DeviceIDHash} - if err := devices.removeMetadata(dinfo); err != nil { - logrus.Errorf("devmapper: Unable to remove metadata: %s", err) - } else { - devices.markDeviceIDFree(devices.DeviceID) - } - - if err := devices.removeTransactionMetaData(); err != nil { - logrus.Errorf("devmapper: Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err) - } - - return nil -} - -func (devices *DeviceSet) processPendingTransaction() error { - if err := devices.loadTransactionMetaData(); err != nil { - return err - } - - // If there was open transaction but pool transaction ID is same - // as open transaction ID, nothing to roll back. - if devices.TransactionID == devices.OpenTransactionID { - return nil - } - - // If open transaction ID is less than pool transaction ID, something - // is wrong. Bail out. - if devices.OpenTransactionID < devices.TransactionID { - logrus.Errorf("devmapper: Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionID, devices.TransactionID) - return nil - } - - // Pool transaction ID is not same as open transaction. There is - // a transaction which was not completed. - if err := devices.rollbackTransaction(); err != nil { - return fmt.Errorf("devmapper: Rolling back open transaction failed: %s", err) - } - - devices.OpenTransactionID = devices.TransactionID - return nil -} - -func (devices *DeviceSet) loadDeviceSetMetaData() error { - jsonData, err := os.ReadFile(devices.deviceSetMetaFile()) - if err != nil { - // For backward compatibility return success if file does - // not exist. 
- if os.IsNotExist(err) { - return nil - } - return err - } - - return json.Unmarshal(jsonData, devices) -} - -func (devices *DeviceSet) saveDeviceSetMetaData() error { - jsonData, err := json.Marshal(devices) - if err != nil { - return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) - } - - return devices.writeMetaFile(jsonData, devices.deviceSetMetaFile()) -} - -func (devices *DeviceSet) openTransaction(hash string, DeviceID int) error { - devices.allocateTransactionID() - devices.DeviceIDHash = hash - devices.DeviceID = DeviceID - if err := devices.saveTransactionMetaData(); err != nil { - return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) - } - return nil -} - -func (devices *DeviceSet) refreshTransaction(DeviceID int) error { - devices.DeviceID = DeviceID - if err := devices.saveTransactionMetaData(); err != nil { - return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) - } - return nil -} - -func (devices *DeviceSet) closeTransaction() error { - if err := devices.updatePoolTransactionID(); err != nil { - logrus.Debug("devmapper: Failed to close Transaction") - return err - } - return nil -} - -func determineDriverCapabilities(version string) error { - // Kernel driver version >= 4.27.0 support deferred removal - - logrus.Debugf("devicemapper: kernel dm driver version is %s", version) - - versionSplit := strings.Split(version, ".") - major, err := strconv.Atoi(versionSplit[0]) - if err != nil { - return fmt.Errorf("unable to parse driver major version %q as a number: %w", versionSplit[0], graphdriver.ErrNotSupported) - } - - if major > 4 { - driverDeferredRemovalSupport = true - return nil - } - - if major < 4 { - return nil - } - - minor, err := strconv.Atoi(versionSplit[1]) - if err != nil { - return fmt.Errorf("unable to parse driver minor version %q as a number: %w", versionSplit[1], graphdriver.ErrNotSupported) - } - - /* - * If major is 4 and minor is 27, then there is no need to - * check for patch level as it can not be less than 0. - */ - if minor >= 27 { - driverDeferredRemovalSupport = true - return nil - } - - return nil -} - -// Determine the major and minor number of loopback device -func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) { - var stat unix.Stat_t - err := unix.Stat(file.Name(), &stat) - if err != nil { - return 0, 0, err - } - - dev := stat.Rdev - majorNum := major(uint64(dev)) - minorNum := minor(uint64(dev)) - - logrus.Debugf("devmapper: Major:Minor for device: %s is:%v:%v", file.Name(), majorNum, minorNum) - return majorNum, minorNum, nil -} - -// Given a file which is backing file of a loop back device, find the -// loopback device name and its major/minor number. 
-func getLoopFileDeviceMajMin(filename string) (string, uint64, uint64, error) { - file, err := os.Open(filename) - if err != nil { - logrus.Debugf("devmapper: Failed to open file %s", filename) - return "", 0, 0, err - } - - defer file.Close() - loopbackDevice := loopback.FindLoopDeviceFor(file) - if loopbackDevice == nil { - return "", 0, 0, fmt.Errorf("devmapper: Unable to find loopback mount for: %s", filename) - } - defer loopbackDevice.Close() - - Major, Minor, err := getDeviceMajorMinor(loopbackDevice) - if err != nil { - return "", 0, 0, err - } - return loopbackDevice.Name(), Major, Minor, nil -} - -// Get the major/minor numbers of thin pool data and metadata devices -func (devices *DeviceSet) getThinPoolDataMetaMajMin() (uint64, uint64, uint64, uint64, error) { - var params, poolDataMajMin, poolMetadataMajMin string - - _, _, _, params, err := devicemapper.GetTable(devices.getPoolName()) - if err != nil { - return 0, 0, 0, 0, err - } - - if _, err = fmt.Sscanf(params, "%s %s", &poolMetadataMajMin, &poolDataMajMin); err != nil { - return 0, 0, 0, 0, err - } - - logrus.Debugf("devmapper: poolDataMajMin=%s poolMetaMajMin=%s\n", poolDataMajMin, poolMetadataMajMin) - - poolDataMajMinorSplit := strings.Split(poolDataMajMin, ":") - poolDataMajor, err := strconv.ParseUint(poolDataMajMinorSplit[0], 10, 32) - if err != nil { - return 0, 0, 0, 0, err - } - - poolDataMinor, err := strconv.ParseUint(poolDataMajMinorSplit[1], 10, 32) - if err != nil { - return 0, 0, 0, 0, err - } - - poolMetadataMajMinorSplit := strings.Split(poolMetadataMajMin, ":") - poolMetadataMajor, err := strconv.ParseUint(poolMetadataMajMinorSplit[0], 10, 32) - if err != nil { - return 0, 0, 0, 0, err - } - - poolMetadataMinor, err := strconv.ParseUint(poolMetadataMajMinorSplit[1], 10, 32) - if err != nil { - return 0, 0, 0, 0, err - } - - return poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, nil -} - -func (devices *DeviceSet) loadThinPoolLoopBackInfo() error { - poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, err := devices.getThinPoolDataMetaMajMin() - if err != nil { - return err - } - - dirname := devices.loopbackDir() - - // data device has not been passed in. So there should be a data file - // which is being mounted as loop device. - if devices.dataDevice == "" { - datafilename := path.Join(dirname, "data") - dataLoopDevice, dataMajor, dataMinor, err := getLoopFileDeviceMajMin(datafilename) - if err != nil { - return err - } - - // Compare the two - if poolDataMajor == dataMajor && poolDataMinor == dataMinor { - devices.dataDevice = dataLoopDevice - devices.dataLoopFile = datafilename - } - - } - - // metadata device has not been passed in. So there should be a - // metadata file which is being mounted as loop device. - if devices.metadataDevice == "" { - metadatafilename := path.Join(dirname, "metadata") - metadataLoopDevice, metadataMajor, metadataMinor, err := getLoopFileDeviceMajMin(metadatafilename) - if err != nil { - return err - } - if poolMetadataMajor == metadataMajor && poolMetadataMinor == metadataMinor { - devices.metadataDevice = metadataLoopDevice - devices.metadataLoopFile = metadatafilename - } - } - - return nil -} - -func (devices *DeviceSet) enableDeferredRemovalDeletion() error { - // If user asked for deferred removal then check both libdm library - // and kernel driver support deferred removal otherwise error out. 
- if enableDeferredRemoval { - if !driverDeferredRemovalSupport { - return fmt.Errorf("devmapper: Deferred removal can not be enabled as kernel does not support it") - } - if !devicemapper.LibraryDeferredRemovalSupport { - return fmt.Errorf("devmapper: Deferred removal can not be enabled as libdm does not support it") - } - logrus.Debug("devmapper: Deferred removal support enabled.") - devices.deferredRemove = true - } - - if enableDeferredDeletion { - if !devices.deferredRemove { - return fmt.Errorf("devmapper: Deferred deletion can not be enabled as deferred removal is not enabled. Enable deferred removal using --storage-opt dm.use_deferred_removal=true parameter") - } - logrus.Debug("devmapper: Deferred deletion support enabled.") - devices.deferredDelete = true - } - return nil -} - -func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) { - if err := devices.enableDeferredRemovalDeletion(); err != nil { - return err - } - - // https://github.com/docker/docker/issues/4036 - if supported := devicemapper.UdevSetSyncSupport(true); !supported { - logrus.Error("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/dockerd/#storage-driver-options") - - if !devices.overrideUdevSyncCheck { - return graphdriver.ErrNotSupported - } - } - - // create the root dir of the devmapper driver ownership to match this - // daemon's remapped root uid/gid so containers can start properly - uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) - if err != nil { - return err - } - if err := idtools.MkdirAs(devices.root, 0o700, uid, gid); err != nil { - return err - } - if err := os.MkdirAll(devices.metadataDir(), 0o700); err != nil { - return err - } - if err := idtools.MkdirAs(filepath.Join(devices.root, "mnt"), 0o700, uid, gid); err != nil && !errors.Is(err, os.ErrExist) { - return err - } - - prevSetupConfig, err := readLVMConfig(devices.root) - if err != nil { - return err - } - - if !reflect.DeepEqual(devices.lvmSetupConfig, directLVMConfig{}) { - if devices.thinPoolDevice != "" { - return errors.New("cannot setup direct-lvm when `dm.thinpooldev` is also specified") - } - - if !reflect.DeepEqual(prevSetupConfig, devices.lvmSetupConfig) { - if !reflect.DeepEqual(prevSetupConfig, directLVMConfig{}) { - return errors.New("changing direct-lvm config is not supported") - } - logrus.WithField("storage-driver", "devicemapper").WithField("direct-lvm-config", devices.lvmSetupConfig).Debugf("Setting up direct lvm mode") - if err := verifyBlockDevice(devices.lvmSetupConfig.Device, lvmSetupConfigForce); err != nil { - return err - } - if err := setupDirectLVM(devices.lvmSetupConfig); err != nil { - return err - } - if err := writeLVMConfig(devices.root, devices.lvmSetupConfig); err != nil { - return err - } - } - devices.thinPoolDevice = "storage-thinpool" - logrus.WithField("storage-driver", "devicemapper").Debugf("Setting dm.thinpooldev to %q", devices.thinPoolDevice) - } - - // Set the device prefix from the device id and inode of the storage root dir - var st unix.Stat_t - if err := unix.Stat(devices.root, &st); err != nil { - return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err) - } - // "reg-" stands for "regular file". - // In the future we might use "dev-" for "device file", etc. 
- // container-maj,min[-inode] stands for: - // - Managed by container storage - // - The target of this device is at major and minor - // - If is defined, use that file inside the device as a loopback image. Otherwise use the device itself. - devices.devicePrefix = fmt.Sprintf("container-%d:%d-%d", major(uint64(st.Dev)), minor(uint64(st.Dev)), st.Ino) - logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix) - - // Check for the existence of the thin-pool device - poolExists, err := devices.thinPoolExists(devices.getPoolName()) - if err != nil { - return err - } - - // It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files - // that are not Close-on-exec, - // so we add this badhack to make sure it closes itself - setCloseOnExec("/dev/mapper/control") - - // Make sure the sparse images exist in /devicemapper/data and - // /devicemapper/metadata - - createdLoopback := false - - // If the pool doesn't exist, create it - if !poolExists && devices.thinPoolDevice == "" { - logrus.Debug("devmapper: Pool doesn't exist. Creating it.") - - var ( - dataFile *os.File - metadataFile *os.File - ) - - fsMagic, err := graphdriver.GetFSMagic(devices.loopbackDir()) - if err != nil { - return err - } - switch fsMagic { - case graphdriver.FsMagicAufs: - return fmt.Errorf("devmapper: Loopback devices can not be created on AUFS filesystems") - } - - if devices.dataDevice == "" { - // Make sure the sparse images exist in /devicemapper/data - - hasData := devices.hasImage("data") - - if !doInit && !hasData { - return errors.New("loopback data file not found") - } - - if !hasData { - createdLoopback = true - } - - data, err := devices.ensureImage("data", devices.dataLoopbackSize) - if err != nil { - logrus.Debugf("devmapper: Error device ensureImage (data): %s", err) - return err - } - - dataFile, err = loopback.AttachLoopDevice(data) - if err != nil { - return err - } - devices.dataLoopFile = data - devices.dataDevice = dataFile.Name() - } else { - dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0o600) - if err != nil { - return err - } - } - defer dataFile.Close() - - if devices.metadataDevice == "" { - // Make sure the sparse images exist in /devicemapper/metadata - - hasMetadata := devices.hasImage("metadata") - - if !doInit && !hasMetadata { - return errors.New("loopback metadata file not found") - } - - if !hasMetadata { - createdLoopback = true - } - - metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize) - if err != nil { - logrus.Debugf("devmapper: Error device ensureImage (metadata): %s", err) - return err - } - - metadataFile, err = loopback.AttachLoopDevice(metadata) - if err != nil { - return err - } - devices.metadataLoopFile = metadata - devices.metadataDevice = metadataFile.Name() - } else { - metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0o600) - if err != nil { - return err - } - } - defer metadataFile.Close() - - if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { - return err - } - defer func() { - if retErr != nil { - err = devices.deactivatePool() - if err != nil { - logrus.Warnf("devmapper: Failed to deactivatePool: %v", err) - } - } - }() - } - - // Pool already exists and caller did not pass us a pool. That means - // we probably created pool earlier and could not remove it as some - // containers were still using it. Detect some of the properties of - // pool, like is it using loop devices. 
- if poolExists && devices.thinPoolDevice == "" { - if err := devices.loadThinPoolLoopBackInfo(); err != nil { - logrus.Debugf("devmapper: Failed to load thin pool loopback device information:%v", err) - return err - } - } - - // If we didn't just create the data or metadata image, we need to - // load the transaction id and migrate old metadata - if !createdLoopback { - if err := devices.initMetaData(); err != nil { - return err - } - } - - if devices.thinPoolDevice == "" { - if devices.metadataLoopFile != "" || devices.dataLoopFile != "" { - logrus.Warn("devmapper: Usage of loopback devices is strongly discouraged for production use. Please use `--storage-opt dm.thinpooldev`.") - } - } - - // Right now this loads only NextDeviceID. If there is more metadata - // down the line, we might have to move it earlier. - if err := devices.loadDeviceSetMetaData(); err != nil { - return err - } - - // Setup the base image - if doInit { - if err := devices.setupBaseImage(); err != nil { - logrus.Debugf("devmapper: Error device setupBaseImage: %s", err) - return err - } - } - - return nil -} - -// AddDevice adds a device and registers in the hash. -func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string]string) error { - logrus.Debugf("devmapper: AddDevice START(hash=%s basehash=%s)", hash, baseHash) - defer logrus.Debugf("devmapper: AddDevice END(hash=%s basehash=%s)", hash, baseHash) - - // If a deleted device exists, return error. - baseInfo, err := devices.lookupDeviceWithLock(baseHash) - if err != nil { - return err - } - - if baseInfo.Deleted { - return fmt.Errorf("devmapper: Base device %v has been marked for deferred deletion", baseInfo.Hash) - } - - baseInfo.lock.Lock() - defer baseInfo.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - // Also include deleted devices in case hash of new device is - // same as one of the deleted devices. - if info, _ := devices.lookupDevice(hash); info != nil { - return fmt.Errorf("devmapper: device %s already exists. Deleted=%v", hash, info.Deleted) - } - - size, err := devices.parseStorageOpt(storageOpt) - if err != nil { - return err - } - - if size == 0 { - size = baseInfo.Size - } - - if size < baseInfo.Size { - return fmt.Errorf("devmapper: Container size cannot be smaller than %s", units.HumanSize(float64(baseInfo.Size))) - } - - if err := devices.takeSnapshot(hash, baseInfo, size); err != nil { - return err - } - - // Grow the container rootfs. - if size > baseInfo.Size { - info, err := devices.lookupDevice(hash) - if err != nil { - return err - } - - if err := devices.growFS(info); err != nil { - return err - } - } - - return nil -} - -func (devices *DeviceSet) parseStorageOpt(storageOpt map[string]string) (uint64, error) { - // Read size to change the block device size per container. - for key, val := range storageOpt { - key := strings.ToLower(key) - switch key { - case "size": - size, err := units.RAMInBytes(val) - if err != nil { - return 0, err - } - return uint64(size), nil - default: - return 0, fmt.Errorf("unknown option %s", key) - } - } - - return 0, nil -} - -func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error { - // If device is already in deleted state, there is nothing to be done. - if info.Deleted { - return nil - } - - logrus.Debugf("devmapper: Marking device %s for deferred deletion.", info.Hash) - - info.Deleted = true - - // save device metadata to reflect deleted state. 
- if err := devices.saveMetadata(info); err != nil { - info.Deleted = false - return err - } - - devices.nrDeletedDevices++ - return nil -} - -// Should be called with devices.Lock() held. -func (devices *DeviceSet) deleteDeviceNoLock(info *devInfo, syncDelete bool) error { - err := devicemapper.DeleteDevice(devices.getPoolDevName(), info.DeviceID) - if err != nil { - // If syncDelete is true, we want to return error. If deferred - // deletion is not enabled, we return an error. If error is - // something other then EBUSY, return an error. - if syncDelete || !devices.deferredDelete || !errors.Is(err, devicemapper.ErrBusy) { - logrus.Debugf("devmapper: Error deleting device: %s", err) - return err - } - } - - if err == nil { - if err := devices.unregisterDevice(info.Hash); err != nil { - return err - } - // If device was already in deferred delete state that means - // deletion was being tried again later. Reduce the deleted - // device count. - if info.Deleted { - devices.nrDeletedDevices-- - } - devices.markDeviceIDFree(info.DeviceID) - } else { - if err := devices.markForDeferredDeletion(info); err != nil { - return err - } - } - - return nil -} - -// Issue discard only if device open count is zero. -func (devices *DeviceSet) issueDiscard(info *devInfo) error { - logrus.Debugf("devmapper: issueDiscard START(device: %s).", info.Hash) - defer logrus.Debugf("devmapper: issueDiscard END(device: %s).", info.Hash) - // This is a workaround for the kernel not discarding block so - // on the thin pool when we remove a thinp device, so we do it - // manually. - // Even if device is deferred deleted, activate it and issue - // discards. - if err := devices.activateDeviceIfNeeded(info, true); err != nil { - return err - } - - devinfo, err := devicemapper.GetInfo(info.Name()) - if err != nil { - return err - } - - if devinfo.OpenCount != 0 { - logrus.Debugf("devmapper: Device: %s is in use. OpenCount=%d. Not issuing discards.", info.Hash, devinfo.OpenCount) - return nil - } - - if err := devicemapper.BlockDeviceDiscard(info.DevName()); err != nil { - logrus.Debugf("devmapper: Error discarding block on device: %s (ignoring)", err) - } - return nil -} - -// Should be called with devices.Lock() held. -func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error { - if err := devices.openTransaction(info.Hash, info.DeviceID); err != nil { - logrus.WithField("storage-driver", "devicemapper").Debugf("Error opening transaction hash = %s deviceId = %d", info.Hash, info.DeviceID) - return err - } - - defer devices.closeTransaction() - - if devices.doBlkDiscard { - devices.issueDiscard(info) - } - - // Try to deactivate device in case it is active. - // If deferred removal is enabled and deferred deletion is disabled - // then make sure device is removed synchronously. There have been - // some cases of device being busy for short duration and we would - // rather busy wait for device removal to take care of these cases. - deferredRemove := devices.deferredRemove - if !devices.deferredDelete { - deferredRemove = false - } - - if err := devices.deactivateDeviceMode(info, deferredRemove); err != nil { - logrus.Debugf("devmapper: Error deactivating device: %s", err) - return err - } - - if err := devices.deleteDeviceNoLock(info, syncDelete); err != nil { - return err - } - - return nil -} - -// DeleteDevice will return success if device has been marked for deferred -// removal. 
If one wants to override that and want DeleteDevice() to fail if -// device was busy and could not be deleted, set syncDelete=true. -func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error { - logrus.Debugf("devmapper: DeleteDevice START(hash=%v syncDelete=%v)", hash, syncDelete) - defer logrus.Debugf("devmapper: DeleteDevice END(hash=%v syncDelete=%v)", hash, syncDelete) - info, err := devices.lookupDeviceWithLock(hash) - if err != nil { - return err - } - - info.lock.Lock() - defer info.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - return devices.deleteDevice(info, syncDelete) -} - -func (devices *DeviceSet) deactivatePool() error { - logrus.Debug("devmapper: deactivatePool() START") - defer logrus.Debug("devmapper: deactivatePool() END") - devname := devices.getPoolDevName() - - devinfo, err := devicemapper.GetInfo(devname) - if err != nil { - return err - } - - if devinfo.Exists == 0 { - return nil - } - if err := devicemapper.RemoveDevice(devname); err != nil { - return err - } - - if d, err := devicemapper.GetDeps(devname); err == nil { - logrus.Warnf("devmapper: device %s still has %d active dependents", devname, d.Count) - } - - return nil -} - -func (devices *DeviceSet) deactivateDevice(info *devInfo) error { - return devices.deactivateDeviceMode(info, devices.deferredRemove) -} - -func (devices *DeviceSet) deactivateDeviceMode(info *devInfo, deferredRemove bool) error { - var err error - logrus.Debugf("devmapper: deactivateDevice START(%s)", info.Hash) - defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash) - - devinfo, err := devicemapper.GetInfo(info.Name()) - if err != nil { - return err - } - - if devinfo.Exists == 0 { - return nil - } - - if deferredRemove { - err = devicemapper.RemoveDeviceDeferred(info.Name()) - } else { - err = devices.removeDevice(info.Name()) - } - - // This function's semantics is such that it does not return an - // error if device does not exist. So if device went away by - // the time we actually tried to remove it, do not return error. - if !errors.Is(err, devicemapper.ErrEnxio) { - return err - } - return nil -} - -// Issues the underlying dm remove operation. -func (devices *DeviceSet) removeDevice(devname string) error { - var err error - - logrus.Debugf("devmapper: removeDevice START(%s)", devname) - defer logrus.Debugf("devmapper: removeDevice END(%s)", devname) - - for i := 0; i < 200; i++ { - err = devicemapper.RemoveDevice(devname) - if err == nil { - break - } - if !errors.Is(err, devicemapper.ErrBusy) { - return err - } - - // If we see EBUSY it may be a transient error, - // sleep a bit a retry a few times. - devices.Unlock() - time.Sleep(100 * time.Millisecond) - devices.Lock() - } - - return err -} - -func (devices *DeviceSet) cancelDeferredRemovalIfNeeded(info *devInfo) error { - if !devices.deferredRemove { - return nil - } - - logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded START(%s)", info.Name()) - defer logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded END(%s)", info.Name()) - - devinfo, err := devicemapper.GetInfoWithDeferred(info.Name()) - if err != nil { - return err - } - - if devinfo != nil && devinfo.DeferredRemove == 0 { - return nil - } - - // Cancel deferred remove - if err := devices.cancelDeferredRemoval(info); err != nil { - // If Error is ErrEnxio. Device is probably already gone. Continue. 
- if !errors.Is(err, devicemapper.ErrEnxio) { - return err - } - } - return nil -} - -func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error { - logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name()) - defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name()) - - var err error - - // Cancel deferred remove - for i := 0; i < 100; i++ { - err = devicemapper.CancelDeferredRemove(info.Name()) - if err != nil { - if !errors.Is(err, devicemapper.ErrBusy) { - // If we see EBUSY it may be a transient error, - // sleep a bit a retry a few times. - devices.Unlock() - time.Sleep(100 * time.Millisecond) - devices.Lock() - continue - } - } - break - } - return err -} - -func (devices *DeviceSet) unmountAndDeactivateAll(dir string) { - files, err := os.ReadDir(dir) - if err != nil { - logrus.Warnf("devmapper: unmountAndDeactivate: %s", err) - return - } - - for _, d := range files { - if !d.IsDir() { - continue - } - - name := d.Name() - fullname := path.Join(dir, name) - - // We use MNT_DETACH here in case it is still busy in some running - // container. This means it'll go away from the global scope directly, - // and the device will be released when that container dies. - if err := mount.Unmount(fullname); err != nil { - logrus.Warnf("devmapper.Shutdown error: %s", err) - } - - if devInfo, err := devices.lookupDevice(name); err != nil { - logrus.Debugf("devmapper: Shutdown lookup device %s, error: %s", name, err) - } else { - if err := devices.deactivateDevice(devInfo); err != nil { - logrus.Debugf("devmapper: Shutdown deactivate %s, error: %s", devInfo.Hash, err) - } - } - } -} - -// Shutdown shuts down the device by unmounting the root. -func (devices *DeviceSet) Shutdown(home string) error { - logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix) - logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root) - defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix) - - // Stop deletion worker. This should start delivering new events to - // ticker channel. That means no new instance of cleanupDeletedDevice() - // will run after this call. If one instance is already running at - // the time of the call, it must be holding devices.Lock() and - // we will block on this lock till cleanup function exits. - devices.deletionWorkerTicker.Stop() - - devices.Lock() - // Save DeviceSet Metadata first. Docker kills all threads if they - // don't finish in certain time. It is possible that Shutdown() - // routine does not finish in time as we loop trying to deactivate - // some devices while these are busy. In that case shutdown() routine - // will be killed and we will not get a chance to save deviceset - // metadata. Hence save this early before trying to deactivate devices. - devices.saveDeviceSetMetaData() - devices.unmountAndDeactivateAll(path.Join(home, "mnt")) - devices.Unlock() - - info, _ := devices.lookupDeviceWithLock("") - if info != nil { - info.lock.Lock() - devices.Lock() - if err := devices.deactivateDevice(info); err != nil { - logrus.Debugf("devmapper: Shutdown deactivate base, error: %s", err) - } - devices.Unlock() - info.lock.Unlock() - } - - devices.Lock() - if devices.thinPoolDevice == "" { - if err := devices.deactivatePool(); err != nil { - logrus.Debugf("devmapper: Shutdown deactivate pool, error: %s", err) - } - } - devices.Unlock() - - return nil -} - -// Recent XFS changes allow changing behavior of filesystem in case of errors. 
-// When thin pool gets full and XFS gets ENOSPC error, currently it tries -// IO infinitely and sometimes it can block the container process -// and process can't be killWith 0 value, XFS will not retry upon error -// and instead will shutdown filesystem. - -func (devices *DeviceSet) xfsSetNospaceRetries(info *devInfo) error { - dmDevicePath, err := os.Readlink(info.DevName()) - if err != nil { - return fmt.Errorf("devmapper: readlink failed for device %v:%w", info.DevName(), err) - } - - dmDeviceName := path.Base(dmDevicePath) - filePath := "/sys/fs/xfs/" + dmDeviceName + "/error/metadata/ENOSPC/max_retries" - maxRetriesFile, err := os.OpenFile(filePath, os.O_WRONLY, 0) - if err != nil { - return fmt.Errorf("devmapper: user specified daemon option dm.xfs_nospace_max_retries but it does not seem to be supported on this system :%w", err) - } - defer maxRetriesFile.Close() - - // Set max retries to 0 - _, err = maxRetriesFile.WriteString(devices.xfsNospaceRetries) - if err != nil { - return fmt.Errorf("devmapper: Failed to write string %v to file %v:%w", devices.xfsNospaceRetries, filePath, err) - } - return nil -} - -// MountDevice mounts the device if not already mounted. -func (devices *DeviceSet) MountDevice(hash, path string, moptions graphdriver.MountOpts) error { - info, err := devices.lookupDeviceWithLock(hash) - if err != nil { - return err - } - - if info.Deleted { - return fmt.Errorf("devmapper: Can't mount device %v as it has been marked for deferred deletion", info.Hash) - } - - info.lock.Lock() - defer info.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - if err := devices.activateDeviceIfNeeded(info, false); err != nil { - return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) - } - - fstype, err := ProbeFsType(info.DevName()) - if err != nil { - return err - } - - options := "" - - if fstype == xfs { - // XFS needs nouuid or it can't mount filesystems with the same fs - options = joinMountOptions(options, "nouuid") - } - - mountOptions := devices.mountOptions - if len(moptions.Options) > 0 { - addNouuid := strings.Contains("nouuid", mountOptions) - mountOptions = strings.Join(moptions.Options, ",") - if addNouuid { - mountOptions = fmt.Sprintf("nouuid,%s", mountOptions) - } - } - - options = joinMountOptions(options, mountOptions) - options = joinMountOptions(options, label.FormatMountLabel("", moptions.MountLabel)) - - if err := mount.Mount(info.DevName(), path, fstype, options); err != nil { - return fmt.Errorf("failed to mount; dmesg: %s: %w", string(dmesg.Dmesg(256)), err) - } - - if fstype == xfs && devices.xfsNospaceRetries != "" { - if err := devices.xfsSetNospaceRetries(info); err != nil { - if err := mount.Unmount(path); err != nil { - logrus.Warnf("devmapper.MountDevice cleanup error: %v", err) - } - devices.deactivateDevice(info) - return err - } - } - - return nil -} - -// UnmountDevice unmounts the device and removes it from hash. 
-func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error { - logrus.Debugf("devmapper: UnmountDevice START(hash=%s)", hash) - defer logrus.Debugf("devmapper: UnmountDevice END(hash=%s)", hash) - - info, err := devices.lookupDeviceWithLock(hash) - if err != nil { - return err - } - - info.lock.Lock() - defer info.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - logrus.Debugf("devmapper: Unmount(%s)", mountPath) - if err := mount.Unmount(mountPath); err != nil { - if ok, _ := Mounted(mountPath); ok { - return err - } - } - logrus.Debug("devmapper: Unmount done") - - // Remove the mountpoint here. Removing the mountpoint (in newer kernels) - // will cause all other instances of this mount in other mount namespaces - // to be killed (this is an anti-DoS measure that is necessary for things - // like devicemapper). This is necessary to avoid cases where a libdm mount - // that is present in another namespace will cause subsequent RemoveDevice - // operations to fail. We ignore any errors here because this may fail on - // older kernels which don't have - // torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied. - if err := os.Remove(mountPath); err != nil { - logrus.Debugf("devmapper: error doing a remove on unmounted device %s: %v", mountPath, err) - } - - return devices.deactivateDevice(info) -} - -// HasDevice returns true if the device metadata exists. -func (devices *DeviceSet) HasDevice(hash string) bool { - info, _ := devices.lookupDeviceWithLock(hash) - return info != nil -} - -// List returns a list of device ids. -func (devices *DeviceSet) List() []string { - devices.Lock() - defer devices.Unlock() - - ids := make([]string, len(devices.Devices)) - i := 0 - for k := range devices.Devices { - ids[i] = k - i++ - } - return ids -} - -// ListLayers returns a list of device IDs, omitting the ""/"base" device and -// any which have been marked as deleted. 
-func (devices *DeviceSet) ListLayers() ([]string, error) { - if err := devices.cleanupDeletedDevices(); err != nil { - return nil, err - } - - devices.Lock() - defer devices.Unlock() - - ids := make([]string, 0, len(devices.Devices)) - for k, d := range devices.Devices { - if k == "" || d.Deleted { - continue - } - ids = append(ids, k) - } - return ids, nil -} - -func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { - var params string - _, sizeInSectors, _, params, err = devicemapper.GetStatus(devName) - if err != nil { - logrus.Debugf("could not find devicemapper status: %v", err) - return - } - if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err != nil { - logrus.Debugf("could not find scanf devicemapper status: %v", err) - } - return -} - -// GetDeviceStatus provides size, mapped sectors -func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { - info, err := devices.lookupDeviceWithLock(hash) - if err != nil { - return nil, err - } - - info.lock.Lock() - defer info.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - status := &DevStatus{ - DeviceID: info.DeviceID, - Size: info.Size, - TransactionID: info.TransactionID, - } - - if err := devices.activateDeviceIfNeeded(info, false); err != nil { - return nil, fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) - } - - sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()) - if err != nil { - return nil, err - } - - status.SizeInSectors = sizeInSectors - status.MappedSectors = mappedSectors - status.HighestMappedSector = highestMappedSector - - return status, nil -} - -func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionID, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { - var params string - if _, totalSizeInSectors, _, params, err = devicemapper.GetStatus(devices.getPoolName()); err == nil { - _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionID, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) - } - return -} - -// DataDevicePath returns the path to the data storage for this deviceset, -// regardless of loopback or block device -func (devices *DeviceSet) DataDevicePath() string { - return devices.dataDevice -} - -// MetadataDevicePath returns the path to the metadata storage for this deviceset, -// regardless of loopback or block device -func (devices *DeviceSet) MetadataDevicePath() string { - return devices.metadataDevice -} - -func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) { - buf := new(unix.Statfs_t) - if err := unix.Statfs(loopFile, buf); err != nil { - logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err) - return 0, err - } - return buf.Bfree * uint64(buf.Bsize), nil -} - -func (devices *DeviceSet) isRealFile(loopFile string) (bool, error) { - if loopFile != "" { - fi, err := os.Stat(loopFile) - if err != nil { - logrus.Warnf("devmapper: Couldn't stat loopfile %v: %v", loopFile, err) - return false, err - } - return fi.Mode().IsRegular(), nil - } - return false, nil -} - -// Status returns the current status of this deviceset -func (devices *DeviceSet) Status() *Status { - devices.Lock() - defer devices.Unlock() - - status := &Status{} - - status.PoolName = devices.getPoolName() - status.DataFile = devices.DataDevicePath() - status.DataLoopback = devices.dataLoopFile - status.MetadataFile = 
devices.MetadataDevicePath() - status.MetadataLoopback = devices.metadataLoopFile - status.UdevSyncSupported = devicemapper.UdevSyncSupported() - status.DeferredRemoveEnabled = devices.deferredRemove - status.DeferredDeleteEnabled = devices.deferredDelete - status.DeferredDeletedDeviceCount = devices.nrDeletedDevices - status.BaseDeviceSize = devices.getBaseDeviceSize() - status.BaseDeviceFS = devices.getBaseDeviceFS() - - totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() - if err == nil { - // Convert from blocks to bytes - blockSizeInSectors := totalSizeInSectors / dataTotal - - status.Data.Used = dataUsed * blockSizeInSectors * 512 - status.Data.Total = dataTotal * blockSizeInSectors * 512 - status.Data.Available = status.Data.Total - status.Data.Used - - // metadata blocks are always 4k - status.Metadata.Used = metadataUsed * 4096 - status.Metadata.Total = metadataTotal * 4096 - status.Metadata.Available = status.Metadata.Total - status.Metadata.Used - - status.SectorSize = blockSizeInSectors * 512 - - if check, _ := devices.isRealFile(devices.dataLoopFile); check { - actualSpace, err := devices.getUnderlyingAvailableSpace(devices.dataLoopFile) - if err == nil && actualSpace < status.Data.Available { - status.Data.Available = actualSpace - } - } - - if check, _ := devices.isRealFile(devices.metadataLoopFile); check { - actualSpace, err := devices.getUnderlyingAvailableSpace(devices.metadataLoopFile) - if err == nil && actualSpace < status.Metadata.Available { - status.Metadata.Available = actualSpace - } - } - - minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100 - status.MinFreeSpace = minFreeData * blockSizeInSectors * 512 - } - - return status -} - -// Status returns the current status of this deviceset -func (devices *DeviceSet) exportDeviceMetadata(hash string) (*deviceMetadata, error) { - info, err := devices.lookupDeviceWithLock(hash) - if err != nil { - return nil, err - } - - info.lock.Lock() - defer info.lock.Unlock() - - metadata := &deviceMetadata{info.DeviceID, info.Size, info.Name()} - return metadata, nil -} - -// NewDeviceSet creates the device set based on the options provided. 
-func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps []idtools.IDMap) (*DeviceSet, error) { - devicemapper.SetDevDir("/dev") - - devices := &DeviceSet{ - root: root, - metaData: metaData{Devices: make(map[string]*devInfo)}, - dataLoopbackSize: defaultDataLoopbackSize, - metaDataLoopbackSize: defaultMetaDataLoopbackSize, - baseFsSize: defaultBaseFsSize, - overrideUdevSyncCheck: defaultUdevSyncOverride, - doBlkDiscard: true, - thinpBlockSize: defaultThinpBlockSize, - deviceIDMap: make([]byte, deviceIDMapSz), - deletionWorkerTicker: time.NewTicker(time.Second * 30), - uidMaps: uidMaps, - gidMaps: gidMaps, - minFreeSpacePercent: defaultMinFreeSpacePercent, - } - - version, err := devicemapper.GetDriverVersion() - if err != nil { - // Can't even get driver version, assume not supported - return nil, graphdriver.ErrNotSupported - } - - if err := determineDriverCapabilities(version); err != nil { - return nil, graphdriver.ErrNotSupported - } - - if driverDeferredRemovalSupport && devicemapper.LibraryDeferredRemovalSupport { - // enable deferred stuff by default - enableDeferredDeletion = true - enableDeferredRemoval = true - } - - foundBlkDiscard := false - var lvmSetupConfig directLVMConfig - testMode := false - for _, option := range options { - key, val, err := parsers.ParseKeyValueOpt(option) - if err != nil { - return nil, err - } - key = strings.ToLower(key) - switch key { - case "dm.basesize": - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - userBaseSize = true - devices.baseFsSize = uint64(size) - case "dm.loopdatasize": - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - devices.dataLoopbackSize = size - case "dm.loopmetadatasize": - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - devices.metaDataLoopbackSize = size - case "dm.fs": - if val != ext4 && val != xfs { - return nil, fmt.Errorf("devmapper: Unsupported filesystem %s", val) - } - devices.filesystem = val - case "dm.mkfsarg": - devices.mkfsArgs = append(devices.mkfsArgs, val) - case "dm.mountopt", "devicemapper.mountopt": - devices.mountOptions = joinMountOptions(devices.mountOptions, val) - case "dm.metadatadev": - devices.metadataDevice = val - case "dm.metadata_size": - devices.metaDataSize = val - case "dm.datadev": - devices.dataDevice = val - case "dm.thinpooldev": - devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/") - case "dm.blkdiscard": - foundBlkDiscard = true - devices.doBlkDiscard, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - case "dm.blocksize": - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - // convert to 512b sectors - devices.thinpBlockSize = uint32(size) >> 9 - case "dm.override_udev_sync_check": - devices.overrideUdevSyncCheck, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - - case "dm.use_deferred_removal": - enableDeferredRemoval, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - - case "dm.use_deferred_deletion": - enableDeferredDeletion, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - - case "dm.metaDataSize": - lvmSetupConfig.MetaDataSize = val - case "dm.min_free_space": - if !strings.HasSuffix(val, "%") { - return nil, fmt.Errorf("devmapper: Option dm.min_free_space requires %% suffix") - } - - valstring := strings.TrimSuffix(val, "%") - minFreeSpacePercent, err := strconv.ParseUint(valstring, 10, 32) - if err != nil { - return nil, err - } - - if 
minFreeSpacePercent >= 100 { - return nil, fmt.Errorf("devmapper: Invalid value %v for option dm.min_free_space", val) - } - - devices.minFreeSpacePercent = uint32(minFreeSpacePercent) - case "dm.xfs_nospace_max_retries": - _, err := strconv.ParseUint(val, 10, 64) - if err != nil { - return nil, err - } - devices.xfsNospaceRetries = val - case "dm.directlvm_device": - lvmSetupConfig.Device = val - case "dm.directlvm_device_force": - lvmSetupConfigForce, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - case "dm.thinp_percent": - per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) - if err != nil { - return nil, fmt.Errorf("could not parse `dm.thinp_percent=%s`: %w", val, err) - } - if per >= 100 { - return nil, errors.New("dm.thinp_percent must be greater than 0 and less than 100") - } - lvmSetupConfig.ThinpPercent = per - case "dm.thinp_metapercent": - per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) - if err != nil { - return nil, fmt.Errorf("could not parse `dm.thinp_metapercent=%s`: %w", val, err) - } - if per >= 100 { - return nil, errors.New("dm.thinp_metapercent must be greater than 0 and less than 100") - } - lvmSetupConfig.ThinpMetaPercent = per - case "dm.thinp_autoextend_percent": - per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) - if err != nil { - return nil, fmt.Errorf("could not parse `dm.thinp_autoextend_percent=%s`: %w", val, err) - } - if per > 100 { - return nil, errors.New("dm.thinp_autoextend_percent must be greater than 0 and less than 100") - } - lvmSetupConfig.AutoExtendPercent = per - case "dm.thinp_autoextend_threshold": - per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) - if err != nil { - return nil, fmt.Errorf("could not parse `dm.thinp_autoextend_threshold=%s`: %w", val, err) - } - if per > 100 { - return nil, errors.New("dm.thinp_autoextend_threshold must be greater than 0 and less than 100") - } - lvmSetupConfig.AutoExtendThreshold = per - case "dm.libdm_log_level": - level, err := strconv.ParseInt(val, 10, 32) - if err != nil { - return nil, fmt.Errorf("could not parse `dm.libdm_log_level=%s`: %w", val, err) - } - if level < devicemapper.LogLevelFatal || level > devicemapper.LogLevelDebug { - return nil, fmt.Errorf("dm.libdm_log_level must be in range [%d,%d]", devicemapper.LogLevelFatal, devicemapper.LogLevelDebug) - } - // Register a new logging callback with the specified level. 
- devicemapper.LogInit(devicemapper.DefaultLogger{ - Level: int(level), - }) - case "test": - testMode, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("devmapper: Unknown option %s", key) - } - } - - if !testMode { - if err := validateLVMConfig(lvmSetupConfig); err != nil { - return nil, err - } - } - - devices.lvmSetupConfig = lvmSetupConfig - - // By default, don't do blk discard hack on raw devices, its rarely useful and is expensive - if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") { - devices.doBlkDiscard = false - } - - if err := devices.initDevmapper(doInit); err != nil { - return nil, err - } - - return devices, nil -} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go b/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go deleted file mode 100644 index f85fb9479..000000000 --- a/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go +++ /dev/null @@ -1,109 +0,0 @@ -//go:build linux && cgo -// +build linux,cgo - -package devmapper - -// Definition of struct dm_task and sub structures (from lvm2) -// -// struct dm_ioctl { -// /* -// * The version number is made up of three parts: -// * major - no backward or forward compatibility, -// * minor - only backwards compatible, -// * patch - both backwards and forwards compatible. -// * -// * All clients of the ioctl interface should fill in the -// * version number of the interface that they were -// * compiled with. -// * -// * All recognized ioctl commands (ie. those that don't -// * return -ENOTTY) fill out this field, even if the -// * command failed. -// */ -// uint32_t version[3]; /* in/out */ -// uint32_t data_size; /* total size of data passed in -// * including this struct */ - -// uint32_t data_start; /* offset to start of data -// * relative to start of this struct */ - -// uint32_t target_count; /* in/out */ -// int32_t open_count; /* out */ -// uint32_t flags; /* in/out */ - -// /* -// * event_nr holds either the event number (input and output) or the -// * udev cookie value (input only). -// * The DM_DEV_WAIT ioctl takes an event number as input. -// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls -// * use the field as a cookie to return in the DM_COOKIE -// * variable with the uevents they issue. -// * For output, the ioctls return the event number, not the cookie. 
-// */ -// uint32_t event_nr; /* in/out */ -// uint32_t padding; - -// uint64_t dev; /* in/out */ - -// char name[DM_NAME_LEN]; /* device name */ -// char uuid[DM_UUID_LEN]; /* unique identifier for -// * the block device */ -// char data[7]; /* padding or data */ -// }; - -// struct target { -// uint64_t start; -// uint64_t length; -// char *type; -// char *params; - -// struct target *next; -// }; - -// typedef enum { -// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */ -// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */ -// } dm_add_node_t; - -// struct dm_task { -// int type; -// char *dev_name; -// char *mangled_dev_name; - -// struct target *head, *tail; - -// int read_only; -// uint32_t event_nr; -// int major; -// int minor; -// int allow_default_major_fallback; -// uid_t uid; -// gid_t gid; -// mode_t mode; -// uint32_t read_ahead; -// uint32_t read_ahead_flags; -// union { -// struct dm_ioctl *v4; -// } dmi; -// char *newname; -// char *message; -// char *geometry; -// uint64_t sector; -// int no_flush; -// int no_open_count; -// int skip_lockfs; -// int query_inactive_table; -// int suppress_identical_reload; -// dm_add_node_t add_node; -// uint64_t existing_table_size; -// int cookie_set; -// int new_uuid; -// int secure_data; -// int retry_remove; -// int enable_checks; -// int expected_errno; - -// char *uuid; -// char *mangled_uuid; -// }; -// diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go deleted file mode 100644 index 8b8a1d177..000000000 --- a/vendor/github.com/containers/storage/drivers/devmapper/driver.go +++ /dev/null @@ -1,271 +0,0 @@ -//go:build linux && cgo -// +build linux,cgo - -package devmapper - -import ( - "fmt" - "os" - "path" - "strconv" - - graphdriver "github.com/containers/storage/drivers" - "github.com/containers/storage/pkg/devicemapper" - "github.com/containers/storage/pkg/directory" - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/locker" - "github.com/containers/storage/pkg/mount" - units "github.com/docker/go-units" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -const defaultPerms = os.FileMode(0o555) - -func init() { - graphdriver.MustRegister("devicemapper", Init) -} - -// Driver contains the device set mounted and the home directory -type Driver struct { - *DeviceSet - home string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - ctr *graphdriver.RefCounter - locker *locker.Locker -} - -// Init creates a driver with the given home and the set of options. -func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) { - deviceSet, err := NewDeviceSet(home, true, options.DriverOptions, options.UIDMaps, options.GIDMaps) - if err != nil { - return nil, err - } - - if err := mount.MakePrivate(home); err != nil { - return nil, err - } - - d := &Driver{ - DeviceSet: deviceSet, - home: home, - uidMaps: options.UIDMaps, - gidMaps: options.GIDMaps, - ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), - locker: locker.New(), - } - return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil -} - -func (d *Driver) String() string { - return "devicemapper" -} - -// Status returns the status about the driver in a printable format. -// Information returned contains Pool Name, Data File, Metadata file, disk usage by -// the data and metadata, etc. 
-func (d *Driver) Status() [][2]string { - s := d.DeviceSet.Status() - - status := [][2]string{ - {"Pool Name", s.PoolName}, - {"Pool Blocksize", units.HumanSize(float64(s.SectorSize))}, - {"Base Device Size", units.HumanSize(float64(s.BaseDeviceSize))}, - {"Backing Filesystem", s.BaseDeviceFS}, - {"Data file", s.DataFile}, - {"Metadata file", s.MetadataFile}, - {"Data Space Used", units.HumanSize(float64(s.Data.Used))}, - {"Data Space Total", units.HumanSize(float64(s.Data.Total))}, - {"Data Space Available", units.HumanSize(float64(s.Data.Available))}, - {"Metadata Space Used", units.HumanSize(float64(s.Metadata.Used))}, - {"Metadata Space Total", units.HumanSize(float64(s.Metadata.Total))}, - {"Metadata Space Available", units.HumanSize(float64(s.Metadata.Available))}, - {"Thin Pool Minimum Free Space", units.HumanSize(float64(s.MinFreeSpace))}, - {"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)}, - {"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)}, - {"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)}, - {"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)}, - } - if len(s.DataLoopback) > 0 { - status = append(status, [2]string{"Data loop file", s.DataLoopback}) - } - if len(s.MetadataLoopback) > 0 { - status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback}) - } - if vStr, err := devicemapper.GetLibraryVersion(); err == nil { - status = append(status, [2]string{"Library Version", vStr}) - } - return status -} - -// Metadata returns a map of information about the device. -func (d *Driver) Metadata(id string) (map[string]string, error) { - m, err := d.DeviceSet.exportDeviceMetadata(id) - if err != nil { - return nil, err - } - - metadata := make(map[string]string) - metadata["DeviceId"] = strconv.Itoa(m.deviceID) - metadata["DeviceSize"] = strconv.FormatUint(m.deviceSize, 10) - metadata["DeviceName"] = m.deviceName - return metadata, nil -} - -// Cleanup unmounts a device. -func (d *Driver) Cleanup() error { - err := d.DeviceSet.Shutdown(d.home) - - umountErr := mount.Unmount(d.home) - // in case we have two errors, prefer the one from Shutdown() - if err != nil { - return err - } - - return umountErr -} - -// CreateFromTemplate creates a layer with the same contents and parent as another layer. -func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { - return d.Create(id, template, opts) -} - -// CreateReadWrite creates a layer that is writable for use as a container -// file system. -func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - return d.Create(id, parent, opts) -} - -// Create adds a device with a given id and the parent. -func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { - var storageOpt map[string]string - if opts != nil { - storageOpt = opts.StorageOpt - } - - if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil { - return err - } - - return nil -} - -// Remove removes a device with a given id, unmounts the filesystem, and removes the mount point. 
-func (d *Driver) Remove(id string) error { - d.locker.Lock(id) - defer d.locker.Unlock(id) - if !d.DeviceSet.HasDevice(id) { - // Consider removing a non-existing device a no-op - // This is useful to be able to progress on container removal - // if the underlying device has gone away due to earlier errors - return nil - } - - // This assumes the device has been properly Get/Put:ed and thus is unmounted - if err := d.DeviceSet.DeleteDevice(id, false); err != nil { - return fmt.Errorf("failed to remove device %s: %v", id, err) - } - - // Most probably the mount point is already removed on Put() - // (see DeviceSet.UnmountDevice()), but just in case it was not - // let's try to remove it here as well, ignoring errors as - // an older kernel can return EBUSY if e.g. the mount was leaked - // to other mount namespaces. A failure to remove the container's - // mount point is not important and should not be treated - // as a failure to remove the container. - mp := path.Join(d.home, "mnt", id) - err := unix.Rmdir(mp) - if err != nil && !os.IsNotExist(err) { - logrus.WithField("storage-driver", "devicemapper").Warnf("unable to remove mount point %q: %s", mp, err) - } - - return nil -} - -// Get mounts a device with given id into the root filesystem -func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { - d.locker.Lock(id) - defer d.locker.Unlock(id) - mp := path.Join(d.home, "mnt", id) - rootFs := path.Join(mp, "rootfs") - if count := d.ctr.Increment(mp); count > 1 { - return rootFs, nil - } - - uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { - d.ctr.Decrement(mp) - return "", err - } - - // Create the target directories if they don't exist - if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0o755, uid, gid); err != nil { - d.ctr.Decrement(mp) - return "", err - } - if err := idtools.MkdirAs(mp, 0o755, uid, gid); err != nil && !os.IsExist(err) { - d.ctr.Decrement(mp) - return "", err - } - - // Mount the device - if err := d.DeviceSet.MountDevice(id, mp, options); err != nil { - d.ctr.Decrement(mp) - return "", err - } - - if err := idtools.MkdirAllAs(rootFs, defaultPerms, uid, gid); err != nil { - d.ctr.Decrement(mp) - d.DeviceSet.UnmountDevice(id, mp) - return "", err - } - - idFile := path.Join(mp, "id") - if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) { - // Create an "id" file with the container/image id in it to help reconstruct this in case - // of later problems - if err := os.WriteFile(idFile, []byte(id), 0o600); err != nil { - d.ctr.Decrement(mp) - d.DeviceSet.UnmountDevice(id, mp) - return "", err - } - } - - return rootFs, nil -} - -// Put unmounts a device and removes it. -func (d *Driver) Put(id string) error { - d.locker.Lock(id) - defer d.locker.Unlock(id) - mp := path.Join(d.home, "mnt", id) - if count := d.ctr.Decrement(mp); count > 0 { - return nil - } - - err := d.DeviceSet.UnmountDevice(id, mp) - if err != nil { - logrus.Errorf("devmapper: Error unmounting device %s: %v", id, err) - } - - return err -} - -// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID. -// For devmapper, it queries the mnt path for this ID. -func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { - d.locker.Lock(id) - defer d.locker.Unlock(id) - return directory.Usage(path.Join(d.home, "mnt", id)) -} - -// Exists checks to see if the device exists. 
-func (d *Driver) Exists(id string) bool { - return d.DeviceSet.HasDevice(id) -} - -// AdditionalImageStores returns additional image stores supported by the driver -func (d *Driver) AdditionalImageStores() []string { - return nil -} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go b/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go deleted file mode 100644 index 52f0e863e..000000000 --- a/vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build linux && cgo -// +build linux,cgo - -package devmapper - -import jsoniter "github.com/json-iterator/go" - -var json = jsoniter.ConfigCompatibleWithStandardLibrary diff --git a/vendor/github.com/containers/storage/drivers/devmapper/mount.go b/vendor/github.com/containers/storage/drivers/devmapper/mount.go deleted file mode 100644 index db903ca55..000000000 --- a/vendor/github.com/containers/storage/drivers/devmapper/mount.go +++ /dev/null @@ -1,89 +0,0 @@ -//go:build linux && cgo -// +build linux,cgo - -package devmapper - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - - "golang.org/x/sys/unix" -) - -// FIXME: this is copy-pasted from the aufs driver. -// It should be moved into the core. - -// Mounted returns true if a mount point exists. -func Mounted(mountpoint string) (bool, error) { - var mntpointSt unix.Stat_t - if err := unix.Stat(mountpoint, &mntpointSt); err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - var parentSt unix.Stat_t - if err := unix.Stat(filepath.Join(mountpoint, ".."), &parentSt); err != nil { - return false, err - } - return mntpointSt.Dev != parentSt.Dev, nil -} - -type probeData struct { - fsName string - magic string - offset uint64 -} - -// ProbeFsType returns the filesystem name for the given device id. 
-func ProbeFsType(device string) (string, error) { - probes := []probeData{ - {"btrfs", "_BHRfS_M", 0x10040}, - {"ext4", "\123\357", 0x438}, - {"xfs", "XFSB", 0}, - } - - maxLen := uint64(0) - for _, p := range probes { - l := p.offset + uint64(len(p.magic)) - if l > maxLen { - maxLen = l - } - } - - file, err := os.Open(device) - if err != nil { - return "", err - } - defer file.Close() - - buffer := make([]byte, maxLen) - l, err := file.Read(buffer) - if err != nil { - return "", err - } - - if uint64(l) != maxLen { - return "", fmt.Errorf("devmapper: unable to detect filesystem type of %s, short read", device) - } - - for _, p := range probes { - if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) { - return p.fsName, nil - } - } - - return "", fmt.Errorf("devmapper: Unknown filesystem type on %s", device) -} - -func joinMountOptions(a, b string) string { - if a == "" { - return b - } - if b == "" { - return a - } - return a + "," + b -} diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go index aa99fdead..b42ba0757 100644 --- a/vendor/github.com/containers/storage/drivers/driver.go +++ b/vendor/github.com/containers/storage/drivers/driver.go @@ -10,6 +10,7 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" digest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" @@ -297,8 +298,8 @@ type AdditionalLayerStoreDriver interface { Driver // LookupAdditionalLayer looks up additional layer store by the specified - // digest and ref and returns an object representing that layer. - LookupAdditionalLayer(d digest.Digest, ref string) (AdditionalLayer, error) + // TOC digest and ref and returns an object representing that layer. + LookupAdditionalLayer(tocDigest digest.Digest, ref string) (AdditionalLayer, error) // LookupAdditionalLayer looks up additional layer store by the specified // ID and returns an object representing that layer. 
@@ -471,7 +472,7 @@ func ScanPriorDrivers(root string) map[string]bool { for driver := range drivers { p := filepath.Join(root, driver) - if _, err := os.Stat(p); err == nil { + if err := fileutils.Exists(p); err == nil { driversMap[driver] = true } } diff --git a/vendor/github.com/containers/storage/drivers/driver_linux.go b/vendor/github.com/containers/storage/drivers/driver_linux.go index cd806b8ff..ee0fc7bfc 100644 --- a/vendor/github.com/containers/storage/drivers/driver_linux.go +++ b/vendor/github.com/containers/storage/drivers/driver_linux.go @@ -94,8 +94,6 @@ var ( // Slice of drivers that should be used in an order Priority = []string{ "overlay", - // We don't support devicemapper without configuration - // "devicemapper", "aufs", "btrfs", "zfs", diff --git a/vendor/github.com/containers/storage/drivers/overlay/composefs.go b/vendor/github.com/containers/storage/drivers/overlay/composefs.go index baa9d7bef..8f07c2360 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/composefs.go +++ b/vendor/github.com/containers/storage/drivers/overlay/composefs.go @@ -4,12 +4,14 @@ package overlay import ( + "bytes" "encoding/binary" "errors" "fmt" "os" "os/exec" "path/filepath" + "strings" "sync" "github.com/containers/storage/pkg/chunked/dump" @@ -70,12 +72,18 @@ func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, com // a scope to close outFd before setting fsverity on the read-only fd. defer outFd.Close() + errBuf := &bytes.Buffer{} cmd := exec.Command(writerJson, "--from-file", "-", "/proc/self/fd/3") cmd.ExtraFiles = []*os.File{outFd} - cmd.Stderr = os.Stderr + cmd.Stderr = errBuf cmd.Stdin = dumpReader if err := cmd.Run(); err != nil { - return fmt.Errorf("failed to convert json to erofs: %w", err) + rErr := fmt.Errorf("failed to convert json to erofs: %w", err) + exitErr := &exec.ExitError{} + if errors.As(err, &exitErr) { + return fmt.Errorf("%w: %s", rErr, strings.TrimSpace(errBuf.String())) + } + return rErr } return nil }() diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index f007aa943..8b6f64b1e 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -24,9 +24,11 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/fsutils" "github.com/containers/storage/pkg/idmap" "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/lockfile" "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/parsers" "github.com/containers/storage/pkg/system" @@ -82,6 +84,8 @@ const ( lowerFile = "lower" maxDepth = 500 + stagingLockFile = "staging.lock" + tocArtifact = "toc" fsVerityDigestsArtifact = "fs-verity-digests" @@ -126,6 +130,8 @@ type Driver struct { usingMetacopy bool usingComposefs bool + stagingDirsLocks map[string]*lockfile.LockFile + supportsIDMappedMounts *bool } @@ -459,6 +465,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) supportsVolatile: supportsVolatile, usingComposefs: opts.useComposefs, options: *opts, + stagingDirsLocks: make(map[string]*lockfile.LockFile), } d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)) @@ -574,7 +581,7 @@ func parseOptions(options 
[]string) (*overlayOptions, error) { case "mount_program": logrus.Debugf("overlay: mount_program=%s", val) if val != "" { - _, err := os.Stat(val) + err := fileutils.Exists(val) if err != nil { return nil, fmt.Errorf("overlay: can't stat program %q: %w", val, err) } @@ -676,7 +683,7 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) { } for _, dir := range []string{home, runhome} { - if _, err := os.Stat(dir); err != nil { + if err := fileutils.Exists(dir); err != nil { _ = idtools.MkdirAllAs(dir, 0o700, 0, 0) } } @@ -854,7 +861,7 @@ func (d *Driver) Status() [][2]string { // LowerDir, UpperDir, WorkDir and MergeDir used to store data. func (d *Driver) Metadata(id string) (map[string]string, error) { dir := d.dir(id) - if _, err := os.Stat(dir); err != nil { + if err := fileutils.Exists(dir); err != nil { return nil, err } @@ -875,20 +882,54 @@ func (d *Driver) Metadata(id string) (map[string]string, error) { return metadata, nil } -// Cleanup any state created by overlay which should be cleaned when daemon -// is being shutdown. For now, we just have to unmount the bind mounted -// we had created. +// Cleanup any state created by overlay which should be cleaned when +// the storage is being shutdown. The only state created by the driver +// is the bind mount on the home directory. func (d *Driver) Cleanup() error { - _ = os.RemoveAll(filepath.Join(d.home, stagingDir)) + anyPresent := d.pruneStagingDirectories() + if anyPresent { + return nil + } return mount.Unmount(d.home) } +// pruneStagingDirectories cleans up any staging directory that was leaked. +// It returns whether any staging directory is still present. +func (d *Driver) pruneStagingDirectories() bool { + for _, lock := range d.stagingDirsLocks { + lock.Unlock() + } + d.stagingDirsLocks = make(map[string]*lockfile.LockFile) + + anyPresent := false + + homeStagingDir := filepath.Join(d.home, stagingDir) + dirs, err := os.ReadDir(homeStagingDir) + if err == nil { + for _, dir := range dirs { + stagingDirToRemove := filepath.Join(homeStagingDir, dir.Name()) + lock, err := lockfile.GetLockFile(filepath.Join(stagingDirToRemove, stagingLockFile)) + if err != nil { + anyPresent = true + continue + } + if err := lock.TryLock(); err != nil { + anyPresent = true + continue + } + _ = os.RemoveAll(stagingDirToRemove) + lock.Unlock() + } + } + return anyPresent +} + // LookupAdditionalLayer looks up additional layer store by the specified -// digest and ref and returns an object representing that layer. +// TOC digest and ref and returns an object representing that layer. // This API is experimental and can be changed without bumping the major version number. // TODO: to remove the comment once it's no longer experimental. 
-func (d *Driver) LookupAdditionalLayer(dgst digest.Digest, ref string) (graphdriver.AdditionalLayer, error) { - l, err := d.getAdditionalLayerPath(dgst, ref) +func (d *Driver) LookupAdditionalLayer(tocDigest digest.Digest, ref string) (graphdriver.AdditionalLayer, error) { + l, err := d.getAdditionalLayerPath(tocDigest, ref) if err != nil { return nil, err } @@ -1016,7 +1057,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnl rootGID = int(st.GID()) } - if _, err := system.Lstat(dir); err == nil { + if err := fileutils.Lexists(dir); err == nil { logrus.Warnf("Trying to create a layer %#v while directory %q already exists; removing it first", id, dir) // Don’t just os.RemoveAll(dir) here; d.Remove also removes the link in linkDir, // so that we can’t end up with two symlinks in linkDir pointing to the same layer. @@ -1144,7 +1185,7 @@ func (d *Driver) getLower(parent string) (string, error) { parentDir := d.dir(parent) // Ensure parent exists - if _, err := os.Lstat(parentDir); err != nil { + if err := fileutils.Lexists(parentDir); err != nil { return "", err } @@ -1197,10 +1238,10 @@ func (d *Driver) dir2(id string, useImageStore bool) (string, string, bool) { newpath := path.Join(homedir, id) - if _, err := os.Stat(newpath); err != nil { + if err := fileutils.Exists(newpath); err != nil { for _, p := range d.getAllImageStores() { l := path.Join(p, d.name, id) - _, err = os.Stat(l) + err = fileutils.Exists(l) if err == nil { return l, homedir, true } @@ -1340,7 +1381,7 @@ func (d *Driver) recreateSymlinks() error { linkPath := path.Join(d.home, linkDir, strings.Trim(string(data), "\n")) // Check if the symlink exists, and if it doesn't, create it again with the // name we got from the "link" file - _, err = os.Lstat(linkPath) + err = fileutils.Lexists(linkPath) if err != nil && os.IsNotExist(err) { if err := os.Symlink(path.Join("..", dir.Name(), "diff"), linkPath); err != nil { errs = multierror.Append(errs, err) @@ -1417,7 +1458,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) { dir, _, inAdditionalStore := d.dir2(id, false) - if _, err := os.Stat(dir); err != nil { + if err := fileutils.Exists(dir); err != nil { return "", err } @@ -1528,8 +1569,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO composeFsLayersDir := filepath.Join(dir, "composefs-layers") maybeAddComposefsMount := func(lowerID string, i int, readWrite bool) (string, error) { composefsBlob := d.getComposefsData(lowerID) - _, err = os.Stat(composefsBlob) - if err != nil { + if err := fileutils.Exists(composefsBlob); err != nil { if os.IsNotExist(err) { return "", nil } @@ -1633,11 +1673,11 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO absLowers = append(absLowers, lower) diffN = 1 - _, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) + err = fileutils.Exists(dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) for err == nil { absLowers = append(absLowers, dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) diffN++ - _, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) + err = fileutils.Exists(dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) } } @@ -1660,15 +1700,17 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO return "", err } // if it is in an additional store, do not fail if the 
directory already exists - if _, err2 := os.Stat(diffDir); err2 != nil { + if err2 := fileutils.Exists(diffDir); err2 != nil { return "", err } } mergedDir := path.Join(dir, "merged") - // Create the driver merged dir - if err := idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) { - return "", err + // Attempt to create the merged dir only if it doesn't exist. + if err := fileutils.Exists(mergedDir); err != nil && os.IsNotExist(err) { + if err := idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return "", err + } } if count := d.ctr.Increment(mergedDir); count > 1 { return mergedDir, nil @@ -1834,14 +1876,14 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO // Put unmounts the mount path created for the give id. func (d *Driver) Put(id string) error { dir, _, inAdditionalStore := d.dir2(id, false) - if _, err := os.Stat(dir); err != nil { + if err := fileutils.Exists(dir); err != nil { return err } mountpoint := path.Join(dir, "merged") if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } - if _, err := os.ReadFile(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) { + if err := fileutils.Exists(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) { return err } @@ -1849,7 +1891,7 @@ func (d *Driver) Put(id string) error { mappedRoot := filepath.Join(d.home, id, "mapped") // It should not happen, but cleanup any mapped mount if it was leaked. - if _, err := os.Stat(mappedRoot); err == nil { + if err := fileutils.Exists(mappedRoot); err == nil { mounts, err := os.ReadDir(mappedRoot) if err == nil { // Go through all of the mapped mounts. @@ -1920,7 +1962,7 @@ func (d *Driver) Put(id string) error { // Exists checks to see if the id is already mounted. func (d *Driver) Exists(id string) bool { - _, err := os.Stat(d.dir(id)) + err := fileutils.Exists(d.dir(id)) return err == nil } @@ -2027,7 +2069,14 @@ func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { // CleanupStagingDirectory cleanups the staging directory. 
func (d *Driver) CleanupStagingDirectory(stagingDirectory string) error { - return os.RemoveAll(stagingDirectory) + parentStagingDir := filepath.Dir(stagingDirectory) + + if lock, ok := d.stagingDirsLocks[parentStagingDir]; ok { + delete(d.stagingDirsLocks, parentStagingDir) + lock.Unlock() + } + + return os.RemoveAll(parentStagingDir) } func supportsDataOnlyLayersCached(home, runhome string) (bool, error) { @@ -2048,8 +2097,8 @@ func supportsDataOnlyLayersCached(home, runhome string) (bool, error) { return supportsDataOnly, err } -// ApplyDiff applies the changes in the new layer using the specified function -func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffWithDifferOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, err error) { +// ApplyDiffWithDiffer applies the changes in the new layer using the specified function +func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffWithDifferOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, errRet error) { var idMappings *idtools.IDMappings if options != nil { idMappings = options.Mappings @@ -2066,7 +2115,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App if err != nil && !os.IsExist(err) { return graphdriver.DriverWithDifferOutput{}, err } - applyDir, err = os.MkdirTemp(stagingDir, "") + layerDir, err := os.MkdirTemp(stagingDir, "") if err != nil { return graphdriver.DriverWithDifferOutput{}, err } @@ -2074,9 +2123,23 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App if d.options.forceMask != nil { perms = *d.options.forceMask } - if err := os.Chmod(applyDir, perms); err != nil { + applyDir = filepath.Join(layerDir, "dir") + if err := os.Mkdir(applyDir, perms); err != nil { return graphdriver.DriverWithDifferOutput{}, err } + + lock, err := lockfile.GetLockFile(filepath.Join(layerDir, stagingLockFile)) + if err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } + defer func() { + if errRet != nil { + delete(d.stagingDirsLocks, layerDir) + lock.Unlock() + } + }() + d.stagingDirsLocks[layerDir] = lock + lock.Lock() } else { var err error applyDir, err = d.getDiffPath(id) @@ -2110,9 +2173,19 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App // ApplyDiffFromStagingDirectory applies the changes using the specified staging directory. 
func (d *Driver) ApplyDiffFromStagingDirectory(id, parent string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffWithDifferOpts) error { stagingDirectory := diffOutput.Target - if filepath.Dir(stagingDirectory) != d.getStagingDir(id) { + parentStagingDir := filepath.Dir(stagingDirectory) + + defer func() { + if lock, ok := d.stagingDirsLocks[parentStagingDir]; ok { + delete(d.stagingDirsLocks, parentStagingDir) + lock.Unlock() + } + }() + + if filepath.Dir(parentStagingDir) != d.getStagingDir(id) { return fmt.Errorf("%q is not a staging directory", stagingDirectory) } + diffPath, err := d.getDiffPath(id) if err != nil { return err @@ -2332,7 +2405,7 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp } for err == nil { i++ - _, err = os.Stat(nameWithSuffix(diffDir, i)) + err = fileutils.Exists(nameWithSuffix(diffDir, i)) } for i > 0 { @@ -2403,21 +2476,21 @@ func nameWithSuffix(name string, number int) string { return fmt.Sprintf("%s%d", name, number) } -func (d *Driver) getAdditionalLayerPath(dgst digest.Digest, ref string) (string, error) { +func (d *Driver) getAdditionalLayerPath(tocDigest digest.Digest, ref string) (string, error) { refElem := base64.StdEncoding.EncodeToString([]byte(ref)) for _, ls := range d.options.layerStores { ref := "" if ls.withReference { ref = refElem } - target := path.Join(ls.path, ref, dgst.String()) + target := path.Join(ls.path, ref, tocDigest.String()) // Check if all necessary files exist for _, p := range []string{ filepath.Join(target, "diff"), filepath.Join(target, "info"), filepath.Join(target, "blob"), } { - if _, err := os.Stat(p); err != nil { + if err := fileutils.Exists(p); err != nil { wrapped := fmt.Errorf("failed to stat additional layer %q: %w", p, err) return "", fmt.Errorf("%v: %w", wrapped, graphdriver.ErrLayerUnknown) } @@ -2425,7 +2498,7 @@ func (d *Driver) getAdditionalLayerPath(dgst digest.Digest, ref string) (string, return target, nil } - return "", fmt.Errorf("additional layer (%q, %q) not found: %w", dgst, ref, graphdriver.ErrLayerUnknown) + return "", fmt.Errorf("additional layer (%q, %q) not found: %w", tocDigest, ref, graphdriver.ErrLayerUnknown) } func (d *Driver) releaseAdditionalLayerByID(id string) { diff --git a/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go b/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go deleted file mode 100644 index a744eaea1..000000000 --- a/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build !exclude_graphdriver_devicemapper && linux && cgo -// +build !exclude_graphdriver_devicemapper,linux,cgo - -package register - -import ( - // register the devmapper graphdriver - _ "github.com/containers/storage/drivers/devmapper" -) diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go index 9b552254b..db9032117 100644 --- a/vendor/github.com/containers/storage/drivers/vfs/driver.go +++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go @@ -12,6 +12,7 @@ import ( graphdriver "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/parsers" "github.com/containers/storage/pkg/system" @@ -210,7 +211,7 @@ func (d *Driver) dir2(id string, 
useImageStore bool) string { } else { homedir = filepath.Join(d.home, "dir", filepath.Base(id)) } - if _, err := os.Stat(homedir); err != nil { + if err := fileutils.Exists(homedir); err != nil { additionalHomes := d.additionalHomes[:] if d.imageStore != "" { additionalHomes = append(additionalHomes, d.imageStore) @@ -269,7 +270,7 @@ func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { // Exists checks to see if the directory exists for the given id. func (d *Driver) Exists(id string) bool { - _, err := os.Stat(d.dir(id)) + err := fileutils.Exists(d.dir(id)) return err == nil } diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go index 8c2dc18ae..18f90fdc5 100644 --- a/vendor/github.com/containers/storage/drivers/windows/windows.go +++ b/vendor/github.com/containers/storage/drivers/windows/windows.go @@ -26,6 +26,7 @@ import ( graphdriver "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/longpath" @@ -231,7 +232,7 @@ func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt if err != nil { return err } - if _, err := os.Stat(filepath.Join(parentPath, "Files")); err == nil { + if err := fileutils.Exists(filepath.Join(parentPath, "Files")); err == nil { // This is a legitimate parent layer (not the empty "-init" layer), // so include it in the layer chain. layerChain = []string{parentPath} @@ -266,7 +267,7 @@ func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt } } - if _, err := os.Lstat(d.dir(parent)); err != nil { + if err := fileutils.Lexists(d.dir(parent)); err != nil { if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) } diff --git a/vendor/github.com/containers/storage/idset.go b/vendor/github.com/containers/storage/idset.go index be9e45cfd..43cf1fb5f 100644 --- a/vendor/github.com/containers/storage/idset.go +++ b/vendor/github.com/containers/storage/idset.go @@ -111,7 +111,7 @@ func (s *idSet) findAvailable(n int) (*idSet, error) { iterator, cancel := s.iterator() defer cancel() for i := iterator(); n > 0 && i != nil; i = iterator() { - i.end = minInt(i.end, i.start+n) + i.end = min(i.end, i.start+n) intervals = append(intervals, *i) n -= i.length() } @@ -129,7 +129,7 @@ func (s *idSet) zip(container *idSet) []idtools.IDMap { defer containerCancel() var out []idtools.IDMap for h, c := hostIterator(), containerIterator(); h != nil && c != nil; { - if n := minInt(h.length(), c.length()); n > 0 { + if n := min(h.length(), c.length()); n > 0 { out = append(out, idtools.IDMap{ ContainerID: c.start, HostID: h.start, @@ -159,12 +159,12 @@ type interval struct { } func (i interval) length() int { - return maxInt(0, i.end-i.start) + return max(0, i.end-i.start) } func (i interval) Intersect(other intervalset.Interval) intervalset.Interval { j := other.(interval) - return interval{start: maxInt(i.start, j.start), end: minInt(i.end, j.end)} + return interval{start: max(i.start, j.start), end: min(i.end, j.end)} } func (i interval) Before(other intervalset.Interval) bool { @@ -183,15 +183,15 @@ func (i interval) Bisect(other intervalset.Interval) (intervalset.Interval, inte } // Subtracting [j.start, j.end) is equivalent 
to the union of intersecting (-inf, j.start) and // [j.end, +inf). - left := interval{start: i.start, end: minInt(i.end, j.start)} - right := interval{start: maxInt(i.start, j.end), end: i.end} + left := interval{start: i.start, end: min(i.end, j.start)} + right := interval{start: max(i.start, j.end), end: i.end} return left, right } func (i interval) Adjoin(other intervalset.Interval) intervalset.Interval { j := other.(interval) if !i.IsZero() && !j.IsZero() && (i.end == j.start || j.end == i.start) { - return interval{start: minInt(i.start, j.start), end: maxInt(i.end, j.end)} + return interval{start: min(i.start, j.start), end: max(i.end, j.end)} } return interval{} } @@ -204,24 +204,10 @@ func (i interval) Encompass(other intervalset.Interval) intervalset.Interval { case j.IsZero(): return i default: - return interval{start: minInt(i.start, j.start), end: maxInt(i.end, j.end)} + return interval{start: min(i.start, j.start), end: max(i.end, j.end)} } } -func minInt(a, b int) int { - if a < b { - return a - } - return b -} - -func maxInt(a, b int) int { - if a < b { - return b - } - return a -} - func hasOverlappingRanges(mappings []idtools.IDMap) error { hostIntervals := intervalset.Empty() containerIntervals := intervalset.Empty() diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go index 70f76d66d..77c9c818c 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -1023,7 +1023,7 @@ loop: // Not the root directory, ensure that the parent directory exists parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + if err := fileutils.Lexists(parentPath); err != nil && os.IsNotExist(err) { err = idtools.MkdirAllAndChownNew(parentPath, 0o777, rootIDs) if err != nil { return err diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go index 01c6f30c2..448784549 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes.go @@ -13,6 +13,7 @@ import ( "syscall" "time" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/pools" "github.com/containers/storage/pkg/system" @@ -106,7 +107,7 @@ func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { func aufsWhiteoutPresent(root, path string) (bool, error) { f := filepath.Join(filepath.Dir(path), WhiteoutPrefix+filepath.Base(path)) - _, err := os.Stat(filepath.Join(root, f)) + err := fileutils.Exists(filepath.Join(root, f)) if err == nil { return true, nil } diff --git a/vendor/github.com/containers/storage/pkg/archive/copy.go b/vendor/github.com/containers/storage/pkg/archive/copy.go index 55f753bf4..4d46167d7 100644 --- a/vendor/github.com/containers/storage/pkg/archive/copy.go +++ b/vendor/github.com/containers/storage/pkg/archive/copy.go @@ -8,6 +8,7 @@ import ( "path/filepath" "strings" + "github.com/containers/storage/pkg/fileutils" "github.com/sirupsen/logrus" ) @@ -94,7 +95,7 @@ func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { // items in the resulting tar archive to match the given rebaseName if not "". 
func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) { sourcePath = normalizePath(sourcePath) - if _, err = os.Lstat(sourcePath); err != nil { + if err = fileutils.Lexists(sourcePath); err != nil { // Catches the case where the source does not exist or is not a // directory if asserted to be a directory, as this also causes an // error. diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go index 713551859..ceaa8b0b7 100644 --- a/vendor/github.com/containers/storage/pkg/archive/diff.go +++ b/vendor/github.com/containers/storage/pkg/archive/diff.go @@ -10,6 +10,7 @@ import ( "runtime" "strings" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/pools" "github.com/containers/storage/pkg/system" @@ -84,7 +85,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + if err := fileutils.Lexists(parentPath); err != nil && os.IsNotExist(err) { err = os.MkdirAll(parentPath, 0o755) if err != nil { return 0, err @@ -130,7 +131,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, if strings.HasPrefix(base, WhiteoutPrefix) { dir := filepath.Dir(path) if base == WhiteoutOpaqueDir { - _, err := os.Lstat(dir) + err := fileutils.Lexists(dir) if err != nil { return 0, err } diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go index f221a2283..5ff9f6b51 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go @@ -9,6 +9,7 @@ import ( "sync" "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/unshare" ) @@ -76,7 +77,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions rootIDs := idMappings.RootPair() dest = filepath.Clean(dest) - if _, err := os.Stat(dest); os.IsNotExist(err) { + if err := fileutils.Exists(dest); os.IsNotExist(err) { if err := idtools.MkdirAllAndChownNew(dest, 0o755, rootIDs); err != nil { return err } diff --git a/vendor/github.com/containers/storage/pkg/chunked/bloom_filter.go b/vendor/github.com/containers/storage/pkg/chunked/bloom_filter.go new file mode 100644 index 000000000..45d76ec30 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/chunked/bloom_filter.go @@ -0,0 +1,87 @@ +package chunked + +import ( + "encoding/binary" + "hash/crc32" + "io" +) + +type bloomFilter struct { + bitArray []uint64 + k uint32 +} + +func newBloomFilter(size int, k uint32) *bloomFilter { + numElements := (size + 63) / 64 + if numElements == 0 { + numElements = 1 + } + return &bloomFilter{ + bitArray: make([]uint64, numElements), + k: k, + } +} + +func newBloomFilterFromArray(bitArray []uint64, k uint32) *bloomFilter { + return &bloomFilter{ + bitArray: bitArray, + k: k, + } +} + +func (bf *bloomFilter) hashFn(item []byte, seed uint32) (uint64, uint64) { + if len(item) == 0 { + return 0, 1 + } + mod := uint32(len(bf.bitArray) * 64) + seedSplit := seed % uint32(len(item)) + hash := (crc32.ChecksumIEEE(item[:seedSplit]) ^ crc32.ChecksumIEEE(item[seedSplit:])) % mod + return 
uint64(hash / 64), uint64(1 << (hash % 64)) +} + +func (bf *bloomFilter) add(item []byte) { + for i := uint32(0); i < bf.k; i++ { + index, mask := bf.hashFn(item, i) + bf.bitArray[index] |= mask + } +} + +func (bf *bloomFilter) maybeContains(item []byte) bool { + for i := uint32(0); i < bf.k; i++ { + index, mask := bf.hashFn(item, i) + if bf.bitArray[index]&mask == 0 { + return false + } + } + return true +} + +func (bf *bloomFilter) writeTo(writer io.Writer) error { + if err := binary.Write(writer, binary.LittleEndian, uint64(len(bf.bitArray))); err != nil { + return err + } + if err := binary.Write(writer, binary.LittleEndian, uint32(bf.k)); err != nil { + return err + } + if err := binary.Write(writer, binary.LittleEndian, bf.bitArray); err != nil { + return err + } + return nil +} + +func readBloomFilter(reader io.Reader) (*bloomFilter, error) { + var bloomFilterLen uint64 + var k uint32 + + if err := binary.Read(reader, binary.LittleEndian, &bloomFilterLen); err != nil { + return nil, err + } + if err := binary.Read(reader, binary.LittleEndian, &k); err != nil { + return nil, err + } + bloomFilterArray := make([]uint64, bloomFilterLen) + if err := binary.Read(reader, binary.LittleEndian, &bloomFilterArray); err != nil { + return nil, err + } + return newBloomFilterFromArray(bloomFilterArray, k), nil +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go index 1e3ad86d1..34d1b92f4 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go @@ -3,16 +3,16 @@ package chunked import ( "bytes" "encoding/binary" + "encoding/hex" "errors" "fmt" "io" "os" + "runtime" "sort" - "strconv" "strings" "sync" "time" - "unsafe" storage "github.com/containers/storage" graphdriver "github.com/containers/storage/drivers" @@ -21,30 +21,48 @@ import ( jsoniter "github.com/json-iterator/go" digest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) const ( cacheKey = "chunked-manifest-cache" - cacheVersion = 2 + cacheVersion = 3 digestSha256Empty = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + + // Using 3 hashes functions and n/m = 10 gives a false positive rate of ~1.7%: + // https://pages.cs.wisc.edu/~cao/papers/summary-cache/node8.html + bloomFilterScale = 10 // how much bigger is the bloom filter than the number of entries + bloomFilterHashes = 3 // number of hash functions for the bloom filter ) -type metadata struct { - tagLen int - digestLen int - tags []byte - vdata []byte +type cacheFile struct { + tagLen int + digestLen int + fnamesLen int + tags []byte + vdata []byte + fnames []byte + bloomFilter *bloomFilter } type layer struct { - id string - metadata *metadata - target string + id string + cacheFile *cacheFile + target string + // mmapBuffer is nil when the cache file is fully loaded in memory. + // Otherwise it points to a mmap'ed buffer that is referenced by cacheFile.vdata. + mmapBuffer []byte + + // reloadWithMmap is set when the current process generates the cache file, + // and cacheFile reuses the memory buffer used by the generation function. + // Next time the layer cache is used, attempt to reload the file using + // mmap. 
+ reloadWithMmap bool } type layersCache struct { - layers []layer + layers []*layer refs int store storage.Store mutex sync.RWMutex @@ -56,14 +74,29 @@ var ( cache *layersCache ) +func (c *layer) release() { + runtime.SetFinalizer(c, nil) + if c.mmapBuffer != nil { + unix.Munmap(c.mmapBuffer) + } +} + +func layerFinalizer(c *layer) { + c.release() +} + func (c *layersCache) release() { cacheMutex.Lock() defer cacheMutex.Unlock() c.refs-- - if c.refs == 0 { - cache = nil + if c.refs != 0 { + return } + for _, l := range c.layers { + l.release() + } + cache = nil } func getLayersCacheRef(store storage.Store) *layersCache { @@ -91,90 +124,183 @@ func getLayersCache(store storage.Store) (*layersCache, error) { return c, nil } +// loadLayerBigData attempts to load the specified cacheKey from a file and mmap its content. +// If the cache is not backed by a file, then it loads the entire content in memory. +// Returns the cache content, and if mmap'ed, the mmap buffer to Munmap. +func (c *layersCache) loadLayerBigData(layerID, bigDataKey string) ([]byte, []byte, error) { + inputFile, err := c.store.LayerBigData(layerID, bigDataKey) + if err != nil { + return nil, nil, err + } + defer inputFile.Close() + + // if the cache is backed by a file, attempt to mmap it. + if osFile, ok := inputFile.(*os.File); ok { + st, err := osFile.Stat() + if err != nil { + logrus.Warningf("Error stat'ing cache file for layer %q: %v", layerID, err) + goto fallback + } + size := st.Size() + if size == 0 { + logrus.Warningf("Cache file size is zero for layer %q: %v", layerID, err) + goto fallback + } + buf, err := unix.Mmap(int(osFile.Fd()), 0, int(size), unix.PROT_READ, unix.MAP_SHARED) + if err != nil { + logrus.Warningf("Error mmap'ing cache file for layer %q: %v", layerID, err) + goto fallback + } + // best effort advise to the kernel. + _ = unix.Madvise(buf, unix.MADV_RANDOM) + + return buf, buf, nil + } +fallback: + buf, err := io.ReadAll(inputFile) + return buf, nil, err +} + +func makeBinaryDigest(stringDigest string) ([]byte, error) { + d, err := digest.Parse(stringDigest) + if err != nil { + return nil, err + } + digestBytes, err := hex.DecodeString(d.Encoded()) + if err != nil { + return nil, err + } + algo := []byte(d.Algorithm()) + buf := make([]byte, 0, len(algo)+1+len(digestBytes)) + buf = append(buf, algo...) + buf = append(buf, ':') + buf = append(buf, digestBytes...) + return buf, nil +} + +func (c *layersCache) loadLayerCache(layerID string) (_ *layer, errRet error) { + buffer, mmapBuffer, err := c.loadLayerBigData(layerID, cacheKey) + if err != nil && !errors.Is(err, os.ErrNotExist) { + return nil, err + } + // there is no existing cache to load + if err != nil || buffer == nil { + return nil, nil + } + defer func() { + if errRet != nil && mmapBuffer != nil { + unix.Munmap(mmapBuffer) + } + }() + cacheFile, err := readCacheFileFromMemory(buffer) + if err != nil { + return nil, err + } + return c.createLayer(layerID, cacheFile, mmapBuffer) +} + +// createCacheFileFromTOC attempts to create a cache file for the specified layer. +// If a TOC is not available, the cache won't be created and nil is returned. 
+func (c *layersCache) createCacheFileFromTOC(layerID string) (*layer, error) { + clFile, err := c.store.LayerBigData(layerID, chunkedLayerDataKey) + if err != nil && !errors.Is(err, os.ErrNotExist) { + return nil, err + } + var lcd chunkedLayerData + if err == nil && clFile != nil { + defer clFile.Close() + cl, err := io.ReadAll(clFile) + if err != nil { + return nil, fmt.Errorf("open manifest file: %w", err) + } + json := jsoniter.ConfigCompatibleWithStandardLibrary + + if err := json.Unmarshal(cl, &lcd); err != nil { + return nil, err + } + } + manifestReader, err := c.store.LayerBigData(layerID, bigDataKey) + if err != nil { + // the cache file is not needed since there is no manifest file. + if errors.Is(err, os.ErrNotExist) { + return nil, nil + } + return nil, err + } + defer manifestReader.Close() + + manifest, err := io.ReadAll(manifestReader) + if err != nil { + return nil, fmt.Errorf("read manifest file: %w", err) + } + + cacheFile, err := writeCache(manifest, lcd.Format, layerID, c.store) + if err != nil { + return nil, err + } + l, err := c.createLayer(layerID, cacheFile, nil) + if err != nil { + return nil, err + } + l.reloadWithMmap = true + return l, nil +} + func (c *layersCache) load() error { c.mutex.Lock() defer c.mutex.Unlock() + loadedLayers := make(map[string]*layer) + for _, r := range c.layers { + loadedLayers[r.id] = r + } allLayers, err := c.store.Layers() if err != nil { return err } - existingLayers := make(map[string]string) - for _, r := range c.layers { - existingLayers[r.id] = r.target - } - currentLayers := make(map[string]string) + var newLayers []*layer for _, r := range allLayers { - currentLayers[r.ID] = r.ID - if _, found := existingLayers[r.ID]; found { - continue - } - - bigData, err := c.store.LayerBigData(r.ID, cacheKey) - // if the cache already exists, read and use it - if err == nil { - defer bigData.Close() - metadata, err := readMetadataFromCache(bigData) - if err == nil { - c.addLayer(r.ID, metadata) + // The layer is present in the store and it is already loaded. Attempt to + // re-use it if mmap'ed. + if l, found := loadedLayers[r.ID]; found { + // If the layer is not marked for re-load, move it to newLayers. + if !l.reloadWithMmap { + delete(loadedLayers, r.ID) + newLayers = append(newLayers, l) continue } - logrus.Warningf("Error reading cache file for layer %q: %v", r.ID, err) - } else if !errors.Is(err, os.ErrNotExist) { - return err } - - var lcd chunkedLayerData - - clFile, err := c.store.LayerBigData(r.ID, chunkedLayerDataKey) - if err != nil && !errors.Is(err, os.ErrNotExist) { - return err - } - if clFile != nil { - cl, err := io.ReadAll(clFile) - if err != nil { - return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err) - } - json := jsoniter.ConfigCompatibleWithStandardLibrary - if err := json.Unmarshal(cl, &lcd); err != nil { - return err - } - } - - // otherwise create it from the layer TOC. - manifestReader, err := c.store.LayerBigData(r.ID, bigDataKey) + // try to read the existing cache file. + l, err := c.loadLayerCache(r.ID) if err != nil { + logrus.Infof("Error loading cache file for layer %q: %v", r.ID, err) + } + if l != nil { + newLayers = append(newLayers, l) continue } - defer manifestReader.Close() - - manifest, err := io.ReadAll(manifestReader) + // the cache file is either not present or broken. Try to generate it from the TOC. 
+ l, err = c.createCacheFileFromTOC(r.ID) if err != nil { - return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err) + logrus.Warningf("Error creating cache file for layer %q: %v", r.ID, err) } - - metadata, err := writeCache(manifest, lcd.Format, r.ID, c.store) - if err == nil { - c.addLayer(r.ID, metadata) - } - } - - var newLayers []layer - for _, l := range c.layers { - if _, found := currentLayers[l.id]; found { + if l != nil { newLayers = append(newLayers, l) } } + // The layers that are still in loadedLayers are either stale or fully loaded in memory. Clean them up. + for _, l := range loadedLayers { + l.release() + } c.layers = newLayers - return nil } // calculateHardLinkFingerprint calculates a hash that can be used to verify if a file // is usable for deduplication with hardlinks. // To calculate the digest, it uses the file payload digest, UID, GID, mode and xattrs. -func calculateHardLinkFingerprint(f *internal.FileMetadata) (string, error) { +func calculateHardLinkFingerprint(f *fileMetadata) (string, error) { digester := digest.Canonical.Digester() modeString := fmt.Sprintf("%d:%d:%o", f.UID, f.GID, f.Mode) @@ -207,16 +333,46 @@ func calculateHardLinkFingerprint(f *internal.FileMetadata) (string, error) { return string(digester.Digest()), nil } -// generateFileLocation generates a file location in the form $OFFSET:$LEN:$PATH -func generateFileLocation(path string, offset, len uint64) []byte { - return []byte(fmt.Sprintf("%d:%d:%s", offset, len, path)) +// generateFileLocation generates a file location in the form $OFFSET$LEN$PATH_POS +func generateFileLocation(pathPos int, offset, len uint64) []byte { + var buf []byte + + buf = binary.AppendUvarint(buf, uint64(pathPos)) + buf = binary.AppendUvarint(buf, offset) + buf = binary.AppendUvarint(buf, len) + + return buf } -// generateTag generates a tag in the form $DIGEST$OFFSET@LEN. -// the [OFFSET; LEN] points to the variable length data where the file locations -// are stored. $DIGEST has length digestLen stored in the metadata file header. -func generateTag(digest string, offset, len uint64) string { - return fmt.Sprintf("%s%.20d@%.20d", digest, offset, len) +// parseFileLocation reads what was written by generateFileLocation. +func parseFileLocation(locationData []byte) (int, uint64, uint64, error) { + reader := bytes.NewReader(locationData) + + pathPos, err := binary.ReadUvarint(reader) + if err != nil { + return 0, 0, 0, err + } + + offset, err := binary.ReadUvarint(reader) + if err != nil { + return 0, 0, 0, err + } + + len, err := binary.ReadUvarint(reader) + if err != nil { + return 0, 0, 0, err + } + + return int(pathPos), offset, len, nil +} + +// appendTag appends the $OFFSET$LEN information to the provided $DIGEST. +// The [OFFSET; LEN] points to the variable length data where the file locations +// are stored. $DIGEST has length digestLen stored in the cache file file header. 
+func appendTag(digest []byte, offset, len uint64) ([]byte, error) { + digest = binary.LittleEndian.AppendUint64(digest, offset) + digest = binary.LittleEndian.AppendUint64(digest, len) + return digest, nil } type setBigData interface { @@ -224,6 +380,77 @@ type setBigData interface { SetLayerBigData(id, key string, data io.Reader) error } +func bloomFilterFromTags(tags [][]byte, digestLen int) *bloomFilter { + bloomFilter := newBloomFilter(len(tags)*bloomFilterScale, bloomFilterHashes) + for _, t := range tags { + bloomFilter.add(t[:digestLen]) + } + return bloomFilter +} + +func writeCacheFileToWriter(writer io.Writer, bloomFilter *bloomFilter, tags [][]byte, tagLen, digestLen int, vdata, fnames bytes.Buffer, tagsBuffer *bytes.Buffer) error { + sort.Slice(tags, func(i, j int) bool { + return bytes.Compare(tags[i], tags[j]) == -1 + }) + for _, t := range tags { + if _, err := tagsBuffer.Write(t); err != nil { + return err + } + } + + // version + if err := binary.Write(writer, binary.LittleEndian, uint64(cacheVersion)); err != nil { + return err + } + + // len of a tag + if err := binary.Write(writer, binary.LittleEndian, uint64(tagLen)); err != nil { + return err + } + + // len of a digest + if err := binary.Write(writer, binary.LittleEndian, uint64(digestLen)); err != nil { + return err + } + + // bloom filter + if err := bloomFilter.writeTo(writer); err != nil { + return err + } + + // tags length + if err := binary.Write(writer, binary.LittleEndian, uint64(tagsBuffer.Len())); err != nil { + return err + } + + // vdata length + if err := binary.Write(writer, binary.LittleEndian, uint64(vdata.Len())); err != nil { + return err + } + + // fnames length + if err := binary.Write(writer, binary.LittleEndian, uint64(fnames.Len())); err != nil { + return err + } + + // tags + if _, err := writer.Write(tagsBuffer.Bytes()); err != nil { + return err + } + + // variable length data + if _, err := writer.Write(vdata.Bytes()); err != nil { + return err + } + + // file names + if _, err := writer.Write(fnames.Bytes()); err != nil { + return err + } + + return nil +} + // writeCache write a cache for the layer ID. // It generates a sorted list of digests with their offset to the path location and offset. // The same cache is used to lookup files, chunks and candidates for deduplication with hard links. 
@@ -231,55 +458,99 @@ type setBigData interface { // - digest(file.payload)) // - digest(digest(file.payload) + file.UID + file.GID + file.mode + file.xattrs) // - digest(i) for each i in chunks(file payload) -func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id string, dest setBigData) (*metadata, error) { - var vdata bytes.Buffer +func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id string, dest setBigData) (*cacheFile, error) { + var vdata, tagsBuffer, fnames bytes.Buffer tagLen := 0 digestLen := 0 - var tagsBuffer bytes.Buffer - toc, err := prepareMetadata(manifest, format) + toc, err := prepareCacheFile(manifest, format) if err != nil { return nil, err } - var tags []string + fnamesMap := make(map[string]int) + getFileNamePosition := func(name string) (int, error) { + if pos, found := fnamesMap[name]; found { + return pos, nil + } + pos := fnames.Len() + fnamesMap[name] = pos + + if err := binary.Write(&fnames, binary.LittleEndian, uint32(len(name))); err != nil { + return 0, err + } + if _, err := fnames.WriteString(name); err != nil { + return 0, err + } + return pos, nil + } + + var tags [][]byte for _, k := range toc { if k.Digest != "" { - location := generateFileLocation(k.Name, 0, uint64(k.Size)) - + digest, err := makeBinaryDigest(k.Digest) + if err != nil { + return nil, err + } + fileNamePos, err := getFileNamePosition(k.Name) + if err != nil { + return nil, err + } + location := generateFileLocation(fileNamePos, 0, uint64(k.Size)) off := uint64(vdata.Len()) l := uint64(len(location)) - d := generateTag(k.Digest, off, l) - if tagLen == 0 { - tagLen = len(d) + tag, err := appendTag(digest, off, l) + if err != nil { + return nil, err } - if tagLen != len(d) { + if tagLen == 0 { + tagLen = len(tag) + } + if tagLen != len(tag) { return nil, errors.New("digest with different length found") } - tags = append(tags, d) + tags = append(tags, tag) fp, err := calculateHardLinkFingerprint(k) if err != nil { return nil, err } - d = generateTag(fp, off, l) - if tagLen != len(d) { + digestHardLink, err := makeBinaryDigest(fp) + if err != nil { + return nil, err + } + tag, err = appendTag(digestHardLink, off, l) + if err != nil { + return nil, err + } + if tagLen != len(tag) { return nil, errors.New("digest with different length found") } - tags = append(tags, d) + tags = append(tags, tag) if _, err := vdata.Write(location); err != nil { return nil, err } - - digestLen = len(k.Digest) + digestLen = len(digestHardLink) } if k.ChunkDigest != "" { - location := generateFileLocation(k.Name, uint64(k.ChunkOffset), uint64(k.ChunkSize)) + fileNamePos, err := getFileNamePosition(k.Name) + if err != nil { + return nil, err + } + location := generateFileLocation(fileNamePos, uint64(k.ChunkOffset), uint64(k.ChunkSize)) off := uint64(vdata.Len()) l := uint64(len(location)) - d := generateTag(k.ChunkDigest, off, l) + + digest, err := makeBinaryDigest(k.ChunkDigest) + if err != nil { + return nil, err + } + d, err := appendTag(digest, off, l) + if err != nil { + return nil, err + } if tagLen == 0 { tagLen = len(d) } @@ -291,17 +562,11 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin if _, err := vdata.Write(location); err != nil { return nil, err } - digestLen = len(k.ChunkDigest) + digestLen = len(digest) } } - sort.Strings(tags) - - for _, t := range tags { - if _, err := tagsBuffer.Write([]byte(t)); err != nil { - return nil, err - } - } + bloomFilter := bloomFilterFromTags(tags, digestLen) pipeReader, pipeWriter := 
io.Pipe() errChan := make(chan error, 1) @@ -309,49 +574,7 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin defer pipeWriter.Close() defer close(errChan) - // version - if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(cacheVersion)); err != nil { - errChan <- err - return - } - - // len of a tag - if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagLen)); err != nil { - errChan <- err - return - } - - // len of a digest - if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(digestLen)); err != nil { - errChan <- err - return - } - - // tags length - if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagsBuffer.Len())); err != nil { - errChan <- err - return - } - - // vdata length - if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(vdata.Len())); err != nil { - errChan <- err - return - } - - // tags - if _, err := pipeWriter.Write(tagsBuffer.Bytes()); err != nil { - errChan <- err - return - } - - // variable length data - if _, err := pipeWriter.Write(vdata.Bytes()); err != nil { - errChan <- err - return - } - - errChan <- nil + errChan <- writeCacheFileToWriter(pipeWriter, bloomFilter, tags, tagLen, digestLen, vdata, fnames, &tagsBuffer) }() defer pipeReader.Close() @@ -369,16 +592,21 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin logrus.Debugf("Written lookaside cache for layer %q with length %v", id, counter.Count) - return &metadata{ - digestLen: digestLen, - tagLen: tagLen, - tags: tagsBuffer.Bytes(), - vdata: vdata.Bytes(), + return &cacheFile{ + digestLen: digestLen, + tagLen: tagLen, + tags: tagsBuffer.Bytes(), + vdata: vdata.Bytes(), + fnames: fnames.Bytes(), + fnamesLen: len(fnames.Bytes()), + bloomFilter: bloomFilter, }, nil } -func readMetadataFromCache(bigData io.Reader) (*metadata, error) { - var version, tagLen, digestLen, tagsLen, vdataLen uint64 +func readCacheFileFromMemory(bigDataBuffer []byte) (*cacheFile, error) { + bigData := bytes.NewReader(bigDataBuffer) + + var version, tagLen, digestLen, tagsLen, fnamesLen, vdataLen uint64 if err := binary.Read(bigData, binary.LittleEndian, &version); err != nil { return nil, err } @@ -391,6 +619,12 @@ func readMetadataFromCache(bigData io.Reader) (*metadata, error) { if err := binary.Read(bigData, binary.LittleEndian, &digestLen); err != nil { return nil, err } + + bloomFilter, err := readBloomFilter(bigData) + if err != nil { + return nil, err + } + if err := binary.Read(bigData, binary.LittleEndian, &tagsLen); err != nil { return nil, err } @@ -398,25 +632,32 @@ func readMetadataFromCache(bigData io.Reader) (*metadata, error) { return nil, err } + if err := binary.Read(bigData, binary.LittleEndian, &fnamesLen); err != nil { + return nil, err + } tags := make([]byte, tagsLen) if _, err := bigData.Read(tags); err != nil { return nil, err } - vdata := make([]byte, vdataLen) - if _, err := bigData.Read(vdata); err != nil { - return nil, err - } + // retrieve the unread part of the buffer. 
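Editorial aside (not part of the diff): the rewritten writeCache no longer emits the header fields inline in the goroutine; it hands the whole serialization to writeCacheFileToWriter, reports the outcome on a buffered error channel, and feeds the read end of the pipe to the consumer. A stripped-down sketch of that producer/consumer pattern; writePayload here is a stand-in for the real serializer.

package main

import (
	"fmt"
	"io"
	"os"
)

// writePayload stands in for writeCacheFileToWriter: it writes everything to
// the pipe and returns a single error describing the outcome.
func writePayload(w io.Writer) error {
	_, err := w.Write([]byte("cache payload"))
	return err
}

func main() {
	pr, pw := io.Pipe()
	errChan := make(chan error, 1)

	go func() {
		defer pw.Close()
		defer close(errChan)
		errChan <- writePayload(pw)
	}()
	defer pr.Close()

	// The consumer only needs an io.Reader; here it is just stdout.
	if _, err := io.Copy(os.Stdout, pr); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	if err := <-errChan; err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}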
+ remaining := bigDataBuffer[len(bigDataBuffer)-bigData.Len():] - return &metadata{ - tagLen: int(tagLen), - digestLen: int(digestLen), - tags: tags, - vdata: vdata, + vdata := remaining[:vdataLen] + fnames := remaining[vdataLen:] + + return &cacheFile{ + bloomFilter: bloomFilter, + digestLen: int(digestLen), + fnames: fnames, + fnamesLen: int(fnamesLen), + tagLen: int(tagLen), + tags: tags, + vdata: vdata, }, nil } -func prepareMetadata(manifest []byte, format graphdriver.DifferOutputFormat) ([]*internal.FileMetadata, error) { +func prepareCacheFile(manifest []byte, format graphdriver.DifferOutputFormat) ([]*fileMetadata, error) { toc, err := unmarshalToc(manifest) if err != nil { // ignore errors here. They might be caused by a different manifest format. @@ -424,10 +665,17 @@ func prepareMetadata(manifest []byte, format graphdriver.DifferOutputFormat) ([] return nil, nil //nolint: nilnil } + var entries []fileMetadata + for i := range toc.Entries { + entries = append(entries, fileMetadata{ + FileMetadata: toc.Entries[i], + }) + } + switch format { case graphdriver.DifferOutputFormatDir: case graphdriver.DifferOutputFormatFlat: - toc.Entries, err = makeEntriesFlat(toc.Entries) + entries, err = makeEntriesFlat(entries) if err != nil { return nil, err } @@ -435,19 +683,19 @@ func prepareMetadata(manifest []byte, format graphdriver.DifferOutputFormat) ([] return nil, fmt.Errorf("unknown format %q", format) } - var r []*internal.FileMetadata + var r []*fileMetadata chunkSeen := make(map[string]bool) - for i := range toc.Entries { - d := toc.Entries[i].Digest + for i := range entries { + d := entries[i].Digest if d != "" { - r = append(r, &toc.Entries[i]) + r = append(r, &entries[i]) continue } // chunks do not use hard link dedup so keeping just one candidate is enough cd := toc.Entries[i].ChunkDigest if cd != "" && !chunkSeen[cd] { - r = append(r, &toc.Entries[i]) + r = append(r, &entries[i]) chunkSeen[cd] = true } } @@ -455,49 +703,49 @@ func prepareMetadata(manifest []byte, format graphdriver.DifferOutputFormat) ([] return r, nil } -func (c *layersCache) addLayer(id string, metadata *metadata) error { +func (c *layersCache) createLayer(id string, cacheFile *cacheFile, mmapBuffer []byte) (*layer, error) { target, err := c.store.DifferTarget(id) if err != nil { - return fmt.Errorf("get checkout directory layer %q: %w", id, err) + return nil, fmt.Errorf("get checkout directory layer %q: %w", id, err) } - - l := layer{ - id: id, - metadata: metadata, - target: target, + l := &layer{ + id: id, + cacheFile: cacheFile, + target: target, + mmapBuffer: mmapBuffer, } - c.layers = append(c.layers, l) - return nil + if mmapBuffer != nil { + runtime.SetFinalizer(l, layerFinalizer) + } + return l, nil } -func byteSliceAsString(b []byte) string { - return *(*string)(unsafe.Pointer(&b)) -} - -func findTag(digest string, metadata *metadata) (string, uint64, uint64) { - if len(digest) != metadata.digestLen { - return "", 0, 0 - } - - nElements := len(metadata.tags) / metadata.tagLen +func findBinaryTag(binaryDigest []byte, cacheFile *cacheFile) (bool, uint64, uint64) { + nElements := len(cacheFile.tags) / cacheFile.tagLen i := sort.Search(nElements, func(i int) bool { - d := byteSliceAsString(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+metadata.digestLen]) - return strings.Compare(d, digest) >= 0 + d := cacheFile.tags[i*cacheFile.tagLen : i*cacheFile.tagLen+cacheFile.digestLen] + return bytes.Compare(d, binaryDigest) >= 0 }) if i < nElements { - d := string(metadata.tags[i*metadata.tagLen : 
i*metadata.tagLen+len(digest)]) - if digest == d { - startOff := i*metadata.tagLen + metadata.digestLen - parts := strings.Split(string(metadata.tags[startOff:(i+1)*metadata.tagLen]), "@") + d := cacheFile.tags[i*cacheFile.tagLen : i*cacheFile.tagLen+cacheFile.digestLen] + if bytes.Equal(binaryDigest, d) { + startOff := i*cacheFile.tagLen + cacheFile.digestLen - off, _ := strconv.ParseInt(parts[0], 10, 64) + // check for corrupted data, there must be 2 u64 (off and len) after the digest. + if cacheFile.tagLen < cacheFile.digestLen+16 { + return false, 0, 0 + } - len, _ := strconv.ParseInt(parts[1], 10, 64) - return digest, uint64(off), uint64(len) + offsetAndLen := cacheFile.tags[startOff : (i+1)*cacheFile.tagLen] + + off := binary.LittleEndian.Uint64(offsetAndLen[:8]) + len := binary.LittleEndian.Uint64(offsetAndLen[8:16]) + + return true, off, len } } - return "", 0, 0 + return false, 0, 0 } func (c *layersCache) findDigestInternal(digest string) (string, string, int64, error) { @@ -505,20 +753,42 @@ func (c *layersCache) findDigestInternal(digest string) (string, string, int64, return "", "", -1, nil } + binaryDigest, err := makeBinaryDigest(digest) + if err != nil { + return "", "", 0, err + } + c.mutex.RLock() defer c.mutex.RUnlock() for _, layer := range c.layers { - digest, off, tagLen := findTag(digest, layer.metadata) - if digest != "" { - position := string(layer.metadata.vdata[off : off+tagLen]) - parts := strings.SplitN(position, ":", 3) - if len(parts) != 3 { - continue + if !layer.cacheFile.bloomFilter.maybeContains(binaryDigest) { + continue + } + found, off, tagLen := findBinaryTag(binaryDigest, layer.cacheFile) + if found { + if uint64(len(layer.cacheFile.vdata)) < off+tagLen { + return "", "", 0, fmt.Errorf("corrupted cache file for layer %q", layer.id) } - offFile, _ := strconv.ParseInt(parts[0], 10, 64) + fileLocationData := layer.cacheFile.vdata[off : off+tagLen] + + fnamePosition, offFile, _, err := parseFileLocation(fileLocationData) + if err != nil { + return "", "", 0, fmt.Errorf("corrupted cache file for layer %q", layer.id) + } + + if len(layer.cacheFile.fnames) < fnamePosition+4 { + return "", "", 0, fmt.Errorf("corrupted cache file for layer %q", layer.id) + } + lenPath := int(binary.LittleEndian.Uint32(layer.cacheFile.fnames[fnamePosition : fnamePosition+4])) + + if len(layer.cacheFile.fnames) < fnamePosition+lenPath+4 { + return "", "", 0, fmt.Errorf("corrupted cache file for layer %q", layer.id) + } + path := string(layer.cacheFile.fnames[fnamePosition+4 : fnamePosition+lenPath+4]) + // parts[1] is the chunk length, currently unused. - return layer.target, parts[2], offFile, nil + return layer.target, path, int64(offFile), nil } } @@ -527,7 +797,7 @@ func (c *layersCache) findDigestInternal(digest string) (string, string, int64, // findFileInOtherLayers finds the specified file in other layers. // file is the file to look for. 
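Editorial aside (not part of the diff): findBinaryTag above binary-searches the packed tag array directly: sort.Search locates the first fixed-width record whose digest prefix compares >= the probe, and the 16 bytes after the digest hold the little-endian offset and length. In findDigestInternal the search is only attempted once the per-layer bloom filter says the digest may be present, which keeps negative lookups cheap. A self-contained sketch with a toy 4-byte digest (real digests are much longer):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"sort"
)

const digestLen = 4            // illustrative; sha256 digests are 32 bytes
const tagLen = digestLen + 16  // digest || offset (8 bytes LE) || length (8 bytes LE)

// findTag binary-searches the packed, sorted record array for probe.
func findTag(tags, probe []byte) (found bool, off, length uint64) {
	n := len(tags) / tagLen
	i := sort.Search(n, func(i int) bool {
		d := tags[i*tagLen : i*tagLen+digestLen]
		return bytes.Compare(d, probe) >= 0
	})
	if i < n && bytes.Equal(tags[i*tagLen:i*tagLen+digestLen], probe) {
		rest := tags[i*tagLen+digestLen : (i+1)*tagLen]
		return true, binary.LittleEndian.Uint64(rest[:8]), binary.LittleEndian.Uint64(rest[8:16])
	}
	return false, 0, 0
}

func main() {
	// Two records, already sorted by digest.
	var tags []byte
	for _, r := range []struct {
		digest      []byte
		off, length uint64
	}{
		{[]byte{0x01, 0x02, 0x03, 0x04}, 0, 10},
		{[]byte{0xaa, 0xbb, 0xcc, 0xdd}, 10, 20},
	} {
		tags = append(tags, r.digest...)
		tags = binary.LittleEndian.AppendUint64(tags, r.off)
		tags = binary.LittleEndian.AppendUint64(tags, r.length)
	}

	ok, off, l := findTag(tags, []byte{0xaa, 0xbb, 0xcc, 0xdd})
	fmt.Println(ok, off, l) // true 10 20
}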
-func (c *layersCache) findFileInOtherLayers(file *internal.FileMetadata, useHardLinks bool) (string, string, error) { +func (c *layersCache) findFileInOtherLayers(file *fileMetadata, useHardLinks bool) (string, string, error) { digest := file.Digest if useHardLinks { var err error @@ -548,123 +818,95 @@ func (c *layersCache) findChunkInOtherLayers(chunk *internal.FileMetadata) (stri } func unmarshalToc(manifest []byte) (*internal.TOC, error) { - var buf bytes.Buffer - count := 0 var toc internal.TOC iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest) - for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { - if strings.ToLower(field) != "entries" { - iter.Skip() - continue - } - for iter.ReadArray() { - for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { - switch strings.ToLower(field) { - case "type", "name", "linkname", "digest", "chunkdigest", "chunktype", "modtime", "accesstime", "changetime": - count += len(iter.ReadStringAsSlice()) - case "xattrs": - for key := iter.ReadObject(); key != ""; key = iter.ReadObject() { - count += len(iter.ReadStringAsSlice()) - } - default: - iter.Skip() - } - } - } - break - } - - buf.Grow(count) - - getString := func(b []byte) string { - from := buf.Len() - buf.Write(b) - to := buf.Len() - return byteSliceAsString(buf.Bytes()[from:to]) - } - - pool := iter.Pool() - pool.ReturnIterator(iter) - iter = pool.BorrowIterator(manifest) for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { - if strings.ToLower(field) == "version" { + switch strings.ToLower(field) { + case "version": toc.Version = iter.ReadInt() - continue - } - if strings.ToLower(field) != "entries" { - iter.Skip() - continue - } - for iter.ReadArray() { - var m internal.FileMetadata - for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { - switch strings.ToLower(field) { - case "type": - m.Type = getString(iter.ReadStringAsSlice()) - case "name": - m.Name = getString(iter.ReadStringAsSlice()) - case "linkname": - m.Linkname = getString(iter.ReadStringAsSlice()) - case "mode": - m.Mode = iter.ReadInt64() - case "size": - m.Size = iter.ReadInt64() - case "uid": - m.UID = iter.ReadInt() - case "gid": - m.GID = iter.ReadInt() - case "modtime": - time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice())) - if err != nil { - return nil, err + + case "entries": + for iter.ReadArray() { + var m internal.FileMetadata + for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { + switch strings.ToLower(field) { + case "type": + m.Type = iter.ReadString() + case "name": + m.Name = iter.ReadString() + case "linkname": + m.Linkname = iter.ReadString() + case "mode": + m.Mode = iter.ReadInt64() + case "size": + m.Size = iter.ReadInt64() + case "uid": + m.UID = iter.ReadInt() + case "gid": + m.GID = iter.ReadInt() + case "modtime": + time, err := time.Parse(time.RFC3339, iter.ReadString()) + if err != nil { + return nil, err + } + m.ModTime = &time + case "accesstime": + time, err := time.Parse(time.RFC3339, iter.ReadString()) + if err != nil { + return nil, err + } + m.AccessTime = &time + case "changetime": + time, err := time.Parse(time.RFC3339, iter.ReadString()) + if err != nil { + return nil, err + } + m.ChangeTime = &time + case "devmajor": + m.Devmajor = iter.ReadInt64() + case "devminor": + m.Devminor = iter.ReadInt64() + case "digest": + m.Digest = iter.ReadString() + case "offset": + m.Offset = iter.ReadInt64() + case "endoffset": + m.EndOffset = iter.ReadInt64() + case 
"chunksize": + m.ChunkSize = iter.ReadInt64() + case "chunkoffset": + m.ChunkOffset = iter.ReadInt64() + case "chunkdigest": + m.ChunkDigest = iter.ReadString() + case "chunktype": + m.ChunkType = iter.ReadString() + case "xattrs": + m.Xattrs = make(map[string]string) + for key := iter.ReadObject(); key != ""; key = iter.ReadObject() { + m.Xattrs[key] = iter.ReadString() + } + default: + iter.Skip() } - m.ModTime = &time - case "accesstime": - time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice())) - if err != nil { - return nil, err - } - m.AccessTime = &time - case "changetime": - time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice())) - if err != nil { - return nil, err - } - m.ChangeTime = &time - case "devmajor": - m.Devmajor = iter.ReadInt64() - case "devminor": - m.Devminor = iter.ReadInt64() - case "digest": - m.Digest = getString(iter.ReadStringAsSlice()) - case "offset": - m.Offset = iter.ReadInt64() - case "endoffset": - m.EndOffset = iter.ReadInt64() - case "chunksize": - m.ChunkSize = iter.ReadInt64() - case "chunkoffset": - m.ChunkOffset = iter.ReadInt64() - case "chunkdigest": - m.ChunkDigest = getString(iter.ReadStringAsSlice()) - case "chunktype": - m.ChunkType = getString(iter.ReadStringAsSlice()) - case "xattrs": - m.Xattrs = make(map[string]string) - for key := iter.ReadObject(); key != ""; key = iter.ReadObject() { - value := iter.ReadStringAsSlice() - m.Xattrs[key] = getString(value) - } - default: - iter.Skip() } + if m.Type == TypeReg && m.Size == 0 && m.Digest == "" { + m.Digest = digestSha256Empty + } + toc.Entries = append(toc.Entries, m) } - if m.Type == TypeReg && m.Size == 0 && m.Digest == "" { - m.Digest = digestSha256Empty + + case "tarsplitdigest": // strings.ToLower("tarSplitDigest") + s := iter.ReadString() + d, err := digest.Parse(s) + if err != nil { + return nil, fmt.Errorf("Invalid tarSplitDigest %q: %w", s, err) } - toc.Entries = append(toc.Entries, m) + toc.TarSplitDigest = d + + default: + iter.Skip() } } @@ -677,6 +919,5 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) { return nil, fmt.Errorf("unexpected data after manifest") } - toc.StringsBuf = buf return &toc, nil } diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go index 112ca2c7c..7b3879a99 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go @@ -7,7 +7,6 @@ import ( "io" "strconv" - "github.com/containerd/stargz-snapshotter/estargz" "github.com/containers/storage/pkg/chunked/internal" "github.com/klauspost/compress/zstd" "github.com/klauspost/pgzip" @@ -33,7 +32,7 @@ func typeToTarType(t string) (byte, error) { return r, nil } -func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) { +func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, tocDigest digest.Digest) ([]byte, int64, error) { // information on the format here https://github.com/containerd/stargz-snapshotter/blob/main/docs/stargz-estargz.md footerSize := int64(51) if blobSize <= footerSize { @@ -126,91 +125,53 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, return nil, 0, err } - d, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation]) - if err != nil { - return nil, 0, err - } - if manifestDigester.Digest() != 
d { + if manifestDigester.Digest() != tocDigest { return nil, 0, errors.New("invalid manifest checksum") } return manifestUncompressed, tocOffset, nil } -// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. The blob total size must -// be specified. -// This function uses the io.github.containers.zstd-chunked. annotations when specified. -func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, []byte, int64, error) { - footerSize := int64(internal.FooterSizeSupported) - if blobSize <= footerSize { - return nil, nil, 0, errors.New("blob too small") +// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. +// Returns (manifest blob, parsed manifest, tar-split blob, manifest offset). +func readZstdChunkedManifest(blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string) ([]byte, *internal.TOC, []byte, int64, error) { + offsetMetadata := annotations[internal.ManifestInfoKey] + if offsetMetadata == "" { + return nil, nil, nil, 0, fmt.Errorf("%q annotation missing", internal.ManifestInfoKey) } - - var footerData internal.ZstdChunkedFooterData - - if offsetMetadata := annotations[internal.ManifestInfoKey]; offsetMetadata != "" { - var err error - footerData, err = internal.ReadFooterDataFromAnnotations(annotations) - if err != nil { - return nil, nil, 0, err - } - } else { - chunk := ImageSourceChunk{ - Offset: uint64(blobSize - footerSize), - Length: uint64(footerSize), - } - parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk}) - if err != nil { - return nil, nil, 0, err - } - var reader io.ReadCloser - select { - case r := <-parts: - reader = r - case err := <-errs: - return nil, nil, 0, err - } - footer := make([]byte, footerSize) - if _, err := io.ReadFull(reader, footer); err != nil { - return nil, nil, 0, err - } - - footerData, err = internal.ReadFooterDataFromBlob(footer) - if err != nil { - return nil, nil, 0, err + var manifestChunk ImageSourceChunk + var manifestLengthUncompressed, manifestType uint64 + if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &manifestChunk.Offset, &manifestChunk.Length, &manifestLengthUncompressed, &manifestType); err != nil { + return nil, nil, nil, 0, err + } + // The tarSplit… values are valid if tarSplitChunk.Offset > 0 + var tarSplitChunk ImageSourceChunk + var tarSplitLengthUncompressed uint64 + if tarSplitInfoKeyAnnotation, found := annotations[internal.TarSplitInfoKey]; found { + if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &tarSplitChunk.Offset, &tarSplitChunk.Length, &tarSplitLengthUncompressed); err != nil { + return nil, nil, nil, 0, err } } - if footerData.ManifestType != internal.ManifestTypeCRFS { - return nil, nil, 0, errors.New("invalid manifest type") + if manifestType != internal.ManifestTypeCRFS { + return nil, nil, nil, 0, errors.New("invalid manifest type") } // set a reasonable limit - if footerData.LengthCompressed > (1<<20)*50 { - return nil, nil, 0, errors.New("manifest too big") + if manifestChunk.Length > (1<<20)*50 { + return nil, nil, nil, 0, errors.New("manifest too big") } - if footerData.LengthUncompressed > (1<<20)*50 { - return nil, nil, 0, errors.New("manifest too big") + if manifestLengthUncompressed > (1<<20)*50 { + return nil, nil, nil, 0, errors.New("manifest too big") } - chunk := ImageSourceChunk{ - Offset: footerData.Offset, - Length: footerData.LengthCompressed, + chunks := []ImageSourceChunk{manifestChunk} 
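Editorial aside (not part of the diff): with the binary footer no longer fetched, the manifest location comes entirely from the io.github.containers.zstd-chunked.manifest-position annotation, parsed above with fmt.Sscanf as offset:length:uncompressedLength:type; the tar-split position is handled the same way with a three-field %d:%d:%d format. A small sketch of that parsing; the sample annotation value is made up.

package main

import (
	"fmt"
)

// parseManifestPosition decodes "offset:length:uncompressedLength:type",
// all base-10 integers, as the annotation is parsed above.
func parseManifestPosition(v string) (offset, length, uncompressed, manifestType uint64, err error) {
	if _, err = fmt.Sscanf(v, "%d:%d:%d:%d", &offset, &length, &uncompressed, &manifestType); err != nil {
		return 0, 0, 0, 0, fmt.Errorf("parsing manifest position %q: %w", v, err)
	}
	return offset, length, uncompressed, manifestType, nil
}

func main() {
	off, l, u, t, err := parseManifestPosition("1048576:4096:16384:1")
	if err != nil {
		panic(err)
	}
	fmt.Println(off, l, u, t) // 1048576 4096 16384 1
}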
+ if tarSplitChunk.Offset > 0 { + chunks = append(chunks, tarSplitChunk) } - - chunks := []ImageSourceChunk{chunk} - - if footerData.OffsetTarSplit > 0 { - chunkTarSplit := ImageSourceChunk{ - Offset: footerData.OffsetTarSplit, - Length: footerData.LengthCompressedTarSplit, - } - chunks = append(chunks, chunkTarSplit) - } - parts, errs, err := blobStream.GetBlobAt(chunks) if err != nil { - return nil, nil, 0, err + return nil, nil, nil, 0, err } readBlob := func(len uint64) ([]byte, error) { @@ -233,34 +194,39 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, ann return blob, nil } - manifest, err := readBlob(footerData.LengthCompressed) + manifest, err := readBlob(manifestChunk.Length) if err != nil { - return nil, nil, 0, err + return nil, nil, nil, 0, err } - decodedBlob, err := decodeAndValidateBlob(manifest, footerData.LengthUncompressed, footerData.ChecksumAnnotation) + decodedBlob, err := decodeAndValidateBlob(manifest, manifestLengthUncompressed, tocDigest.String()) if err != nil { - return nil, nil, 0, err + return nil, nil, nil, 0, fmt.Errorf("validating and decompressing TOC: %w", err) } + toc, err := unmarshalToc(decodedBlob) + if err != nil { + return nil, nil, nil, 0, fmt.Errorf("unmarshaling TOC: %w", err) + } + decodedTarSplit := []byte{} - if footerData.OffsetTarSplit > 0 { - tarSplit, err := readBlob(footerData.LengthCompressedTarSplit) + if tarSplitChunk.Offset > 0 { + tarSplit, err := readBlob(tarSplitChunk.Length) if err != nil { - return nil, nil, 0, err + return nil, nil, nil, 0, err } - decodedTarSplit, err = decodeAndValidateBlob(tarSplit, footerData.LengthUncompressedTarSplit, footerData.ChecksumAnnotationTarSplit) + decodedTarSplit, err = decodeAndValidateBlob(tarSplit, tarSplitLengthUncompressed, toc.TarSplitDigest.String()) if err != nil { - return nil, nil, 0, err + return nil, nil, nil, 0, fmt.Errorf("validating and decompressing tar-split: %w", err) } } - return decodedBlob, decodedTarSplit, int64(footerData.Offset), err + return decodedBlob, toc, decodedTarSplit, int64(manifestChunk.Offset), err } func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedCompressedChecksum string) ([]byte, error) { d, err := digest.Parse(expectedCompressedChecksum) if err != nil { - return nil, err + return nil, fmt.Errorf("invalid digest %q: %w", expectedCompressedChecksum, err) } blobDigester := d.Algorithm().Digester() diff --git a/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go b/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go index d3c105c4d..701b6aa53 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go +++ b/vendor/github.com/containers/storage/pkg/chunked/dump/dump.go @@ -52,7 +52,7 @@ func escaped(val string, escape int) string { if noescapeSpace { hexEscape = !unicode.IsPrint(rune(c)) } else { - hexEscape = !unicode.IsGraphic(rune(c)) + hexEscape = !unicode.IsPrint(rune(c)) || unicode.IsSpace(rune(c)) } } diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go index caa581efe..5decbfb63 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go +++ b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go @@ -8,7 +8,6 @@ import ( "archive/tar" "bytes" "encoding/binary" - "errors" "fmt" "io" "time" @@ -19,11 +18,9 @@ import ( ) type TOC struct { - Version int `json:"version"` - Entries []FileMetadata `json:"entries"` - - // 
internal: used by unmarshalToc - StringsBuf bytes.Buffer `json:"-"` + Version int `json:"version"` + Entries []FileMetadata `json:"entries"` + TarSplitDigest digest.Digest `json:"tarSplitDigest,omitempty"` } type FileMetadata struct { @@ -48,9 +45,6 @@ type FileMetadata struct { ChunkOffset int64 `json:"chunkOffset,omitempty"` ChunkDigest string `json:"chunkDigest,omitempty"` ChunkType string `json:"chunkType,omitempty"` - - // internal: computed by mergeTOCEntries. - Chunks []*FileMetadata `json:"-"` } const ( @@ -91,9 +85,10 @@ func GetType(t byte) (string, error) { const ( ManifestChecksumKey = "io.github.containers.zstd-chunked.manifest-checksum" ManifestInfoKey = "io.github.containers.zstd-chunked.manifest-position" - TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum" TarSplitInfoKey = "io.github.containers.zstd-chunked.tarsplit-position" + TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum" // Deprecated: Use the TOC.TarSplitDigest field instead, this annotation is no longer read nor written. + // ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file. ManifestTypeCRFS = 1 @@ -140,8 +135,9 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off manifestOffset := offset + zstdSkippableFrameHeader toc := TOC{ - Version: 1, - Entries: metadata, + Version: 1, + Entries: metadata, + TarSplitDigest: tarSplitData.Digest, } json := jsoniter.ConfigCompatibleWithStandardLibrary @@ -177,7 +173,6 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off return err } - outMetadata[TarSplitChecksumKey] = tarSplitData.Digest.String() tarSplitOffset := manifestOffset + uint64(len(compressedManifest)) + zstdSkippableFrameHeader outMetadata[TarSplitInfoKey] = fmt.Sprintf("%d:%d:%d", tarSplitOffset, len(tarSplitData.Data), tarSplitData.UncompressedSize) if err := appendZstdSkippableFrame(dest, tarSplitData.Data); err != nil { @@ -189,11 +184,9 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off Offset: manifestOffset, LengthCompressed: uint64(len(compressedManifest)), LengthUncompressed: uint64(len(manifest)), - ChecksumAnnotation: "", // unused OffsetTarSplit: uint64(tarSplitOffset), LengthCompressedTarSplit: uint64(len(tarSplitData.Data)), LengthUncompressedTarSplit: uint64(tarSplitData.UncompressedSize), - ChecksumAnnotationTarSplit: "", // unused } manifestDataLE := footerDataToBlob(footer) @@ -207,18 +200,22 @@ func ZstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) { } // ZstdChunkedFooterData contains all the data stored in the zstd:chunked footer. +// This footer exists to make the blobs self-describing, our implementation +// never reads it: +// Partial pull security hinges on the TOC digest, and that exists as a layer annotation; +// so we are relying on the layer annotations anyway, and doing so means we can avoid +// a round-trip to fetch this binary footer. type ZstdChunkedFooterData struct { ManifestType uint64 Offset uint64 LengthCompressed uint64 LengthUncompressed uint64 - ChecksumAnnotation string // Only used when reading a layer, not when creating it OffsetTarSplit uint64 LengthCompressedTarSplit uint64 LengthUncompressedTarSplit uint64 - ChecksumAnnotationTarSplit string // Only used when reading a layer, not when creating it + ChecksumAnnotationTarSplit string // Deprecated: This field is not a part of the footer and not used for any purpose. 
} func footerDataToBlob(footer ZstdChunkedFooterData) []byte { @@ -235,49 +232,3 @@ func footerDataToBlob(footer ZstdChunkedFooterData) []byte { return manifestDataLE } - -// ReadFooterDataFromAnnotations reads the zstd:chunked footer data from the given annotations. -func ReadFooterDataFromAnnotations(annotations map[string]string) (ZstdChunkedFooterData, error) { - var footerData ZstdChunkedFooterData - - footerData.ChecksumAnnotation = annotations[ManifestChecksumKey] - if footerData.ChecksumAnnotation == "" { - return footerData, fmt.Errorf("manifest checksum annotation %q not found", ManifestChecksumKey) - } - - offsetMetadata := annotations[ManifestInfoKey] - - if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &footerData.Offset, &footerData.LengthCompressed, &footerData.LengthUncompressed, &footerData.ManifestType); err != nil { - return footerData, err - } - - if tarSplitInfoKeyAnnotation, found := annotations[TarSplitInfoKey]; found { - if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &footerData.OffsetTarSplit, &footerData.LengthCompressedTarSplit, &footerData.LengthUncompressedTarSplit); err != nil { - return footerData, err - } - footerData.ChecksumAnnotationTarSplit = annotations[TarSplitChecksumKey] - } - return footerData, nil -} - -// ReadFooterDataFromBlob reads the zstd:chunked footer from the binary buffer. -func ReadFooterDataFromBlob(footer []byte) (ZstdChunkedFooterData, error) { - var footerData ZstdChunkedFooterData - - if len(footer) < FooterSizeSupported { - return footerData, errors.New("blob too small") - } - footerData.Offset = binary.LittleEndian.Uint64(footer[0:8]) - footerData.LengthCompressed = binary.LittleEndian.Uint64(footer[8:16]) - footerData.LengthUncompressed = binary.LittleEndian.Uint64(footer[16:24]) - footerData.ManifestType = binary.LittleEndian.Uint64(footer[24:32]) - footerData.OffsetTarSplit = binary.LittleEndian.Uint64(footer[32:40]) - footerData.LengthCompressedTarSplit = binary.LittleEndian.Uint64(footer[40:48]) - footerData.LengthUncompressedTarSplit = binary.LittleEndian.Uint64(footer[48:56]) - - // the magic number is stored in the last 8 bytes - if !bytes.Equal(ZstdChunkedFrameMagic, footer[len(footer)-len(ZstdChunkedFrameMagic):]) { - return footerData, errors.New("invalid magic number") - } - return footerData, nil -} diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go index f300df347..e001022cb 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go @@ -25,6 +25,7 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chunked/compressor" "github.com/containers/storage/pkg/chunked/internal" + "github.com/containers/storage/pkg/chunked/toc" "github.com/containers/storage/pkg/fsverity" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/system" @@ -58,11 +59,27 @@ const ( copyGoRoutines = 32 ) +// fileMetadata is a wrapper around internal.FileMetadata with additional private fields that +// are not part of the TOC document. +// Type: TypeChunk entries are stored in Chunks, the primary [fileMetadata] entries never use TypeChunk. +type fileMetadata struct { + internal.FileMetadata + + // chunks stores the TypeChunk entries relevant to this entry when FileMetadata.Type == TypeReg. 
+ chunks []*internal.FileMetadata + + // skipSetAttrs is set when the file attributes must not be + // modified, e.g. it is a hard link from a different source, + // or a composefs file. + skipSetAttrs bool +} + type compressedFileType int type chunkedDiffer struct { stream ImageSourceSeekable manifest []byte + toc *internal.TOC // The parsed contents of manifest, or nil if not yet available tarSplit []byte layersCache *layersCache tocOffset int64 @@ -138,7 +155,8 @@ func doHardLink(srcFd int, destDirFd int, destBase string) error { return err } -func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, useHardLinks bool) (*os.File, int64, error) { +func copyFileContent(srcFd int, fileMetadata *fileMetadata, dirfd int, mode os.FileMode, useHardLinks bool) (*os.File, int64, error) { + destFile := fileMetadata.Name src := fmt.Sprintf("/proc/self/fd/%d", srcFd) st, err := os.Stat(src) if err != nil { @@ -156,6 +174,8 @@ func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, us err := doHardLink(srcFd, int(destDir.Fd()), destBase) if err == nil { + // if the file was deduplicated with a hard link, skip overriding file metadata. + fileMetadata.skipSetAttrs = true return nil, st.Size(), nil } } @@ -198,15 +218,15 @@ func (f *seekableFile) GetBlobAt(chunks []ImageSourceChunk) (chan io.ReadCloser, return streams, errs, nil } -func convertTarToZstdChunked(destDirectory string, payload *os.File) (*seekableFile, digest.Digest, map[string]string, error) { +func convertTarToZstdChunked(destDirectory string, payload *os.File) (int64, *seekableFile, digest.Digest, map[string]string, error) { diff, err := archive.DecompressStream(payload) if err != nil { - return nil, "", nil, err + return 0, nil, "", nil, err } fd, err := unix.Open(destDirectory, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600) if err != nil { - return nil, "", nil, err + return 0, nil, "", nil, err } f := os.NewFile(uintptr(fd), destDirectory) @@ -216,23 +236,24 @@ func convertTarToZstdChunked(destDirectory string, payload *os.File) (*seekableF chunked, err := compressor.ZstdCompressor(f, newAnnotations, &level) if err != nil { f.Close() - return nil, "", nil, err + return 0, nil, "", nil, err } convertedOutputDigester := digest.Canonical.Digester() - if _, err := io.Copy(io.MultiWriter(chunked, convertedOutputDigester.Hash()), diff); err != nil { + copied, err := io.Copy(io.MultiWriter(chunked, convertedOutputDigester.Hash()), diff) + if err != nil { f.Close() - return nil, "", nil, err + return 0, nil, "", nil, err } if err := chunked.Close(); err != nil { f.Close() - return nil, "", nil, err + return 0, nil, "", nil, err } is := seekableFile{ file: f, } - return &is, convertedOutputDigester.Digest(), newAnnotations, nil + return copied, &is, convertedOutputDigester.Digest(), newAnnotations, nil } // GetDiffer returns a differ than can be used with ApplyDiffWithDiffer. 
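Editorial aside (not part of the diff): convertTarToZstdChunked above writes the converted blob into an unnamed O_TMPFILE descriptor opened in the destination directory and wraps it in an *os.File so it can later be served as a seekable source. A minimal Linux-only sketch of that trick; the directory path is illustrative and the underlying filesystem must support O_TMPFILE.

//go:build linux

// Sketch only: an unnamed temporary file via O_TMPFILE, wrapped in *os.File.
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func anonymousTempFile(dir string) (*os.File, error) {
	// O_TMPFILE creates a file with no directory entry; the storage is freed
	// when the last descriptor is closed, so no stray files need cleaning up.
	fd, err := unix.Open(dir, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600)
	if err != nil {
		return nil, fmt.Errorf("O_TMPFILE in %q: %w", dir, err)
	}
	return os.NewFile(uintptr(fd), dir), nil
}

func main() {
	f, err := anonymousTempFile("/var/tmp") // needs ext4, xfs, tmpfs or similar
	if err != nil {
		panic(err)
	}
	defer f.Close()

	if _, err := f.WriteString("scratch data"); err != nil {
		panic(err)
	}
	fmt.Println("wrote into an unnamed file under", f.Name())
}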
@@ -246,18 +267,26 @@ func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Diges return nil, errors.New("enable_partial_images not configured") } - _, hasZstdChunkedTOC := annotations[internal.ManifestChecksumKey] - _, hasEstargzTOC := annotations[estargz.TOCJSONDigestAnnotation] + zstdChunkedTOCDigestString, hasZstdChunkedTOC := annotations[internal.ManifestChecksumKey] + estargzTOCDigestString, hasEstargzTOC := annotations[estargz.TOCJSONDigestAnnotation] if hasZstdChunkedTOC && hasEstargzTOC { return nil, errors.New("both zstd:chunked and eStargz TOC found") } if hasZstdChunkedTOC { - return makeZstdChunkedDiffer(ctx, store, blobSize, annotations, iss, &storeOpts) + zstdChunkedTOCDigest, err := digest.Parse(zstdChunkedTOCDigestString) + if err != nil { + return nil, fmt.Errorf("parsing zstd:chunked TOC digest %q: %w", zstdChunkedTOCDigestString, err) + } + return makeZstdChunkedDiffer(ctx, store, blobSize, zstdChunkedTOCDigest, annotations, iss, &storeOpts) } if hasEstargzTOC { - return makeEstargzChunkedDiffer(ctx, store, blobSize, annotations, iss, &storeOpts) + estargzTOCDigest, err := digest.Parse(estargzTOCDigestString) + if err != nil { + return nil, fmt.Errorf("parsing estargz TOC digest %q: %w", estargzTOCDigestString, err) + } + return makeEstargzChunkedDiffer(ctx, store, blobSize, estargzTOCDigest, iss, &storeOpts) } return makeConvertFromRawDiffer(ctx, store, blobDigest, blobSize, annotations, iss, &storeOpts) @@ -285,8 +314,8 @@ func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobDige }, nil } -func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) { - manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(iss, blobSize, annotations) +func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, tocDigest digest.Digest, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) { + manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(iss, tocDigest, annotations) if err != nil { return nil, fmt.Errorf("read zstd:chunked manifest: %w", err) } @@ -295,11 +324,6 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in return nil, err } - tocDigest, err := digest.Parse(annotations[internal.ManifestChecksumKey]) - if err != nil { - return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[internal.ManifestChecksumKey], err) - } - return &chunkedDiffer{ fsVerityDigests: make(map[string]string), blobSize: blobSize, @@ -308,6 +332,7 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in fileType: fileTypeZstdChunked, layersCache: layersCache, manifest: manifest, + toc: toc, storeOpts: storeOpts, stream: iss, tarSplit: tarSplit, @@ -315,8 +340,8 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in }, nil } -func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) { - manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, annotations) +func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, tocDigest digest.Digest, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) { + manifest, tocOffset, err := 
readEstargzChunkedManifest(iss, blobSize, tocDigest) if err != nil { return nil, fmt.Errorf("read zstd:chunked manifest: %w", err) } @@ -325,11 +350,6 @@ func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize return nil, err } - tocDigest, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation]) - if err != nil { - return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[estargz.TOCJSONDigestAnnotation], err) - } - return &chunkedDiffer{ fsVerityDigests: make(map[string]string), blobSize: blobSize, @@ -354,7 +374,7 @@ func makeCopyBuffer() []byte { // name is the path to the file to copy in source. // dirfd is an open file descriptor to the destination root directory. // useHardLinks defines whether the deduplication can be performed using hard links. -func copyFileFromOtherLayer(file *internal.FileMetadata, source string, name string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { +func copyFileFromOtherLayer(file *fileMetadata, source string, name string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { srcDirfd, err := unix.Open(source, unix.O_RDONLY, 0) if err != nil { return false, nil, 0, fmt.Errorf("open source file: %w", err) @@ -367,7 +387,7 @@ func copyFileFromOtherLayer(file *internal.FileMetadata, source string, name str } defer srcFile.Close() - dstFile, written, err := copyFileContent(int(srcFile.Fd()), file.Name, dirfd, 0, useHardLinks) + dstFile, written, err := copyFileContent(int(srcFile.Fd()), file, dirfd, 0, useHardLinks) if err != nil { return false, nil, 0, fmt.Errorf("copy content to %q: %w", file.Name, err) } @@ -376,7 +396,7 @@ func copyFileFromOtherLayer(file *internal.FileMetadata, source string, name str // canDedupMetadataWithHardLink says whether it is possible to deduplicate file with otherFile. // It checks that the two files have the same UID, GID, file mode and xattrs. -func canDedupMetadataWithHardLink(file *internal.FileMetadata, otherFile *internal.FileMetadata) bool { +func canDedupMetadataWithHardLink(file *fileMetadata, otherFile *fileMetadata) bool { if file.UID != otherFile.UID { return false } @@ -394,7 +414,7 @@ func canDedupMetadataWithHardLink(file *internal.FileMetadata, otherFile *intern // canDedupFileWithHardLink checks if the specified file can be deduplicated by an // open file, given its descriptor and stat data. -func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo) bool { +func canDedupFileWithHardLink(file *fileMetadata, fd int, s os.FileInfo) bool { st, ok := s.Sys().(*syscall.Stat_t) if !ok { return false @@ -420,11 +440,13 @@ func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo xattrs[x] = string(v) } // fill only the attributes used by canDedupMetadataWithHardLink. - otherFile := internal.FileMetadata{ - UID: int(st.Uid), - GID: int(st.Gid), - Mode: int64(st.Mode), - Xattrs: xattrs, + otherFile := fileMetadata{ + FileMetadata: internal.FileMetadata{ + UID: int(st.Uid), + GID: int(st.Gid), + Mode: int64(st.Mode), + Xattrs: xattrs, + }, } return canDedupMetadataWithHardLink(file, &otherFile) } @@ -434,7 +456,7 @@ func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo // ostreeRepos is a list of OSTree repos. // dirfd is an open fd to the destination checkout. // useHardLinks defines whether the deduplication can be performed using hard links. 
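Editorial aside (not part of the diff): canDedupMetadataWithHardLink above only allows hard-link deduplication when the two candidates agree on owner, group, mode and extended attributes, since a hard link shares a single inode and therefore a single set of attributes. A reduced sketch of that comparison; the field set is trimmed to what the check describes, and maps.Equal (Go 1.21) replaces the manual xattr loop.

package main

import (
	"fmt"
	"maps"
)

type fileMeta struct {
	UID, GID int
	Mode     int64
	Xattrs   map[string]string
}

// canHardLink reports whether two entries could safely share one inode.
func canHardLink(a, b fileMeta) bool {
	return a.UID == b.UID &&
		a.GID == b.GID &&
		a.Mode == b.Mode &&
		maps.Equal(a.Xattrs, b.Xattrs)
}

func main() {
	a := fileMeta{UID: 0, GID: 0, Mode: 0o644, Xattrs: map[string]string{"user.foo": "bar"}}
	b := a
	fmt.Println(canHardLink(a, b)) // true
	b.Mode = 0o600
	fmt.Println(canHardLink(a, b)) // false
}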
-func findFileInOSTreeRepos(file *internal.FileMetadata, ostreeRepos []string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { +func findFileInOSTreeRepos(file *fileMetadata, ostreeRepos []string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { digest, err := digest.Parse(file.Digest) if err != nil { logrus.Debugf("could not parse digest: %v", err) @@ -467,7 +489,7 @@ func findFileInOSTreeRepos(file *internal.FileMetadata, ostreeRepos []string, di continue } - dstFile, written, err := copyFileContent(fd, file.Name, dirfd, 0, useHardLinks) + dstFile, written, err := copyFileContent(fd, file, dirfd, 0, useHardLinks) if err != nil { logrus.Debugf("could not copyFileContent: %v", err) return false, nil, 0, nil @@ -487,7 +509,7 @@ func findFileInOSTreeRepos(file *internal.FileMetadata, ostreeRepos []string, di // file is the file to look for. // dirfd is an open file descriptor to the checkout root directory. // useHardLinks defines whether the deduplication can be performed using hard links. -func findFileInOtherLayers(cache *layersCache, file *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { +func findFileInOtherLayers(cache *layersCache, file *fileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { target, name, err := cache.findFileInOtherLayers(file, useHardLinks) if err != nil || name == "" { return false, nil, 0, err @@ -495,7 +517,7 @@ func findFileInOtherLayers(cache *layersCache, file *internal.FileMetadata, dirf return copyFileFromOtherLayer(file, target, name, dirfd, useHardLinks) } -func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOptions) error { +func maybeDoIDRemap(manifest []fileMetadata, options *archive.TarOptions) error { if options.ChownOpts == nil && len(options.UIDMaps) == 0 || len(options.GIDMaps) == 0 { return nil } @@ -529,7 +551,7 @@ func mapToSlice(inputMap map[uint32]struct{}) []uint32 { return out } -func collectIDs(entries []internal.FileMetadata) ([]uint32, []uint32) { +func collectIDs(entries []fileMetadata) ([]uint32, []uint32) { uids := make(map[uint32]struct{}) gids := make(map[uint32]struct{}) for _, entry := range entries { @@ -549,7 +571,7 @@ type missingFileChunk struct { Gap int64 Hole bool - File *internal.FileMetadata + File *fileMetadata CompressedSize int64 UncompressedSize int64 @@ -582,7 +604,10 @@ func (o *originFile) OpenFile() (io.ReadCloser, error) { } // setFileAttrs sets the file attributes for file given metadata -func setFileAttrs(dirfd int, file *os.File, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions, usePath bool) error { +func setFileAttrs(dirfd int, file *os.File, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions, usePath bool) error { + if metadata.skipSetAttrs { + return nil + } if file == nil || file.Fd() < 0 { return errors.New("invalid file") } @@ -944,14 +969,14 @@ type destinationFile struct { dirfd int file *os.File hash hash.Hash - metadata *internal.FileMetadata + metadata *fileMetadata options *archive.TarOptions skipValidation bool to io.Writer recordFsVerity recordFsVerityFunc } -func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions, skipValidation bool, recordFsVerity recordFsVerityFunc) (*destinationFile, error) { +func openDestinationFile(dirfd int, metadata *fileMetadata, options *archive.TarOptions, skipValidation bool, recordFsVerity recordFsVerityFunc) (*destinationFile, error) { file, err := 
openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0) if err != nil { return nil, err @@ -1314,7 +1339,7 @@ func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dest st return nil } -func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *internal.FileMetadata, options *archive.TarOptions) error { +func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *fileMetadata, options *archive.TarOptions) error { parent := filepath.Dir(name) base := filepath.Base(name) @@ -1343,7 +1368,7 @@ func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *internal.File return setFileAttrs(dirfd, file, mode, metadata, options, false) } -func safeLink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error { +func safeLink(dirfd int, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions) error { sourceFile, err := openFileUnderRoot(metadata.Linkname, dirfd, unix.O_PATH|unix.O_RDONLY|unix.O_NOFOLLOW, 0) if err != nil { return err @@ -1385,7 +1410,7 @@ func safeLink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, opti return setFileAttrs(dirfd, newFile, mode, metadata, options, false) } -func safeSymlink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error { +func safeSymlink(dirfd int, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions) error { destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name) destDirFd := dirfd if destDir != "." { @@ -1473,7 +1498,7 @@ type hardLinkToCreate struct { dest string dirfd int mode os.FileMode - metadata *internal.FileMetadata + metadata *fileMetadata } func parseBooleanPullOption(storeOpts *storage.StoreOptions, name string, def bool) bool { @@ -1498,7 +1523,7 @@ func reopenFileReadOnly(f *os.File) (*os.File, error) { return os.NewFile(uintptr(fd), f.Name()), nil } -func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, copyOptions *findAndCopyFileOptions, mode os.FileMode) (bool, error) { +func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *fileMetadata, copyOptions *findAndCopyFileOptions, mode os.FileMode) (bool, error) { finalizeFile := func(dstFile *os.File) error { if dstFile == nil { return nil @@ -1549,8 +1574,8 @@ func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, cop return false, nil } -func makeEntriesFlat(mergedEntries []internal.FileMetadata) ([]internal.FileMetadata, error) { - var new []internal.FileMetadata +func makeEntriesFlat(mergedEntries []fileMetadata) ([]fileMetadata, error) { + var new []fileMetadata hashes := make(map[string]string) for i := range mergedEntries { @@ -1572,6 +1597,7 @@ func makeEntriesFlat(mergedEntries []internal.FileMetadata) ([]internal.FileMeta hashes[d] = d mergedEntries[i].Name = fmt.Sprintf("%s/%s", d[0:2], d[2:]) + mergedEntries[i].skipSetAttrs = true new = append(new, mergedEntries[i]) } @@ -1629,6 +1655,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff stream := c.stream var uncompressedDigest digest.Digest + var convertedBlobSize int64 if c.convertToZstdChunked { fd, err := unix.Open(dest, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600) @@ -1656,10 +1683,11 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff return graphdriver.DriverWithDifferOutput{}, err } - fileSource, diffID, annotations, err := convertTarToZstdChunked(dest, blobFile) + tarSize, fileSource, diffID, annotations, err := 
convertTarToZstdChunked(dest, blobFile) if err != nil { return graphdriver.DriverWithDifferOutput{}, err } + convertedBlobSize = tarSize // fileSource is a O_TMPFILE file descriptor, so we // need to keep it open until the entire file is processed. defer fileSource.Close() @@ -1668,7 +1696,14 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff blobFile.Close() blobFile = nil - manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(fileSource, c.blobSize, annotations) + tocDigest, err := toc.GetTOCDigest(annotations) + if err != nil { + return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("internal error: parsing just-created zstd:chunked TOC digest: %w", err) + } + if tocDigest == nil { + return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("internal error: just-created zstd:chunked missing TOC digest") + } + manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(fileSource, *tocDigest, annotations) if err != nil { return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("read zstd:chunked manifest: %w", err) } @@ -1679,6 +1714,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff // fill the chunkedDiffer with the data we just read. c.fileType = fileTypeZstdChunked c.manifest = manifest + c.toc = toc c.tarSplit = tarSplit c.tocOffset = tocOffset @@ -1699,9 +1735,13 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff } // Generate the manifest - toc, err := unmarshalToc(c.manifest) - if err != nil { - return graphdriver.DriverWithDifferOutput{}, err + toc := c.toc + if toc == nil { + toc_, err := unmarshalToc(c.manifest) + if err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } + toc = toc_ } output := graphdriver.DriverWithDifferOutput{ @@ -1729,14 +1769,19 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff var missingParts []missingPart - output.UIDs, output.GIDs = collectIDs(toc.Entries) - - mergedEntries, totalSize, err := c.mergeTocEntries(c.fileType, toc.Entries) + mergedEntries, totalSizeFromTOC, err := c.mergeTocEntries(c.fileType, toc.Entries) if err != nil { return output, err } - output.Size = totalSize + output.UIDs, output.GIDs = collectIDs(mergedEntries) + if convertedBlobSize > 0 { + // if the image was converted, store the original tar size, so that + // it can be recreated correctly. + output.Size = convertedBlobSize + } else { + output.Size = totalSizeFromTOC + } if err := maybeDoIDRemap(mergedEntries, options); err != nil { return output, err @@ -1789,7 +1834,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff njob int index int mode os.FileMode - metadata *internal.FileMetadata + metadata *fileMetadata found bool err error @@ -1961,7 +2006,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff remainingSize := r.Size // the file is missing, attempt to find individual chunks. 
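Editorial aside (not part of the diff): after merging the TOC entries, ApplyDiff above collects the distinct UIDs and GIDs from the merged entries (collectIDs and mapToSlice earlier in this diff) so the driver output carries each ID only once. A compact sketch of that set-then-slice pattern; the sort is added here only to make the example output stable, the real code does not sort.

package main

import (
	"fmt"
	"sort"
)

// collectUnique gathers the distinct values from ids using a map as a set,
// then returns them as a slice.
func collectUnique(ids []uint32) []uint32 {
	set := make(map[uint32]struct{})
	for _, id := range ids {
		set[id] = struct{}{}
	}
	out := make([]uint32, 0, len(set))
	for id := range set {
		out = append(out, id)
	}
	sort.Slice(out, func(i, j int) bool { return out[i] < out[j] })
	return out
}

func main() {
	fmt.Println(collectUnique([]uint32{0, 1000, 0, 1000, 33})) // [0 33 1000]
}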
- for _, chunk := range r.Chunks { + for _, chunk := range r.chunks { compressedSize := int64(chunk.EndOffset - chunk.Offset) size := remainingSize if chunk.ChunkSize > 0 { @@ -2045,7 +2090,7 @@ func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool { return false } -func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]internal.FileMetadata, int64, error) { +func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]fileMetadata, int64, error) { var totalFilesSize int64 countNextChunks := func(start int) int { @@ -2069,11 +2114,11 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i } } - mergedEntries := make([]internal.FileMetadata, size) + mergedEntries := make([]fileMetadata, size) m := 0 for i := 0; i < len(entries); i++ { - e := entries[i] - if mustSkipFile(fileType, e) { + e := fileMetadata{FileMetadata: entries[i]} + if mustSkipFile(fileType, entries[i]) { continue } @@ -2086,12 +2131,12 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i if e.Type == TypeReg { nChunks := countNextChunks(i + 1) - e.Chunks = make([]*internal.FileMetadata, nChunks+1) + e.chunks = make([]*internal.FileMetadata, nChunks+1) for j := 0; j <= nChunks; j++ { // we need a copy here, otherwise we override the // .Size later copy := entries[i+j] - e.Chunks[j] = © + e.chunks[j] = © e.EndOffset = entries[i+j].EndOffset } i += nChunks @@ -2110,10 +2155,10 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i } lastChunkOffset := mergedEntries[i].EndOffset - for j := len(mergedEntries[i].Chunks) - 1; j >= 0; j-- { - mergedEntries[i].Chunks[j].EndOffset = lastChunkOffset - mergedEntries[i].Chunks[j].Size = mergedEntries[i].Chunks[j].EndOffset - mergedEntries[i].Chunks[j].Offset - lastChunkOffset = mergedEntries[i].Chunks[j].Offset + for j := len(mergedEntries[i].chunks) - 1; j >= 0; j-- { + mergedEntries[i].chunks[j].EndOffset = lastChunkOffset + mergedEntries[i].chunks[j].Size = mergedEntries[i].chunks[j].EndOffset - mergedEntries[i].chunks[j].Offset + lastChunkOffset = mergedEntries[i].chunks[j].Offset } } return mergedEntries, totalFilesSize, nil diff --git a/vendor/github.com/containers/storage/pkg/config/config.go b/vendor/github.com/containers/storage/pkg/config/config.go index febe8a0c5..7f49d029d 100644 --- a/vendor/github.com/containers/storage/pkg/config/config.go +++ b/vendor/github.com/containers/storage/pkg/config/config.go @@ -5,72 +5,6 @@ import ( "os" ) -// ThinpoolOptionsConfig represents the "storage.options.thinpool" -// TOML config table. -type ThinpoolOptionsConfig struct { - // AutoExtendPercent determines the amount by which pool needs to be - // grown. This is specified in terms of % of pool size. So a value of - // 20 means that when threshold is hit, pool will be grown by 20% of - // existing pool size. - AutoExtendPercent string `toml:"autoextend_percent,omitempty"` - - // AutoExtendThreshold determines the pool extension threshold in terms - // of percentage of pool size. For example, if threshold is 60, that - // means when pool is 60% full, threshold has been hit. - AutoExtendThreshold string `toml:"autoextend_threshold,omitempty"` - - // BaseSize specifies the size to use when creating the base device, - // which limits the size of images and containers. - BaseSize string `toml:"basesize,omitempty"` - - // BlockSize specifies a custom blocksize to use for the thin pool. 
- BlockSize string `toml:"blocksize,omitempty"` - - // DirectLvmDevice specifies a custom block storage device to use for - // the thin pool. - DirectLvmDevice string `toml:"directlvm_device,omitempty"` - - // DirectLvmDeviceForcewipes device even if device already has a - // filesystem - DirectLvmDeviceForce string `toml:"directlvm_device_force,omitempty"` - - // Fs specifies the filesystem type to use for the base device. - Fs string `toml:"fs,omitempty"` - - // log_level sets the log level of devicemapper. - LogLevel string `toml:"log_level,omitempty"` - - // MetadataSize specifies the size of the metadata for the thinpool - // It will be used with the `pvcreate --metadata` option. - MetadataSize string `toml:"metadatasize,omitempty"` - - // MinFreeSpace specifies the min free space percent in a thin pool - // require for new device creation to - MinFreeSpace string `toml:"min_free_space,omitempty"` - - // MkfsArg specifies extra mkfs arguments to be used when creating the - // basedevice. - MkfsArg string `toml:"mkfsarg,omitempty"` - - // MountOpt specifies extra mount options used when mounting the thin - // devices. - MountOpt string `toml:"mountopt,omitempty"` - - // Size - Size string `toml:"size,omitempty"` - - // UseDeferredDeletion marks device for deferred deletion - UseDeferredDeletion string `toml:"use_deferred_deletion,omitempty"` - - // UseDeferredRemoval marks device for deferred removal - UseDeferredRemoval string `toml:"use_deferred_removal,omitempty"` - - // XfsNoSpaceMaxRetriesFreeSpace specifies the maximum number of - // retries XFS should attempt to complete IO when ENOSPC (no space) - // error is returned by underlying storage device. - XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries,omitempty"` -} - type AufsOptionsConfig struct { // MountOpt specifies extra mount options used when mounting MountOpt string `toml:"mountopt,omitempty"` @@ -181,8 +115,8 @@ type OptionsConfig struct { // Btrfs container options to be handed to btrfs drivers Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs,omitempty"` - // Thinpool container options to be handed to thinpool drivers - Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool,omitempty"` + // Thinpool container options to be handed to thinpool drivers (NOP) + Thinpool struct{} `toml:"thinpool,omitempty"` // Overlay container options to be handed to overlay drivers Overlay struct{ OverlayOptionsConfig } `toml:"overlay,omitempty"` @@ -231,62 +165,6 @@ func GetGraphDriverOptions(driverName string, options OptionsConfig) []string { doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size)) } - case "devicemapper": - if options.Thinpool.AutoExtendPercent != "" { - doptions = append(doptions, fmt.Sprintf("dm.thinp_autoextend_percent=%s", options.Thinpool.AutoExtendPercent)) - } - if options.Thinpool.AutoExtendThreshold != "" { - doptions = append(doptions, fmt.Sprintf("dm.thinp_autoextend_threshold=%s", options.Thinpool.AutoExtendThreshold)) - } - if options.Thinpool.BaseSize != "" { - doptions = append(doptions, fmt.Sprintf("dm.basesize=%s", options.Thinpool.BaseSize)) - } - if options.Thinpool.BlockSize != "" { - doptions = append(doptions, fmt.Sprintf("dm.blocksize=%s", options.Thinpool.BlockSize)) - } - if options.Thinpool.DirectLvmDevice != "" { - doptions = append(doptions, fmt.Sprintf("dm.directlvm_device=%s", options.Thinpool.DirectLvmDevice)) - } - if options.Thinpool.DirectLvmDeviceForce != "" { - doptions = append(doptions, fmt.Sprintf("dm.directlvm_device_force=%s", 
options.Thinpool.DirectLvmDeviceForce)) - } - if options.Thinpool.Fs != "" { - doptions = append(doptions, fmt.Sprintf("dm.fs=%s", options.Thinpool.Fs)) - } - if options.Thinpool.LogLevel != "" { - doptions = append(doptions, fmt.Sprintf("dm.libdm_log_level=%s", options.Thinpool.LogLevel)) - } - if options.Thinpool.MetadataSize != "" { - doptions = append(doptions, fmt.Sprintf("dm.metadata_size=%s", options.Thinpool.MetadataSize)) - } - if options.Thinpool.MinFreeSpace != "" { - doptions = append(doptions, fmt.Sprintf("dm.min_free_space=%s", options.Thinpool.MinFreeSpace)) - } - if options.Thinpool.MkfsArg != "" { - doptions = append(doptions, fmt.Sprintf("dm.mkfsarg=%s", options.Thinpool.MkfsArg)) - } - if options.Thinpool.MountOpt != "" { - doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Thinpool.MountOpt)) - } else if options.MountOpt != "" { - doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt)) - } - - if options.Thinpool.Size != "" { - doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Thinpool.Size)) - } else if options.Size != "" { - doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size)) - } - - if options.Thinpool.UseDeferredDeletion != "" { - doptions = append(doptions, fmt.Sprintf("dm.use_deferred_deletion=%s", options.Thinpool.UseDeferredDeletion)) - } - if options.Thinpool.UseDeferredRemoval != "" { - doptions = append(doptions, fmt.Sprintf("dm.use_deferred_removal=%s", options.Thinpool.UseDeferredRemoval)) - } - if options.Thinpool.XfsNoSpaceMaxRetries != "" { - doptions = append(doptions, fmt.Sprintf("dm.xfs_nospace_max_retries=%s", options.Thinpool.XfsNoSpaceMaxRetries)) - } - case "overlay", "overlay2": // Specify whether composefs must be used to mount the data layers if options.Overlay.IgnoreChownErrors != "" { diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go deleted file mode 100644 index 33bf7184e..000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go +++ /dev/null @@ -1,813 +0,0 @@ -//go:build linux && cgo -// +build linux,cgo - -package devicemapper - -import ( - "errors" - "fmt" - "os" - "runtime" - "unsafe" - - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -// Same as DM_DEVICE_* enum values from libdevmapper.h -// nolint: unused -const ( - deviceCreate TaskType = iota - deviceReload - deviceRemove - deviceRemoveAll - deviceSuspend - deviceResume - deviceInfo - deviceDeps - deviceRename - deviceVersion - deviceStatus - deviceTable - deviceWaitevent - deviceList - deviceClear - deviceMknodes - deviceListVersions - deviceTargetMsg - deviceSetGeometry -) - -const ( - addNodeOnResume AddNodeType = iota - addNodeOnCreate -) - -// List of errors returned when using devicemapper. 
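
For context on the config.go hunk above: with the [storage.options.thinpool] table reduced to a NOP, GetGraphDriverOptions now only flattens the generic and per-driver fields (overlay, btrfs, and so on) into "driver.key=value" strings. A minimal sketch of that flattening, kept separate from the vendored code and using purely illustrative values:

    package main

    import "fmt"

    // buildDriverOptions mirrors the "driver.key=value" flattening that
    // GetGraphDriverOptions performs for the remaining drivers; it is an
    // illustrative sketch, not the vendored implementation.
    func buildDriverOptions(driverName, mountOpt, size string) []string {
        var doptions []string
        if mountOpt != "" {
            doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, mountOpt))
        }
        if size != "" {
            doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, size))
        }
        return doptions
    }

    func main() {
        // Prints: [overlay.mountopt=nodev overlay.size=10G]
        fmt.Println(buildDriverOptions("overlay", "nodev", "10G"))
    }
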
-var ( - ErrTaskRun = errors.New("dm_task_run failed") - ErrTaskSetName = errors.New("dm_task_set_name failed") - ErrTaskSetMessage = errors.New("dm_task_set_message failed") - ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed") - ErrTaskSetRo = errors.New("dm_task_set_ro failed") - ErrTaskAddTarget = errors.New("dm_task_add_target failed") - ErrTaskSetSector = errors.New("dm_task_set_sector failed") - ErrTaskGetDeps = errors.New("dm_task_get_deps failed") - ErrTaskGetInfo = errors.New("dm_task_get_info failed") - ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed") - ErrTaskDeferredRemove = errors.New("dm_task_deferred_remove failed") - ErrTaskSetCookie = errors.New("dm_task_set_cookie failed") - ErrNilCookie = errors.New("cookie ptr can't be nil") - ErrGetBlockSize = errors.New("Can't get block size") - ErrUdevWait = errors.New("wait on udev cookie failed") - ErrSetDevDir = errors.New("dm_set_dev_dir failed") - ErrGetLibraryVersion = errors.New("dm_get_library_version failed") - ErrCreateRemoveTask = errors.New("Can't create task of type deviceRemove") - ErrRunRemoveDevice = errors.New("running RemoveDevice failed") - ErrInvalidAddNode = errors.New("Invalid AddNode type") - ErrBusy = errors.New("Device is Busy") - ErrDeviceIDExists = errors.New("Device Id Exists") - ErrEnxio = errors.New("No such device or address") -) - -var ( - dmSawBusy bool - dmSawExist bool - dmSawEnxio bool // No Such Device or Address -) - -type ( - // Task represents a devicemapper task (like lvcreate, etc.) ; a task is needed for each ioctl - // command to execute. - Task struct { - unmanaged *cdmTask - } - // Deps represents dependents (layer) of a device. - Deps struct { - Count uint32 - Filler uint32 - Device []uint64 - } - // Info represents information about a device. - Info struct { - Exists int - Suspended int - LiveTable int - InactiveTable int - OpenCount int32 - EventNr uint32 - Major uint32 - Minor uint32 - ReadOnly int - TargetCount int32 - DeferredRemove int - } - // TaskType represents a type of task - TaskType int - // AddNodeType represents a type of node to be added - AddNodeType int -) - -// DeviceIDExists returns whether error conveys the information about device Id already -// exist or not. This will be true if device creation or snap creation -// operation fails if device or snap device already exists in pool. -// Current implementation is little crude as it scans the error string -// for exact pattern match. Replacing it with more robust implementation -// is desirable. 
-func DeviceIDExists(err error) bool { - return fmt.Sprint(err) == fmt.Sprint(ErrDeviceIDExists) -} - -func (t *Task) destroy() { - if t != nil { - DmTaskDestroy(t.unmanaged) - runtime.SetFinalizer(t, nil) - } -} - -// TaskCreateNamed is a convenience function for TaskCreate when a name -// will be set on the task as well -func TaskCreateNamed(t TaskType, name string) (*Task, error) { - task := TaskCreate(t) - if task == nil { - return nil, fmt.Errorf("devicemapper: Can't create task of type %d", int(t)) - } - if err := task.setName(name); err != nil { - return nil, fmt.Errorf("devicemapper: Can't set task name %s", name) - } - return task, nil -} - -// TaskCreate initializes a devicemapper task of tasktype -func TaskCreate(tasktype TaskType) *Task { - Ctask := DmTaskCreate(int(tasktype)) - if Ctask == nil { - return nil - } - task := &Task{unmanaged: Ctask} - runtime.SetFinalizer(task, (*Task).destroy) - return task -} - -func (t *Task) run() error { - if res := DmTaskRun(t.unmanaged); res != 1 { - return ErrTaskRun - } - runtime.KeepAlive(t) - return nil -} - -func (t *Task) setName(name string) error { - if res := DmTaskSetName(t.unmanaged, name); res != 1 { - return ErrTaskSetName - } - return nil -} - -func (t *Task) setMessage(message string) error { - if res := DmTaskSetMessage(t.unmanaged, message); res != 1 { - return ErrTaskSetMessage - } - return nil -} - -func (t *Task) setSector(sector uint64) error { - if res := DmTaskSetSector(t.unmanaged, sector); res != 1 { - return ErrTaskSetSector - } - return nil -} - -func (t *Task) setCookie(cookie *uint, flags uint16) error { - if cookie == nil { - return ErrNilCookie - } - if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 { - return ErrTaskSetCookie - } - return nil -} - -func (t *Task) setAddNode(addNode AddNodeType) error { - if addNode != addNodeOnResume && addNode != addNodeOnCreate { - return ErrInvalidAddNode - } - if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 { - return ErrTaskSetAddNode - } - return nil -} - -func (t *Task) addTarget(start, size uint64, ttype, params string) error { - if res := DmTaskAddTarget(t.unmanaged, start, size, - ttype, params); res != 1 { - return ErrTaskAddTarget - } - return nil -} - -func (t *Task) getDeps() (*Deps, error) { //nolint:unused - var deps *Deps - if deps = DmTaskGetDeps(t.unmanaged); deps == nil { - return nil, ErrTaskGetDeps - } - return deps, nil -} - -func (t *Task) getInfo() (*Info, error) { - info := &Info{} - if res := DmTaskGetInfo(t.unmanaged, info); res != 1 { - return nil, ErrTaskGetInfo - } - return info, nil -} - -func (t *Task) getInfoWithDeferred() (*Info, error) { - info := &Info{} - if res := DmTaskGetInfoWithDeferred(t.unmanaged, info); res != 1 { - return nil, ErrTaskGetInfo - } - return info, nil -} - -func (t *Task) getDriverVersion() (string, error) { - res := DmTaskGetDriverVersion(t.unmanaged) - if res == "" { - return "", ErrTaskGetDriverVersion - } - return res, nil -} - -func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start uint64, - length uint64, targetType string, params string, -) { - return DmGetNextTarget(t.unmanaged, next, &start, &length, - &targetType, ¶ms), - start, length, targetType, params -} - -// UdevWait waits for any processes that are waiting for udev to complete the specified cookie. 
-func UdevWait(cookie *uint) error { - if res := DmUdevWait(*cookie); res != 1 { - logrus.Debugf("devicemapper: Failed to wait on udev cookie %d, %d", *cookie, res) - return ErrUdevWait - } - return nil -} - -// SetDevDir sets the dev folder for the device mapper library (usually /dev). -func SetDevDir(dir string) error { - if res := DmSetDevDir(dir); res != 1 { - logrus.Debug("devicemapper: Error dm_set_dev_dir") - return ErrSetDevDir - } - return nil -} - -// GetLibraryVersion returns the device mapper library version. -func GetLibraryVersion() (string, error) { - var version string - if res := DmGetLibraryVersion(&version); res != 1 { - return "", ErrGetLibraryVersion - } - return version, nil -} - -// UdevSyncSupported returns whether device-mapper is able to sync with udev -// -// This is essential otherwise race conditions can arise where both udev and -// device-mapper attempt to create and destroy devices. -func UdevSyncSupported() bool { - return DmUdevGetSyncSupport() != 0 -} - -// UdevSetSyncSupport allows setting whether the udev sync should be enabled. -// The return bool indicates the state of whether the sync is enabled. -func UdevSetSyncSupport(enable bool) bool { - if enable { - DmUdevSetSyncSupport(1) - } else { - DmUdevSetSyncSupport(0) - } - - return UdevSyncSupported() -} - -// CookieSupported returns whether the version of device-mapper supports the -// use of cookie's in the tasks. -// This is largely a lower level call that other functions use. -func CookieSupported() bool { - return DmCookieSupported() != 0 -} - -// RemoveDevice is a useful helper for cleaning up a device. -func RemoveDevice(name string) error { - task, err := TaskCreateNamed(deviceRemove, name) - if task == nil { - return err - } - - cookie := new(uint) - if err := task.setCookie(cookie, 0); err != nil { - return fmt.Errorf("devicemapper: Can not set cookie: %s", err) - } - defer UdevWait(cookie) - - dmSawBusy = false // reset before the task is run - dmSawEnxio = false - if err = task.run(); err != nil { - if dmSawBusy { - return ErrBusy - } - if dmSawEnxio { - return ErrEnxio - } - return fmt.Errorf("devicemapper: Error running RemoveDevice %s", err) - } - - return nil -} - -// RemoveDeviceDeferred is a useful helper for cleaning up a device, but deferred. -func RemoveDeviceDeferred(name string) error { - logrus.Debugf("devicemapper: RemoveDeviceDeferred START(%s)", name) - defer logrus.Debugf("devicemapper: RemoveDeviceDeferred END(%s)", name) - task, err := TaskCreateNamed(deviceRemove, name) - if task == nil { - return err - } - - if err := DmTaskDeferredRemove(task.unmanaged); err != 1 { - return ErrTaskDeferredRemove - } - - // set a task cookie and disable library fallback, or else libdevmapper will - // disable udev dm rules and delete the symlink under /dev/mapper by itself, - // even if the removal is deferred by the kernel. - cookie := new(uint) - flags := uint16(DmUdevDisableLibraryFallback) - if err := task.setCookie(cookie, flags); err != nil { - return fmt.Errorf("devicemapper: Can not set cookie: %s", err) - } - - // libdevmapper and udev relies on System V semaphore for synchronization, - // semaphores created in `task.setCookie` will be cleaned up in `UdevWait`. - // So these two function call must come in pairs, otherwise semaphores will - // be leaked, and the limit of number of semaphores defined in `/proc/sys/kernel/sem` - // will be reached, which will eventually make all following calls to 'task.SetCookie' - // fail. 
- // this call will not wait for the deferred removal's final executing, since no - // udev event will be generated, and the semaphore's value will not be incremented - // by udev, what UdevWait is just cleaning up the semaphore. - defer UdevWait(cookie) - - dmSawEnxio = false - if err = task.run(); err != nil { - if dmSawEnxio { - return ErrEnxio - } - return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err) - } - - return nil -} - -// CancelDeferredRemove cancels a deferred remove for a device. -func CancelDeferredRemove(deviceName string) error { - task, err := TaskCreateNamed(deviceTargetMsg, deviceName) - if task == nil { - return err - } - - if err := task.setSector(0); err != nil { - return fmt.Errorf("devicemapper: Can't set sector %s", err) - } - - if err := task.setMessage("@cancel_deferred_remove"); err != nil { - return fmt.Errorf("devicemapper: Can't set message %s", err) - } - - dmSawBusy = false - dmSawEnxio = false - if err := task.run(); err != nil { - // A device might be being deleted already - if dmSawBusy { - return ErrBusy - } else if dmSawEnxio { - return ErrEnxio - } - return fmt.Errorf("devicemapper: Error running CancelDeferredRemove %s", err) - - } - return nil -} - -// GetBlockDeviceSize returns the size of a block device identified by the specified file. -func GetBlockDeviceSize(file *os.File) (uint64, error) { - size, err := ioctlBlkGetSize64(file.Fd()) - if err != nil { - logrus.Errorf("devicemapper: Error getblockdevicesize: %s", err) - return 0, ErrGetBlockSize - } - return uint64(size), nil -} - -// BlockDeviceDiscard runs discard for the given path. -// This is used as a workaround for the kernel not discarding block so -// on the thin pool when we remove a thinp device, so we do it -// manually -func BlockDeviceDiscard(path string) error { - file, err := os.OpenFile(path, os.O_RDWR, 0) - if err != nil { - return err - } - defer file.Close() - - size, err := GetBlockDeviceSize(file) - if err != nil { - return err - } - - if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil { - return err - } - - // Without this sometimes the remove of the device that happens after - // discard fails with EBUSY. - unix.Sync() - - return nil -} - -// CreatePool is the programmatic example of "dmsetup create". -// It creates a device with the specified poolName, data and metadata file and block size. -func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { - task, err := TaskCreateNamed(deviceCreate, poolName) - if task == nil { - return err - } - - size, err := GetBlockDeviceSize(dataFile) - if err != nil { - return fmt.Errorf("devicemapper: Can't get data size %s", err) - } - - params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) - if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { - return fmt.Errorf("devicemapper: Can't add target %s", err) - } - - cookie := new(uint) - flags := uint16(DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag) - if err := task.setCookie(cookie, flags); err != nil { - return fmt.Errorf("devicemapper: Can't set cookie %s", err) - } - defer UdevWait(cookie) - - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err) - } - - return nil -} - -// ReloadPool is the programmatic example of "dmsetup reload". -// It reloads the table with the specified poolName, data and metadata file and block size. 
-func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { - task, err := TaskCreateNamed(deviceReload, poolName) - if task == nil { - return err - } - - size, err := GetBlockDeviceSize(dataFile) - if err != nil { - return fmt.Errorf("devicemapper: Can't get data size %s", err) - } - - params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) - if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { - return fmt.Errorf("devicemapper: Can't add target %s", err) - } - - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running ReloadPool %s", err) - } - - return nil -} - -// GetDeps is the programmatic example of "dmsetup deps". -// It outputs a list of devices referenced by the live table for the specified device. -func GetDeps(name string) (*Deps, error) { - task, err := TaskCreateNamed(deviceDeps, name) - if task == nil { - return nil, err - } - if err := task.run(); err != nil { - return nil, err - } - return task.getDeps() -} - -// GetInfo is the programmatic example of "dmsetup info". -// It outputs some brief information about the device. -func GetInfo(name string) (*Info, error) { - task, err := TaskCreateNamed(deviceInfo, name) - if task == nil { - return nil, err - } - if err := task.run(); err != nil { - return nil, err - } - return task.getInfo() -} - -// GetInfoWithDeferred is the programmatic example of "dmsetup info", but deferred. -// It outputs some brief information about the device. -func GetInfoWithDeferred(name string) (*Info, error) { - task, err := TaskCreateNamed(deviceInfo, name) - if task == nil { - return nil, err - } - if err := task.run(); err != nil { - return nil, err - } - return task.getInfoWithDeferred() -} - -// GetDriverVersion is the programmatic example of "dmsetup version". -// It outputs version information of the driver. -func GetDriverVersion() (string, error) { - task := TaskCreate(deviceVersion) - if task == nil { - return "", fmt.Errorf("devicemapper: Can't create deviceVersion task") - } - if err := task.run(); err != nil { - return "", err - } - return task.getDriverVersion() -} - -// GetStatus is the programmatic example of "dmsetup status". -// It outputs status information for the specified device name. -func GetStatus(name string) (uint64, uint64, string, string, error) { - task, err := TaskCreateNamed(deviceStatus, name) - if task == nil { - logrus.Debugf("devicemapper: GetStatus() Error TaskCreateNamed: %s", err) - return 0, 0, "", "", err - } - if err := task.run(); err != nil { - logrus.Debugf("devicemapper: GetStatus() Error Run: %s", err) - return 0, 0, "", "", err - } - - devinfo, err := task.getInfo() - if err != nil { - logrus.Debugf("devicemapper: GetStatus() Error GetInfo: %s", err) - return 0, 0, "", "", err - } - if devinfo.Exists == 0 { - logrus.Debugf("devicemapper: GetStatus() Non existing device %s", name) - return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) - } - - _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) - return start, length, targetType, params, nil -} - -// GetTable is the programmatic example for "dmsetup table". -// It outputs the current table for the specified device name. 
-func GetTable(name string) (uint64, uint64, string, string, error) { - task, err := TaskCreateNamed(deviceTable, name) - if task == nil { - logrus.Debugf("devicemapper: GetTable() Error TaskCreateNamed: %s", err) - return 0, 0, "", "", err - } - if err := task.run(); err != nil { - logrus.Debugf("devicemapper: GetTable() Error Run: %s", err) - return 0, 0, "", "", err - } - - devinfo, err := task.getInfo() - if err != nil { - logrus.Debugf("devicemapper: GetTable() Error GetInfo: %s", err) - return 0, 0, "", "", err - } - if devinfo.Exists == 0 { - logrus.Debugf("devicemapper: GetTable() Non existing device %s", name) - return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) - } - - _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) - return start, length, targetType, params, nil -} - -// SetTransactionID sets a transaction id for the specified device name. -func SetTransactionID(poolName string, oldID uint64, newID uint64) error { - task, err := TaskCreateNamed(deviceTargetMsg, poolName) - if task == nil { - return err - } - - if err := task.setSector(0); err != nil { - return fmt.Errorf("devicemapper: Can't set sector %s", err) - } - - if err := task.setMessage(fmt.Sprintf("set_transaction_id %d %d", oldID, newID)); err != nil { - return fmt.Errorf("devicemapper: Can't set message %s", err) - } - - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running SetTransactionID %s", err) - } - return nil -} - -// SuspendDevice is the programmatic example of "dmsetup suspend". -// It suspends the specified device. -func SuspendDevice(name string) error { - task, err := TaskCreateNamed(deviceSuspend, name) - if task == nil { - return err - } - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running deviceSuspend %s", err) - } - return nil -} - -// ResumeDevice is the programmatic example of "dmsetup resume". -// It un-suspends the specified device. -func ResumeDevice(name string) error { - task, err := TaskCreateNamed(deviceResume, name) - if task == nil { - return err - } - - cookie := new(uint) - if err := task.setCookie(cookie, 0); err != nil { - return fmt.Errorf("devicemapper: Can't set cookie %s", err) - } - defer UdevWait(cookie) - - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running deviceResume %s", err) - } - - return nil -} - -// CreateDevice creates a device with the specified poolName with the specified device id. -func CreateDevice(poolName string, deviceID int) error { - logrus.Debugf("devicemapper: CreateDevice(poolName=%v, deviceID=%v)", poolName, deviceID) - task, err := TaskCreateNamed(deviceTargetMsg, poolName) - if task == nil { - return err - } - - if err := task.setSector(0); err != nil { - return fmt.Errorf("devicemapper: Can't set sector %s", err) - } - - if err := task.setMessage(fmt.Sprintf("create_thin %d", deviceID)); err != nil { - return fmt.Errorf("devicemapper: Can't set message %s", err) - } - - dmSawExist = false // reset before the task is run - if err := task.run(); err != nil { - // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. - if dmSawExist { - return ErrDeviceIDExists - } - - return fmt.Errorf("devicemapper: Error running CreateDevice %s", err) - - } - return nil -} - -// DeleteDevice deletes a device with the specified poolName with the specified device id. 
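
To make clear what callers lose with this file's removal, here is a compressed sketch of how the deleted public helpers chained together for a thin-device lifecycle. The signatures come from the code above; the pool name, device paths, IDs, and sizes are illustrative, and the sketch only builds against a containers/storage tree that still ships pkg/devicemapper:

    //go:build linux && cgo

    package main

    import (
        "log"
        "os"

        // Removed by the hunk above; referenced here only to document the old call sequence.
        "github.com/containers/storage/pkg/devicemapper"
    )

    func main() {
        data, err := os.OpenFile("/dev/vg0/thin-data", os.O_RDWR, 0)
        if err != nil {
            log.Fatal(err)
        }
        defer data.Close()
        meta, err := os.OpenFile("/dev/vg0/thin-meta", os.O_RDWR, 0)
        if err != nil {
            log.Fatal(err)
        }
        defer meta.Close()

        // Create the pool, add a thin device inside it, and activate it as a DM device.
        if err := devicemapper.CreatePool("storage-pool", data, meta, 128); err != nil {
            log.Fatal(err)
        }
        if err := devicemapper.CreateDevice("storage-pool", 1); err != nil {
            log.Fatal(err)
        }
        if err := devicemapper.ActivateDevice("storage-pool", "storage-thin-1", 1, 10<<30); err != nil {
            log.Fatal(err)
        }
        // Tear the activated device down again.
        if err := devicemapper.RemoveDevice("storage-thin-1"); err != nil {
            log.Fatal(err)
        }
    }
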
-func DeleteDevice(poolName string, deviceID int) error { - task, err := TaskCreateNamed(deviceTargetMsg, poolName) - if task == nil { - return err - } - - if err := task.setSector(0); err != nil { - return fmt.Errorf("devicemapper: Can't set sector %s", err) - } - - if err := task.setMessage(fmt.Sprintf("delete %d", deviceID)); err != nil { - return fmt.Errorf("devicemapper: Can't set message %s", err) - } - - dmSawBusy = false - if err := task.run(); err != nil { - if dmSawBusy { - return ErrBusy - } - return fmt.Errorf("devicemapper: Error running DeleteDevice %s", err) - } - return nil -} - -// ActivateDevice activates the device identified by the specified -// poolName, name and deviceID with the specified size. -func ActivateDevice(poolName string, name string, deviceID int, size uint64) error { - return activateDevice(poolName, name, deviceID, size, "") -} - -// ActivateDeviceWithExternal activates the device identified by the specified -// poolName, name and deviceID with the specified size. -func ActivateDeviceWithExternal(poolName string, name string, deviceID int, size uint64, external string) error { - return activateDevice(poolName, name, deviceID, size, external) -} - -func activateDevice(poolName string, name string, deviceID int, size uint64, external string) error { - task, err := TaskCreateNamed(deviceCreate, name) - if task == nil { - return err - } - - var params string - if len(external) > 0 { - params = fmt.Sprintf("%s %d %s", poolName, deviceID, external) - } else { - params = fmt.Sprintf("%s %d", poolName, deviceID) - } - if err := task.addTarget(0, size/512, "thin", params); err != nil { - return fmt.Errorf("devicemapper: Can't add target %s", err) - } - if err := task.setAddNode(addNodeOnCreate); err != nil { - return fmt.Errorf("devicemapper: Can't add node %s", err) - } - - cookie := new(uint) - if err := task.setCookie(cookie, 0); err != nil { - return fmt.Errorf("devicemapper: Can't set cookie %s", err) - } - - defer UdevWait(cookie) - - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) %s", err) - } - - return nil -} - -// CreateSnapDeviceRaw creates a snapshot device. Caller needs to suspend and resume the origin device if it is active. -func CreateSnapDeviceRaw(poolName string, deviceID int, baseDeviceID int) error { - task, err := TaskCreateNamed(deviceTargetMsg, poolName) - if task == nil { - return err - } - - if err := task.setSector(0); err != nil { - return fmt.Errorf("devicemapper: Can't set sector %s", err) - } - - if err := task.setMessage(fmt.Sprintf("create_snap %d %d", deviceID, baseDeviceID)); err != nil { - return fmt.Errorf("devicemapper: Can't set message %s", err) - } - - dmSawExist = false // reset before the task is run - if err := task.run(); err != nil { - // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. 
- if dmSawExist { - return ErrDeviceIDExists - } - return fmt.Errorf("devicemapper: Error running deviceCreate (CreateSnapDeviceRaw) %s", err) - } - - return nil -} - -// CreateSnapDevice creates a snapshot based on the device identified by the baseName and baseDeviceId, -func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDeviceID int) error { - devinfo, _ := GetInfo(baseName) - doSuspend := devinfo != nil && devinfo.Exists != 0 - - if doSuspend { - if err := SuspendDevice(baseName); err != nil { - return err - } - } - - if err := CreateSnapDeviceRaw(poolName, deviceID, baseDeviceID); err != nil { - if doSuspend { - if err2 := ResumeDevice(baseName); err2 != nil { - return fmt.Errorf("CreateSnapDeviceRaw Error: (%v): ResumeDevice Error: %w", err, err2) - } - } - return err - } - - if doSuspend { - if err := ResumeDevice(baseName); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go deleted file mode 100644 index 6cfef0a5b..000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go +++ /dev/null @@ -1,123 +0,0 @@ -//go:build linux && cgo -// +build linux,cgo - -package devicemapper - -import "C" - -import ( - "fmt" - "strings" - - "github.com/sirupsen/logrus" -) - -// DevmapperLogger defines methods required to register as a callback for -// logging events received from devicemapper. Note that devicemapper will send -// *all* logs regardless to callbacks (including debug logs) so it's -// recommended to not spam the console with the outputs. -type DevmapperLogger interface { - // DMLog is the logging callback containing all of the information from - // devicemapper. The interface is identical to the C libdm counterpart. - DMLog(level int, file string, line int, dmError int, message string) -} - -// dmLogger is the current logger in use that is being forwarded our messages. -var dmLogger DevmapperLogger - -// LogInit changes the logging callback called after processing libdm logs for -// error message information. The default logger simply forwards all logs to -// logrus. Calling LogInit(nil) disables the calling of callbacks. -func LogInit(logger DevmapperLogger) { - dmLogger = logger -} - -// Due to the way cgo works this has to be in a separate file, as devmapper.go has -// definitions in the cgo block, which is incompatible with using "//export" - -// StorageDevmapperLogCallback exports the devmapper log callback for cgo. Note that -// because we are using callbacks, this function will be called for *every* log -// in libdm (even debug ones because there's no way of setting the verbosity -// level for an external logging callback). -// -//export StorageDevmapperLogCallback -func StorageDevmapperLogCallback(level C.int, file *C.char, line, dmErrnoOrClass C.int, message *C.char) { - msg := C.GoString(message) - - // Track what errno libdm saw, because the library only gives us 0 or 1. - if level < LogLevelDebug { - if strings.Contains(msg, "busy") { - dmSawBusy = true - } - - if strings.Contains(msg, "File exists") { - dmSawExist = true - } - - if strings.Contains(msg, "No such device or address") { - dmSawEnxio = true - } - } - - if dmLogger != nil { - dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dmErrnoOrClass), msg) - } -} - -// DefaultLogger is the default logger used by pkg/devicemapper. 
It forwards -// all logs that are of higher or equal priority to the given level to the -// corresponding logrus level. -type DefaultLogger struct { - // Level corresponds to the highest libdm level that will be forwarded to - // logrus. In order to change this, register a new DefaultLogger. - Level int -} - -// DMLog is the logging callback containing all of the information from -// devicemapper. The interface is identical to the C libdm counterpart. -func (l DefaultLogger) DMLog(level int, file string, line, dmError int, message string) { - if level <= l.Level { - // Forward the log to the correct logrus level, if allowed by dmLogLevel. - logMsg := fmt.Sprintf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) - switch level { - case LogLevelFatal, LogLevelErr: - logrus.Error(logMsg) - case LogLevelWarn: - logrus.Warn(logMsg) - case LogLevelNotice, LogLevelInfo: - logrus.Info(logMsg) - case LogLevelDebug: - logrus.Debug(logMsg) - default: - // Don't drop any "unknown" levels. - logrus.Info(logMsg) - } - } -} - -// registerLogCallback registers our own logging callback function for libdm -// (which is StorageDevmapperLogCallback). -// -// Because libdm only gives us {0,1} error codes we need to parse the logs -// produced by libdm (to set dmSawBusy and so on). Note that by registering a -// callback using StorageDevmapperLogCallback, libdm will no longer output logs to -// stderr so we have to log everything ourselves. None of this handling is -// optional because we depend on log callbacks to parse the logs, and if we -// don't forward the log information we'll be in a lot of trouble when -// debugging things. -func registerLogCallback() { - LogWithErrnoInit() -} - -func init() { - // Use the default logger by default. We only allow LogLevelFatal by - // default, because internally we mask a lot of libdm errors by retrying - // and similar tricks. Also, libdm is very chatty and we don't want to - // worry users for no reason. - dmLogger = DefaultLogger{ - Level: LogLevelFatal, - } - - // Register as early as possible so we don't miss anything. - registerLogCallback() -} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go deleted file mode 100644 index 9aef4c2fb..000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go +++ /dev/null @@ -1,252 +0,0 @@ -//go:build linux && cgo -// +build linux,cgo - -package devicemapper - -/* -#define _GNU_SOURCE -#include -#include // FIXME: present only for BLKGETSIZE64, maybe we can remove it? - -// FIXME: Can't we find a way to do the logging in pure Go? -extern void StorageDevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str); - -static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...) -{ - char *buffer = NULL; - va_list ap; - int ret; - - va_start(ap, f); - ret = vasprintf(&buffer, f, ap); - va_end(ap); - if (ret < 0) { - // memory allocation failed -- should never happen? - return; - } - - StorageDevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); - free(buffer); -} - -static void log_with_errno_init() -{ - dm_log_with_errno_init(log_cb); -} -*/ -import "C" - -import ( - "reflect" - "unsafe" -) - -type ( - cdmTask C.struct_dm_task -) - -// IOCTL consts -const ( - BlkGetSize64 = C.BLKGETSIZE64 - BlkDiscard = C.BLKDISCARD -) - -// Devicemapper cookie flags. 
-const ( - DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG - DmUdevDisableDiskRulesFlag = C.DM_UDEV_DISABLE_DISK_RULES_FLAG - DmUdevDisableOtherRulesFlag = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG - DmUdevDisableLibraryFallback = C.DM_UDEV_DISABLE_LIBRARY_FALLBACK -) - -// DeviceMapper mapped functions. -var ( - DmGetLibraryVersion = dmGetLibraryVersionFct - DmGetNextTarget = dmGetNextTargetFct - DmSetDevDir = dmSetDevDirFct - DmTaskAddTarget = dmTaskAddTargetFct - DmTaskCreate = dmTaskCreateFct - DmTaskDestroy = dmTaskDestroyFct - DmTaskGetDeps = dmTaskGetDepsFct - DmTaskGetInfo = dmTaskGetInfoFct - DmTaskGetDriverVersion = dmTaskGetDriverVersionFct - DmTaskRun = dmTaskRunFct - DmTaskSetAddNode = dmTaskSetAddNodeFct - DmTaskSetCookie = dmTaskSetCookieFct - DmTaskSetMessage = dmTaskSetMessageFct - DmTaskSetName = dmTaskSetNameFct - DmTaskSetRo = dmTaskSetRoFct - DmTaskSetSector = dmTaskSetSectorFct - DmUdevWait = dmUdevWaitFct - DmUdevSetSyncSupport = dmUdevSetSyncSupportFct - DmUdevGetSyncSupport = dmUdevGetSyncSupportFct - DmCookieSupported = dmCookieSupportedFct - LogWithErrnoInit = logWithErrnoInitFct - DmTaskDeferredRemove = dmTaskDeferredRemoveFct - DmTaskGetInfoWithDeferred = dmTaskGetInfoWithDeferredFct -) - -func free(p *C.char) { - C.free(unsafe.Pointer(p)) -} - -func dmTaskDestroyFct(task *cdmTask) { - C.dm_task_destroy((*C.struct_dm_task)(task)) -} - -func dmTaskCreateFct(taskType int) *cdmTask { - return (*cdmTask)(C.dm_task_create(C.int(taskType))) -} - -func dmTaskRunFct(task *cdmTask) int { - ret, _ := C.dm_task_run((*C.struct_dm_task)(task)) - return int(ret) -} - -func dmTaskSetNameFct(task *cdmTask, name string) int { - Cname := C.CString(name) - defer free(Cname) - - return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname)) -} - -func dmTaskSetMessageFct(task *cdmTask, message string) int { - Cmessage := C.CString(message) - defer free(Cmessage) - - return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage)) -} - -func dmTaskSetSectorFct(task *cdmTask, sector uint64) int { - return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector))) -} - -func dmTaskSetCookieFct(task *cdmTask, cookie *uint, flags uint16) int { - cCookie := C.uint32_t(*cookie) - defer func() { - *cookie = uint(cCookie) - }() - return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags))) -} - -func dmTaskSetAddNodeFct(task *cdmTask, addNode AddNodeType) int { - return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode))) -} - -func dmTaskSetRoFct(task *cdmTask) int { - return int(C.dm_task_set_ro((*C.struct_dm_task)(task))) -} - -func dmTaskAddTargetFct(task *cdmTask, - start, size uint64, ttype, params string, -) int { - Cttype := C.CString(ttype) - defer free(Cttype) - - Cparams := C.CString(params) - defer free(Cparams) - - return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams)) -} - -func dmTaskGetDepsFct(task *cdmTask) *Deps { - Cdeps := C.dm_task_get_deps((*C.struct_dm_task)(task)) - if Cdeps == nil { - return nil - } - - // golang issue: https://github.com/golang/go/issues/11925 - var devices []C.uint64_t - devicesHdr := (*reflect.SliceHeader)(unsafe.Pointer(&devices)) - devicesHdr.Data = uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps))) - devicesHdr.Len = int(Cdeps.count) - devicesHdr.Cap = int(Cdeps.count) - - deps := &Deps{ - Count: uint32(Cdeps.count), - Filler: uint32(Cdeps.filler), - } - for _, 
device := range devices { - deps.Device = append(deps.Device, uint64(device)) - } - return deps -} - -func dmTaskGetInfoFct(task *cdmTask, info *Info) int { - Cinfo := C.struct_dm_info{} - defer func() { - info.Exists = int(Cinfo.exists) - info.Suspended = int(Cinfo.suspended) - info.LiveTable = int(Cinfo.live_table) - info.InactiveTable = int(Cinfo.inactive_table) - info.OpenCount = int32(Cinfo.open_count) - info.EventNr = uint32(Cinfo.event_nr) - info.Major = uint32(Cinfo.major) - info.Minor = uint32(Cinfo.minor) - info.ReadOnly = int(Cinfo.read_only) - info.TargetCount = int32(Cinfo.target_count) - }() - return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) -} - -func dmTaskGetDriverVersionFct(task *cdmTask) string { - buffer := C.malloc(128) - defer C.free(buffer) - res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128) - if res == 0 { - return "" - } - return C.GoString((*C.char)(buffer)) -} - -func dmGetNextTargetFct(task *cdmTask, next unsafe.Pointer, start, length *uint64, target, params *string) unsafe.Pointer { - var ( - Cstart, Clength C.uint64_t - CtargetType, Cparams *C.char - ) - defer func() { - *start = uint64(Cstart) - *length = uint64(Clength) - *target = C.GoString(CtargetType) - *params = C.GoString(Cparams) - }() - - nextp := C.dm_get_next_target((*C.struct_dm_task)(task), next, &Cstart, &Clength, &CtargetType, &Cparams) - return nextp -} - -func dmUdevSetSyncSupportFct(syncWithUdev int) { - (C.dm_udev_set_sync_support(C.int(syncWithUdev))) -} - -func dmUdevGetSyncSupportFct() int { - return int(C.dm_udev_get_sync_support()) -} - -func dmUdevWaitFct(cookie uint) int { - return int(C.dm_udev_wait(C.uint32_t(cookie))) -} - -func dmCookieSupportedFct() int { - return int(C.dm_cookie_supported()) -} - -func logWithErrnoInitFct() { - C.log_with_errno_init() -} - -func dmSetDevDirFct(dir string) int { - Cdir := C.CString(dir) - defer free(Cdir) - - return int(C.dm_set_dev_dir(Cdir)) -} - -func dmGetLibraryVersionFct(version *string) int { - buffer := C.CString(string(make([]byte, 128))) - defer free(buffer) - defer func() { - *version = C.GoString(buffer) - }() - return int(C.dm_get_library_version(buffer, 128)) -} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go deleted file mode 100644 index 071f7f35b..000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go +++ /dev/null @@ -1,32 +0,0 @@ -//go:build linux && cgo && !libdm_no_deferred_remove -// +build linux,cgo,!libdm_no_deferred_remove - -package devicemapper - -// #include -import "C" - -// LibraryDeferredRemovalSupport tells if the feature is enabled in the build -const LibraryDeferredRemovalSupport = true - -func dmTaskDeferredRemoveFct(task *cdmTask) int { - return int(C.dm_task_deferred_remove((*C.struct_dm_task)(task))) -} - -func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { - Cinfo := C.struct_dm_info{} - defer func() { - info.Exists = int(Cinfo.exists) - info.Suspended = int(Cinfo.suspended) - info.LiveTable = int(Cinfo.live_table) - info.InactiveTable = int(Cinfo.inactive_table) - info.OpenCount = int32(Cinfo.open_count) - info.EventNr = uint32(Cinfo.event_nr) - info.Major = uint32(Cinfo.major) - info.Minor = uint32(Cinfo.minor) - info.ReadOnly = int(Cinfo.read_only) - info.TargetCount = int32(Cinfo.target_count) - info.DeferredRemove = 
int(Cinfo.deferred_remove) - }() - return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) -} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go deleted file mode 100644 index 93dcc3221..000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build linux && cgo && !static_build -// +build linux,cgo,!static_build - -package devicemapper - -// #cgo pkg-config: devmapper -import "C" diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go deleted file mode 100644 index 91906f2ef..000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build linux && cgo && libdm_no_deferred_remove -// +build linux,cgo,libdm_no_deferred_remove - -package devicemapper - -// LibraryDeferredRemovalSupport tells if the feature is enabled in the build -const LibraryDeferredRemovalSupport = false - -func dmTaskDeferredRemoveFct(task *cdmTask) int { - // Error. Nobody should be calling it. - return -1 -} - -func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { - return -1 -} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go deleted file mode 100644 index 68ea48fe5..000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build linux && cgo && static_build -// +build linux,cgo,static_build - -package devicemapper - -// #cgo pkg-config: --static devmapper -import "C" diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go b/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go deleted file mode 100644 index 90ffe2c3f..000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go +++ /dev/null @@ -1,29 +0,0 @@ -//go:build linux && cgo -// +build linux,cgo - -package devicemapper - -import ( - "unsafe" - - "golang.org/x/sys/unix" -) - -func ioctlBlkGetSize64(fd uintptr) (int64, error) { - var size int64 - if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { - return 0, err - } - return size, nil -} - -func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { - var r [2]uint64 - r[0] = offset - r[1] = length - - if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { - return err - } - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/log.go b/vendor/github.com/containers/storage/pkg/devicemapper/log.go deleted file mode 100644 index cee5e5454..000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/log.go +++ /dev/null @@ -1,11 +0,0 @@ -package devicemapper - -// definitions from lvm2 lib/log/log.h -const ( - LogLevelFatal = 2 + iota // _LOG_FATAL - LogLevelErr // _LOG_ERR - LogLevelWarn // _LOG_WARN - LogLevelNotice // _LOG_NOTICE - LogLevelInfo // _LOG_INFO - LogLevelDebug // _LOG_DEBUG -) diff --git a/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go b/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go deleted file mode 100644 index 
e883d25f5..000000000 --- a/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build linux -// +build linux - -package dmesg - -import ( - "unsafe" - - "golang.org/x/sys/unix" -) - -// Dmesg returns last messages from the kernel log, up to size bytes -func Dmesg(size int) []byte { - t := uintptr(3) // SYSLOG_ACTION_READ_ALL - b := make([]byte, size) - amt, _, err := unix.Syscall(unix.SYS_SYSLOG, t, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b))) - if err != 0 { - return []byte{} - } - return b[:amt] -} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go b/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go new file mode 100644 index 000000000..f3087d7df --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go @@ -0,0 +1,34 @@ +//go:build !windows +// +build !windows + +package fileutils + +import ( + "os" + + "golang.org/x/sys/unix" +) + +// Exists checks whether a file or directory exists at the given path. +// If the path is a symlink, the symlink is followed. +func Exists(path string) error { + // It uses unix.Faccessat which is a faster operation compared to os.Stat for + // simply checking the existence of a file. + err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, 0) + if err != nil { + return &os.PathError{Op: "faccessat", Path: path, Err: err} + } + return nil +} + +// Lexists checks whether a file or directory exists at the given path. +// If the path is a symlink, the symlink itself is checked. +func Lexists(path string) error { + // It uses unix.Faccessat which is a faster operation compared to os.Stat for + // simply checking the existence of a file. + err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW) + if err != nil { + return &os.PathError{Op: "faccessat", Path: path, Err: err} + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/exists_windows.go b/vendor/github.com/containers/storage/pkg/fileutils/exists_windows.go new file mode 100644 index 000000000..355cf0464 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/exists_windows.go @@ -0,0 +1,18 @@ +package fileutils + +import ( + "os" +) + +// Exists checks whether a file or directory exists at the given path. +func Exists(path string) error { + _, err := os.Stat(path) + return err +} + +// Lexists checks whether a file or directory exists at the given path, without +// resolving symlinks +func Lexists(path string) error { + _, err := os.Lstat(path) + return err +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go index 9d0714b1b..85ce2d526 100644 --- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go @@ -344,7 +344,7 @@ func ReadSymlinkedPath(path string) (realPath string, err error) { if realPath, err = filepath.EvalSymlinks(realPath); err != nil { return "", fmt.Errorf("failed to canonicalise path for %q: %w", path, err) } - if _, err := os.Stat(realPath); err != nil { + if err := Exists(realPath); err != nil { return "", fmt.Errorf("failed to stat target %q of %q: %w", realPath, path, err) } return realPath, nil @@ -352,7 +352,7 @@ func ReadSymlinkedPath(path string) (realPath string, err error) { // CreateIfNotExists creates a file or a directory only if it does not already exist. 
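
The Exists and Lexists helpers added above are what the following hunks switch to in place of bare os.Stat calls. A small usage sketch, with an illustrative path:

    package main

    import (
        "fmt"
        "os"

        "github.com/containers/storage/pkg/fileutils"
    )

    func main() {
        path := "/var/lib/containers/storage" // illustrative path

        // Exists follows symlinks; on unix the error wraps the faccessat errno.
        if err := fileutils.Exists(path); err != nil {
            if os.IsNotExist(err) {
                fmt.Println("missing:", path)
                return
            }
            fmt.Println("cannot check:", err)
            return
        }
        fmt.Println("present:", path)

        // Lexists checks the symlink itself rather than its target.
        if err := fileutils.Lexists(path); err != nil {
            fmt.Println("lexists:", err)
        }
    }
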
func CreateIfNotExists(path string, isDir bool) error { - if _, err := os.Stat(path); err != nil { + if err := Exists(path); err != nil { if os.IsNotExist(err) { if isDir { return os.MkdirAll(path, 0o755) diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go index 1e2d5bcc3..ef5a95254 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go @@ -228,7 +228,7 @@ func getOverflowUID() int { return overflowUID } -// getOverflowUID returns the GID mapped to the overflow user +// getOverflowGID returns the GID mapped to the overflow user func getOverflowGID() int { overflowGIDOnce.Do(func() { // 65534 is the value on older kernels where /proc/sys/kernel/overflowgid is not present diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go index d7cb4ac2f..7900af38a 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go @@ -13,6 +13,7 @@ import ( "sync" "syscall" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/system" "github.com/moby/sys/user" ) @@ -55,7 +56,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown if dirPath == "/" { break } - if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { + if err := fileutils.Exists(dirPath); err != nil && os.IsNotExist(err) { paths = append(paths, dirPath) } } diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go index 5dd674108..510147578 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go @@ -133,11 +133,25 @@ func (l *LockFile) Lock() { } } -// LockRead locks the lockfile as a reader. +// RLock locks the lockfile as a reader. func (l *LockFile) RLock() { l.lock(readLock) } +// TryLock attempts to lock the lockfile as a writer. Panic if the lock is a read-only one. +func (l *LockFile) TryLock() error { + if l.ro { + panic("can't take write lock on read-only lock file") + } else { + return l.tryLock(writeLock) + } +} + +// TryRLock attempts to lock the lockfile as a reader. +func (l *LockFile) TryRLock() error { + return l.tryLock(readLock) +} + // Unlock unlocks the lockfile. func (l *LockFile) Unlock() { l.stateMutex.Lock() @@ -401,9 +415,47 @@ func (l *LockFile) lock(lType lockType) { // Optimization: only use the (expensive) syscall when // the counter is 0. In this case, we're either the first // reader lock or a writer lock. - lockHandle(l.fd, lType) + lockHandle(l.fd, lType, false) } l.lockType = lType l.locked = true l.counter++ } + +// lock locks the lockfile via syscall based on the specified type and +// command. +func (l *LockFile) tryLock(lType lockType) error { + var success bool + if lType == readLock { + success = l.rwMutex.TryRLock() + } else { + success = l.rwMutex.TryLock() + } + if !success { + return fmt.Errorf("resource temporarily unavailable") + } + l.stateMutex.Lock() + defer l.stateMutex.Unlock() + if l.counter == 0 { + // If we're the first reference on the lock, we need to open the file again. 
+ fd, err := openLock(l.file, l.ro) + if err != nil { + l.rwMutex.Unlock() + return err + } + l.fd = fd + + // Optimization: only use the (expensive) syscall when + // the counter is 0. In this case, we're either the first + // reader lock or a writer lock. + if err = lockHandle(l.fd, lType, true); err != nil { + closeHandle(fd) + l.rwMutex.Unlock() + return err + } + } + l.lockType = lType + l.locked = true + l.counter++ + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go index 38e737e26..0eff003bc 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go @@ -74,7 +74,7 @@ func openHandle(path string, mode int) (fileHandle, error) { return fileHandle(fd), err } -func lockHandle(fd fileHandle, lType lockType) { +func lockHandle(fd fileHandle, lType lockType, nonblocking bool) error { fType := unix.F_RDLCK if lType != readLock { fType = unix.F_WRLCK @@ -85,7 +85,15 @@ func lockHandle(fd fileHandle, lType lockType) { Start: 0, Len: 0, } - for unix.FcntlFlock(uintptr(fd), unix.F_SETLKW, &lk) != nil { + cmd := unix.F_SETLKW + if nonblocking { + cmd = unix.F_SETLK + } + for { + err := unix.FcntlFlock(uintptr(fd), cmd, &lk) + if err == nil || nonblocking { + return err + } time.Sleep(10 * time.Millisecond) } } @@ -93,3 +101,7 @@ func lockHandle(fd fileHandle, lType lockType) { func unlockAndCloseHandle(fd fileHandle) { unix.Close(int(fd)) } + +func closeHandle(fd fileHandle) { + unix.Close(int(fd)) +} diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go index 304c92b15..6482529b3 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go @@ -81,19 +81,30 @@ func openHandle(path string, mode int) (fileHandle, error) { return fileHandle(fd), err } -func lockHandle(fd fileHandle, lType lockType) { +func lockHandle(fd fileHandle, lType lockType, nonblocking bool) error { flags := 0 if lType != readLock { flags = windows.LOCKFILE_EXCLUSIVE_LOCK } + if nonblocking { + flags |= windows.LOCKFILE_FAIL_IMMEDIATELY + } ol := new(windows.Overlapped) if err := windows.LockFileEx(windows.Handle(fd), uint32(flags), reserved, allBytes, allBytes, ol); err != nil { + if nonblocking { + return err + } panic(err) } + return nil } func unlockAndCloseHandle(fd fileHandle) { ol := new(windows.Overlapped) windows.UnlockFileEx(windows.Handle(fd), reserved, allBytes, allBytes, ol) + closeHandle(fd) +} + +func closeHandle(fd fileHandle) { windows.Close(windows.Handle(fd)) } diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go deleted file mode 100644 index 6406cb14f..000000000 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go +++ /dev/null @@ -1,75 +0,0 @@ -//go:build !windows -// +build !windows - -// Package kernel provides helper function to get, parse and compare kernel -// versions for different platforms. -package kernel - -import ( - "errors" - "fmt" -) - -// VersionInfo holds information about the kernel. -type VersionInfo struct { - Kernel int // Version of the kernel (e.g. 4.1.2-generic -> 4) - Major int // Major part of the kernel version (e.g. 
4.1.2-generic -> 1) - Minor int // Minor part of the kernel version (e.g. 4.1.2-generic -> 2) - Flavor string // Flavor of the kernel version (e.g. 4.1.2-generic -> generic) -} - -func (k *VersionInfo) String() string { - return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) -} - -// CompareKernelVersion compares two kernel.VersionInfo structs. -// Returns -1 if a < b, 0 if a == b, 1 it a > b -func CompareKernelVersion(a, b VersionInfo) int { - if a.Kernel < b.Kernel { - return -1 - } else if a.Kernel > b.Kernel { - return 1 - } - - if a.Major < b.Major { - return -1 - } else if a.Major > b.Major { - return 1 - } - - if a.Minor < b.Minor { - return -1 - } else if a.Minor > b.Minor { - return 1 - } - - return 0 -} - -// ParseRelease parses a string and creates a VersionInfo based on it. -func ParseRelease(release string) (*VersionInfo, error) { - var ( - kernel, major, minor, parsed int - flavor, partial string - ) - - // Ignore error from Sscanf to allow an empty flavor. Instead, just - // make sure we got all the version numbers. - parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) - if parsed < 2 { - return nil, errors.New("Can't parse kernel version " + release) - } - - // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 - parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) - if parsed < 1 { - flavor = partial - } - - return &VersionInfo{ - Kernel: kernel, - Major: major, - Minor: minor, - Flavor: flavor, - }, nil -} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go deleted file mode 100644 index 645790da6..000000000 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go +++ /dev/null @@ -1,57 +0,0 @@ -//go:build darwin -// +build darwin - -// Package kernel provides helper function to get, parse and compare kernel -// versions for different platforms. -package kernel - -import ( - "fmt" - "os/exec" - "strings" - - "github.com/mattn/go-shellwords" -) - -// GetKernelVersion gets the current kernel version. 
-func GetKernelVersion() (*VersionInfo, error) { - release, err := getRelease() - if err != nil { - return nil, err - } - - return ParseRelease(release) -} - -// getRelease uses `system_profiler SPSoftwareDataType` to get OSX kernel version -func getRelease() (string, error) { - cmd := exec.Command("system_profiler", "SPSoftwareDataType") - osName, err := cmd.Output() - if err != nil { - return "", err - } - - var release string - data := strings.Split(string(osName), "\n") - for _, line := range data { - if strings.Contains(line, "Kernel Version") { - // It has the format like ' Kernel Version: Darwin 14.5.0' - content := strings.SplitN(line, ":", 2) - if len(content) != 2 { - return "", fmt.Errorf("kernel version is invalid") - } - - prettyNames, err := shellwords.Parse(content[1]) - if err != nil { - return "", fmt.Errorf("kernel version is invalid: %w", err) - } - - if len(prettyNames) != 2 { - return "", fmt.Errorf("kernel version needs to be 'Darwin x.x.x' ") - } - release = prettyNames[1] - } - } - - return release, nil -} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go deleted file mode 100644 index ed8cca2c6..000000000 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go +++ /dev/null @@ -1,46 +0,0 @@ -//go:build linux || freebsd || solaris || openbsd -// +build linux freebsd solaris openbsd - -// Package kernel provides helper function to get, parse and compare kernel -// versions for different platforms. -package kernel - -import ( - "bytes" - - "github.com/sirupsen/logrus" -) - -// GetKernelVersion gets the current kernel version. -func GetKernelVersion() (*VersionInfo, error) { - uts, err := uname() - if err != nil { - return nil, err - } - - release := make([]byte, len(uts.Release)) - - i := 0 - for _, c := range uts.Release { - release[i] = byte(c) - i++ - } - - // Remove the \x00 from the release for Atoi to parse correctly - release = release[:bytes.IndexByte(release, 0)] - - return ParseRelease(string(release)) -} - -// CheckKernelVersion checks if current kernel is newer than (or equal to) -// the given version. -func CheckKernelVersion(k, major, minor int) bool { - if v, err := GetKernelVersion(); err != nil { - logrus.Warnf("Error getting kernel version: %s", err) - } else { - if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 { - return false - } - } - return true -} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go deleted file mode 100644 index 4b7fdee83..000000000 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go +++ /dev/null @@ -1,70 +0,0 @@ -//go:build windows -// +build windows - -package kernel - -import ( - "fmt" - "unsafe" - - "golang.org/x/sys/windows" -) - -// VersionInfo holds information about the kernel. -type VersionInfo struct { - kvi string // Version of the kernel (e.g. 6.1.7601.17592 -> 6) - major int // Major part of the kernel version (e.g. 6.1.7601.17592 -> 1) - minor int // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 7601) - build int // Build number of the kernel version (e.g. 6.1.7601.17592 -> 17592) -} - -func (k *VersionInfo) String() string { - return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi) -} - -// GetKernelVersion gets the current kernel version. 
-func GetKernelVersion() (*VersionInfo, error) { - var ( - h windows.Handle - dwVersion uint32 - err error - ) - - KVI := &VersionInfo{"Unknown", 0, 0, 0} - - if err = windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE, - windows.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), - 0, - windows.KEY_READ, - &h); err != nil { - return KVI, err - } - defer windows.RegCloseKey(h) - - var buf [1 << 10]uint16 - var typ uint32 - n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 - - if err = windows.RegQueryValueEx(h, - windows.StringToUTF16Ptr("BuildLabEx"), - nil, - &typ, - (*byte)(unsafe.Pointer(&buf[0])), - &n); err != nil { - return KVI, err - } - - KVI.kvi = windows.UTF16ToString(buf[:]) - - // Important - docker.exe MUST be manifested for this API to return - // the correct information. - if dwVersion, err = windows.GetVersion(); err != nil { - return KVI, err - } - - KVI.major = int(dwVersion & 0xFF) - KVI.minor = int((dwVersion & 0xFF00) >> 8) - KVI.build = int((dwVersion & 0xFFFF0000) >> 16) - - return KVI, nil -} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_freebsd.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_freebsd.go deleted file mode 100644 index e913fad00..000000000 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_freebsd.go +++ /dev/null @@ -1,17 +0,0 @@ -package kernel - -import "golang.org/x/sys/unix" - -// Utsname represents the system name structure. -// It is passthrough for unix.Utsname in order to make it portable with -// other platforms where it is not available. -type Utsname unix.Utsname - -func uname() (*unix.Utsname, error) { - uts := &unix.Utsname{} - - if err := unix.Uname(uts); err != nil { - return nil, err - } - return uts, nil -} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go deleted file mode 100644 index e913fad00..000000000 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go +++ /dev/null @@ -1,17 +0,0 @@ -package kernel - -import "golang.org/x/sys/unix" - -// Utsname represents the system name structure. -// It is passthrough for unix.Utsname in order to make it portable with -// other platforms where it is not available. -type Utsname unix.Utsname - -func uname() (*unix.Utsname, error) { - uts := &unix.Utsname{} - - if err := unix.Uname(uts); err != nil { - return nil, err - } - return uts, nil -} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go deleted file mode 100644 index 49370bd3d..000000000 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go +++ /dev/null @@ -1,14 +0,0 @@ -package kernel - -import ( - "golang.org/x/sys/unix" -) - -func uname() (*unix.Utsname, error) { - uts := &unix.Utsname{} - - if err := unix.Uname(uts); err != nil { - return nil, err - } - return uts, nil -} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go deleted file mode 100644 index 12671db51..000000000 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build openbsd -// +build openbsd - -package kernel - -import ( - "fmt" - "runtime" -) - -// A stub called by kernel_unix.go . 
-func uname() (*Utsname, error) { - return nil, fmt.Errorf("Kernel version detection is not available on %s", runtime.GOOS) -} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported_type.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported_type.go deleted file mode 100644 index f515500c9..000000000 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported_type.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build !linux && !solaris && !freebsd -// +build !linux,!solaris,!freebsd - -package kernel - -// Utsname represents the system name structure. -// It is defined here to make it portable as it is available on linux but not -// on windows. -type Utsname struct { - Release [65]byte -} diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare.c b/vendor/github.com/containers/storage/pkg/unshare/unshare.c index f5a7c3a25..a2800654f 100644 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare.c +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf index 924e8f13a..0f8d1f024 100644 --- a/vendor/github.com/containers/storage/storage.conf +++ b/vendor/github.com/containers/storage/storage.conf @@ -173,79 +173,3 @@ mountopt = "nodev" # "force_mask" permissions. # # force_mask = "" - -[storage.options.thinpool] -# Storage Options for thinpool - -# autoextend_percent determines the amount by which pool needs to be -# grown. This is specified in terms of % of pool size. So a value of 20 means -# that when threshold is hit, pool will be grown by 20% of existing -# pool size. -# autoextend_percent = "20" - -# autoextend_threshold determines the pool extension threshold in terms -# of percentage of pool size. For example, if threshold is 60, that means when -# pool is 60% full, threshold has been hit. -# autoextend_threshold = "80" - -# basesize specifies the size to use when creating the base device, which -# limits the size of images and containers. -# basesize = "10G" - -# blocksize specifies a custom blocksize to use for the thin pool. -# blocksize="64k" - -# directlvm_device specifies a custom block storage device to use for the -# thin pool. Required if you setup devicemapper. -# directlvm_device = "" - -# directlvm_device_force wipes device even if device already has a filesystem. -# directlvm_device_force = "True" - -# fs specifies the filesystem type to use for the base device. -# fs="xfs" - -# log_level sets the log level of devicemapper. -# 0: LogLevelSuppress 0 (Default) -# 2: LogLevelFatal -# 3: LogLevelErr -# 4: LogLevelWarn -# 5: LogLevelNotice -# 6: LogLevelInfo -# 7: LogLevelDebug -# log_level = "7" - -# min_free_space specifies the min free space percent in a thin pool require for -# new device creation to succeed. Valid values are from 0% - 99%. -# Value 0% disables -# min_free_space = "10%" - -# mkfsarg specifies extra mkfs arguments to be used when creating the base -# device. -# mkfsarg = "" - -# metadata_size is used to set the `pvcreate --metadatasize` options when -# creating thin devices. Default is 128k -# metadata_size = "" - -# Size is used to set a maximum size of the container image. -# size = "" - -# use_deferred_removal marks devicemapper block device for deferred removal. 
-# If the thinpool is in use when the driver attempts to remove it, the driver -# tells the kernel to remove it as soon as possible. Note this does not free -# up the disk space, use deferred deletion to fully remove the thinpool. -# use_deferred_removal = "True" - -# use_deferred_deletion marks thinpool device for deferred deletion. -# If the device is busy when the driver attempts to delete it, the driver -# will attempt to delete device every 30 seconds until successful. -# If the program using the driver exits, the driver will continue attempting -# to cleanup the next time the driver is used. Deferred deletion permanently -# deletes the device and all data stored in device will be lost. -# use_deferred_deletion = "True" - -# xfs_nospace_max_retries specifies the maximum number of retries XFS should -# attempt to complete IO when ENOSPC (no space) error is returned by -# underlying storage device. -# xfs_nospace_max_retries = "0" diff --git a/vendor/github.com/containers/storage/storage.conf-freebsd b/vendor/github.com/containers/storage/storage.conf-freebsd index 03bbe2461..43278a1fc 100644 --- a/vendor/github.com/containers/storage/storage.conf-freebsd +++ b/vendor/github.com/containers/storage/storage.conf-freebsd @@ -134,79 +134,3 @@ mountopt = "nodev" # "force_mask" permissions. # # force_mask = "" - -[storage.options.thinpool] -# Storage Options for thinpool - -# autoextend_percent determines the amount by which pool needs to be -# grown. This is specified in terms of % of pool size. So a value of 20 means -# that when threshold is hit, pool will be grown by 20% of existing -# pool size. -# autoextend_percent = "20" - -# autoextend_threshold determines the pool extension threshold in terms -# of percentage of pool size. For example, if threshold is 60, that means when -# pool is 60% full, threshold has been hit. -# autoextend_threshold = "80" - -# basesize specifies the size to use when creating the base device, which -# limits the size of images and containers. -# basesize = "10G" - -# blocksize specifies a custom blocksize to use for the thin pool. -# blocksize="64k" - -# directlvm_device specifies a custom block storage device to use for the -# thin pool. Required if you setup devicemapper. -# directlvm_device = "" - -# directlvm_device_force wipes device even if device already has a filesystem. -# directlvm_device_force = "True" - -# fs specifies the filesystem type to use for the base device. -# fs="xfs" - -# log_level sets the log level of devicemapper. -# 0: LogLevelSuppress 0 (Default) -# 2: LogLevelFatal -# 3: LogLevelErr -# 4: LogLevelWarn -# 5: LogLevelNotice -# 6: LogLevelInfo -# 7: LogLevelDebug -# log_level = "7" - -# min_free_space specifies the min free space percent in a thin pool require for -# new device creation to succeed. Valid values are from 0% - 99%. -# Value 0% disables -# min_free_space = "10%" - -# mkfsarg specifies extra mkfs arguments to be used when creating the base -# device. -# mkfsarg = "" - -# metadata_size is used to set the `pvcreate --metadatasize` options when -# creating thin devices. Default is 128k -# metadata_size = "" - -# Size is used to set a maximum size of the container image. -# size = "" - -# use_deferred_removal marks devicemapper block device for deferred removal. -# If the thinpool is in use when the driver attempts to remove it, the driver -# tells the kernel to remove it as soon as possible. Note this does not free -# up the disk space, use deferred deletion to fully remove the thinpool. 
-# use_deferred_removal = "True" - -# use_deferred_deletion marks thinpool device for deferred deletion. -# If the device is busy when the driver attempts to delete it, the driver -# will attempt to delete device every 30 seconds until successful. -# If the program using the driver exits, the driver will continue attempting -# to cleanup the next time the driver is used. Deferred deletion permanently -# deletes the device and all data stored in device will be lost. -# use_deferred_deletion = "True" - -# xfs_nospace_max_retries specifies the maximum number of retries XFS should -# attempt to complete IO when ENOSPC (no space) error is returned by -# underlying storage device. -# xfs_nospace_max_retries = "0" diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index c6f125189..957675ba4 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -330,17 +330,9 @@ type Store interface { // successfully applied with ApplyDiffFromStagingDirectory. ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) - // ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff. - // Deprecated: it will be removed soon. Use ApplyStagedLayer instead. - ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error - - // CleanupStagingDirectory cleanups the staging directory. It can be used to cleanup the staging directory on errors - // Deprecated: it will be removed soon. Use CleanupStagedLayer instead. - CleanupStagingDirectory(stagingDirectory string) error - - // ApplyStagedLayer combines the functions of CreateLayer and ApplyDiffFromStagingDirectory, - // marking the layer for automatic removal if applying the diff fails - // for any reason. + // ApplyStagedLayer combines the functions of creating a layer and using the staging + // directory to populate it. + // It marks the layer for automatic removal if applying the diff fails for any reason. ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error) // CleanupStagedLayer cleanups the staging directory. It can be used to cleanup the staging directory on errors @@ -549,14 +541,14 @@ type Store interface { GetDigestLock(digest.Digest) (Locker, error) // LayerFromAdditionalLayerStore searches the additional layer store and returns an object - // which can create a layer with the specified digest associated with the specified image + // which can create a layer with the specified TOC digest associated with the specified image // reference. Note that this hasn't been stored to this store yet: the actual creation of // a usable layer is done by calling the returned object's PutAs() method. After creating // a layer, the caller must then call the object's Release() method to free any temporary // resources which were allocated for the object by this method or the object's PutAs() // method. // This API is experimental and can be changed without bumping the major version number. - LookupAdditionalLayer(d digest.Digest, imageref string) (AdditionalLayer, error) + LookupAdditionalLayer(tocDigest digest.Digest, imageref string) (AdditionalLayer, error) // Tries to clean up remainders of previous containers or layers that are not // references in the json files. These can happen in the case of unclean @@ -578,8 +570,8 @@ type AdditionalLayer interface { // layer store. 
PutAs(id, parent string, names []string) (*Layer, error) - // UncompressedDigest returns the uncompressed digest of this layer - UncompressedDigest() digest.Digest + // TOCDigest returns the digest of TOC of this layer. Returns "" if unknown. + TOCDigest() digest.Digest // CompressedSize returns the compressed size of this layer CompressedSize() int64 @@ -1445,20 +1437,8 @@ func (s *store) canUseShifting(uidmap, gidmap []idtools.IDMap) bool { return true } -func (s *store) putLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader, slo *stagedLayerOptions) (*Layer, int64, error) { - rlstore, rlstores, err := s.bothLayerStoreKinds() - if err != nil { - return nil, -1, err - } - if err := rlstore.startWriting(); err != nil { - return nil, -1, err - } - defer rlstore.stopWriting() - if err := s.containerStore.startWriting(); err != nil { - return nil, -1, err - } - defer s.containerStore.stopWriting() - +// putLayer requires the rlstore, rlstores, as well as s.containerStore (even if not an argument to this function) to be locked for write. +func (s *store) putLayer(rlstore rwLayerStore, rlstores []roLayerStore, id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader, slo *stagedLayerOptions) (*Layer, int64, error) { var parentLayer *Layer var options LayerOptions if lOptions != nil { @@ -1537,7 +1517,19 @@ func (s *store) putLayer(id, parent string, names []string, mountLabel string, w } func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader) (*Layer, int64, error) { - return s.putLayer(id, parent, names, mountLabel, writeable, lOptions, diff, nil) + rlstore, rlstores, err := s.bothLayerStoreKinds() + if err != nil { + return nil, -1, err + } + if err := rlstore.startWriting(); err != nil { + return nil, -1, err + } + defer rlstore.stopWriting() + if err := s.containerStore.startWriting(); err != nil { + return nil, -1, err + } + defer s.containerStore.stopWriting() + return s.putLayer(rlstore, rlstores, id, parent, names, mountLabel, writeable, lOptions, diff, nil) } func (s *store) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error) { @@ -2820,22 +2812,42 @@ func (s *store) mount(id string, options drivers.MountOpts) (string, error) { } defer s.stopUsingGraphDriver() - rlstore, err := s.getLayerStoreLocked() + rlstore, lstores, err := s.bothLayerStoreKindsLocked() if err != nil { return "", err } - if err := rlstore.startWriting(); err != nil { - return "", err - } - defer rlstore.stopWriting() - if options.UidMaps != nil || options.GidMaps != nil { options.DisableShifting = !s.canUseShifting(options.UidMaps, options.GidMaps) } - if rlstore.Exists(id) { - return rlstore.Mount(id, options) + // function used to have a scope for rlstore.StopWriting() + tryMount := func() (string, error) { + if err := rlstore.startWriting(); err != nil { + return "", err + } + defer rlstore.stopWriting() + if rlstore.Exists(id) { + return rlstore.Mount(id, options) + } + return "", nil } + mountPoint, err := tryMount() + if mountPoint != "" || err != nil { + return mountPoint, err + } + + // check if the layer is in a read-only store, and return a better error message + for _, store := range lstores { + if err := store.startReading(); err != nil { + return "", err + } + exists := store.Exists(id) + store.stopReading() + if exists { + return "", 
fmt.Errorf("mounting read/only store images is not allowed: %w", ErrLayerUnknown) + } + } + return "", ErrLayerUnknown } @@ -2982,36 +2994,39 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro return nil, ErrLayerUnknown } -func (s *store) ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error { - if stagingDirectory != diffOutput.Target { - return fmt.Errorf("invalid value for staging directory, it must be the same as the differ target directory") - } - _, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) { - if !rlstore.Exists(to) { - return struct{}{}, ErrLayerUnknown - } - return struct{}{}, rlstore.applyDiffFromStagingDirectory(to, diffOutput, options) - }) - return err -} - func (s *store) ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error) { + rlstore, rlstores, err := s.bothLayerStoreKinds() + if err != nil { + return nil, err + } + if err := rlstore.startWriting(); err != nil { + return nil, err + } + defer rlstore.stopWriting() + + layer, err := rlstore.Get(args.ID) + if err != nil && !errors.Is(err, ErrLayerUnknown) { + return layer, err + } + if err == nil { + return layer, rlstore.applyDiffFromStagingDirectory(args.ID, args.DiffOutput, args.DiffOptions) + } + + // if the layer doesn't exist yet, try to create it. + + if err := s.containerStore.startWriting(); err != nil { + return nil, err + } + defer s.containerStore.stopWriting() + slo := stagedLayerOptions{ DiffOutput: args.DiffOutput, DiffOptions: args.DiffOptions, } - - layer, _, err := s.putLayer(args.ID, args.ParentLayer, args.Names, args.MountLabel, args.Writeable, args.LayerOptions, nil, &slo) + layer, _, err = s.putLayer(rlstore, rlstores, args.ID, args.ParentLayer, args.Names, args.MountLabel, args.Writeable, args.LayerOptions, nil, &slo) return layer, err } -func (s *store) CleanupStagingDirectory(stagingDirectory string) error { - _, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) { - return struct{}{}, rlstore.CleanupStagingDirectory(stagingDirectory) - }) - return err -} - func (s *store) CleanupStagedLayer(diffOutput *drivers.DriverWithDifferOutput) error { _, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) { return struct{}{}, rlstore.CleanupStagingDirectory(diffOutput.Target) @@ -3193,7 +3208,7 @@ func (s *store) Layer(id string) (*Layer, error) { return nil, ErrLayerUnknown } -func (s *store) LookupAdditionalLayer(d digest.Digest, imageref string) (AdditionalLayer, error) { +func (s *store) LookupAdditionalLayer(tocDigest digest.Digest, imageref string) (AdditionalLayer, error) { var adriver drivers.AdditionalLayerStoreDriver if err := func() error { // A scope for defer if err := s.startUsingGraphDriver(); err != nil { @@ -3210,7 +3225,7 @@ func (s *store) LookupAdditionalLayer(d digest.Digest, imageref string) (Additio return nil, err } - al, err := adriver.LookupAdditionalLayer(d, imageref) + al, err := adriver.LookupAdditionalLayer(tocDigest, imageref) if err != nil { if errors.Is(err, drivers.ErrLayerUnknown) { return nil, ErrLayerUnknown @@ -3235,8 +3250,8 @@ type additionalLayer struct { s *store } -func (al *additionalLayer) UncompressedDigest() digest.Digest { - return al.layer.UncompressedDigest +func (al *additionalLayer) TOCDigest() digest.Digest { + return al.layer.TOCDigest } func (al *additionalLayer) CompressedSize() int64 { diff --git 
a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go index ad0bfa43a..03e5f7ab6 100644 --- a/vendor/github.com/containers/storage/types/options.go +++ b/vendor/github.com/containers/storage/types/options.go @@ -11,6 +11,7 @@ import ( "github.com/BurntSushi/toml" cfg "github.com/containers/storage/pkg/config" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/unshare" @@ -76,7 +77,7 @@ func loadDefaultStoreOptions() { if path, ok := os.LookupEnv("XDG_CONFIG_HOME"); ok { homeConfigFile := filepath.Join(path, "containers", "storage.conf") - if _, err := os.Stat(homeConfigFile); err == nil { + if err := fileutils.Exists(homeConfigFile); err == nil { // user storage.conf in XDG_CONFIG_HOME if it exists defaultOverrideConfigFile = homeConfigFile } else { @@ -87,7 +88,7 @@ func loadDefaultStoreOptions() { } } - _, err := os.Stat(defaultOverrideConfigFile) + err := fileutils.Exists(defaultOverrideConfigFile) if err == nil { // The DefaultConfigFile() function returns the path // of the used storage.conf file, by returning defaultConfigFile @@ -150,7 +151,7 @@ func loadStoreOptionsFromConfFile(storageConf string) (StoreOptions, error) { return storageOpts, err } } - _, err = os.Stat(storageConf) + err = fileutils.Exists(storageConf) if err != nil && !os.IsNotExist(err) { return storageOpts, err } diff --git a/vendor/github.com/containers/storage/types/storage_test.conf b/vendor/github.com/containers/storage/types/storage_test.conf index 87b0c9bb1..c42d33fb9 100644 --- a/vendor/github.com/containers/storage/types/storage_test.conf +++ b/vendor/github.com/containers/storage/types/storage_test.conf @@ -39,7 +39,3 @@ remap-gids = "0:1500000000:60000" # mountopt specifies comma separated list of extra mount options mountopt = "nodev" - - -[storage.options.thinpool] -# Storage Options for thinpool diff --git a/vendor/github.com/containers/storage/types/utils.go b/vendor/github.com/containers/storage/types/utils.go index 5b4b31b80..b313a4728 100644 --- a/vendor/github.com/containers/storage/types/utils.go +++ b/vendor/github.com/containers/storage/types/utils.go @@ -7,6 +7,7 @@ import ( "strconv" "strings" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/homedir" "github.com/sirupsen/logrus" ) @@ -31,7 +32,7 @@ func DefaultConfigFile() (string, error) { return path, nil } if !usePerUserStorage() { - if _, err := os.Stat(defaultOverrideConfigFile); err == nil { + if err := fileutils.Exists(defaultOverrideConfigFile); err == nil { return defaultOverrideConfigFile, nil } return defaultConfigFile, nil diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION index abd410582..3a4036fb4 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/VERSION +++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION @@ -1 +1 @@ -0.2.4 +0.2.5 diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go index aa32b85fb..5ac23b998 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/join.go +++ b/vendor/github.com/cyphar/filepath-securejoin/join.go @@ -11,7 +11,6 @@ package securejoin import ( - "bytes" "errors" "os" "path/filepath" @@ -19,6 +18,8 @@ import ( "syscall" ) +const maxSymlinkLimit = 255 + // IsNotExist tells you if err is an error that implies that either the path // 
accessed does not exist (or path components don't exist). This is // effectively a more broad version of os.IsNotExist. @@ -51,71 +52,69 @@ func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { } unsafePath = filepath.FromSlash(unsafePath) - var path bytes.Buffer - n := 0 - for unsafePath != "" { - if n > 255 { - return "", &os.PathError{Op: "SecureJoin", Path: root + string(filepath.Separator) + unsafePath, Err: syscall.ELOOP} + var ( + currentPath string + remainingPath = unsafePath + linksWalked int + ) + for remainingPath != "" { + if v := filepath.VolumeName(remainingPath); v != "" { + remainingPath = remainingPath[len(v):] } - if v := filepath.VolumeName(unsafePath); v != "" { - unsafePath = unsafePath[len(v):] - } - - // Next path component, p. - i := strings.IndexRune(unsafePath, filepath.Separator) - var p string - if i == -1 { - p, unsafePath = unsafePath, "" + // Get the next path component. + var part string + if i := strings.IndexRune(remainingPath, filepath.Separator); i == -1 { + part, remainingPath = remainingPath, "" } else { - p, unsafePath = unsafePath[:i], unsafePath[i+1:] + part, remainingPath = remainingPath[:i], remainingPath[i+1:] } - // Create a cleaned path, using the lexical semantics of /../a, to - // create a "scoped" path component which can safely be joined to fullP - // for evaluation. At this point, path.String() doesn't contain any - // symlink components. - cleanP := filepath.Clean(string(filepath.Separator) + path.String() + p) - if cleanP == string(filepath.Separator) { - path.Reset() + // Apply the component lexically to the path we are building. + // currentPath does not contain any symlinks, and we are lexically + // dealing with a single component, so it's okay to do a filepath.Clean + // here. + nextPath := filepath.Join(string(filepath.Separator), currentPath, part) + if nextPath == string(filepath.Separator) { + currentPath = "" continue } - fullP := filepath.Clean(root + cleanP) + fullPath := root + string(filepath.Separator) + nextPath // Figure out whether the path is a symlink. - fi, err := vfs.Lstat(fullP) + fi, err := vfs.Lstat(fullPath) if err != nil && !IsNotExist(err) { return "", err } // Treat non-existent path components the same as non-symlinks (we // can't do any better here). if IsNotExist(err) || fi.Mode()&os.ModeSymlink == 0 { - path.WriteString(p) - path.WriteRune(filepath.Separator) + currentPath = nextPath continue } - // Only increment when we actually dereference a link. - n++ + // It's a symlink, so get its contents and expand it by prepending it + // to the yet-unparsed path. + linksWalked++ + if linksWalked > maxSymlinkLimit { + return "", &os.PathError{Op: "SecureJoin", Path: root + string(filepath.Separator) + unsafePath, Err: syscall.ELOOP} + } - // It's a symlink, expand it by prepending it to the yet-unparsed path. - dest, err := vfs.Readlink(fullP) + dest, err := vfs.Readlink(fullPath) if err != nil { return "", err } + remainingPath = dest + string(filepath.Separator) + remainingPath // Absolute symlinks reset any work we've already done. if filepath.IsAbs(dest) { - path.Reset() + currentPath = "" } - unsafePath = dest + string(filepath.Separator) + unsafePath } - // We have to clean path.String() here because it may contain '..' - // components that are entirely lexical, but would be misleading otherwise. - // And finally do a final clean to ensure that root is also lexically - // clean. 
- fullP := filepath.Clean(string(filepath.Separator) + path.String()) - return filepath.Clean(root + fullP), nil + // There should be no lexical components like ".." left in the path here, + // but for safety clean up the path before joining it to the root. + finalPath := filepath.Join(string(filepath.Separator), currentPath) + return filepath.Join(root, finalPath), nil } // SecureJoin is a wrapper around SecureJoinVFS that just uses the os.* library diff --git a/vendor/github.com/distribution/reference/README.md b/vendor/github.com/distribution/reference/README.md index e2531e49c..172a02e0b 100644 --- a/vendor/github.com/distribution/reference/README.md +++ b/vendor/github.com/distribution/reference/README.md @@ -10,7 +10,7 @@ Go library to handle references to container images. [![codecov](https://codecov.io/gh/distribution/reference/branch/main/graph/badge.svg)](https://codecov.io/gh/distribution/reference) [![FOSSA Status](https://app.fossa.com/api/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Freference.svg?type=shield)](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Freference?ref=badge_shield) -This repository contains a library for handling refrences to container images held in container registries. Please see [godoc](https://pkg.go.dev/github.com/distribution/reference) for details. +This repository contains a library for handling references to container images held in container registries. Please see [godoc](https://pkg.go.dev/github.com/distribution/reference) for details. ## Contribution diff --git a/vendor/github.com/distribution/reference/normalize.go b/vendor/github.com/distribution/reference/normalize.go index a30229d01..f4128314c 100644 --- a/vendor/github.com/distribution/reference/normalize.go +++ b/vendor/github.com/distribution/reference/normalize.go @@ -123,20 +123,51 @@ func ParseDockerRef(ref string) (Named, error) { // splitDockerDomain splits a repository name to domain and remote-name. // If no valid domain is found, the default domain is used. Repository name // needs to be already validated before. -func splitDockerDomain(name string) (domain, remainder string) { - i := strings.IndexRune(name, '/') - if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != localhost && strings.ToLower(name[:i]) == name[:i]) { - domain, remainder = defaultDomain, name - } else { - domain, remainder = name[:i], name[i+1:] +func splitDockerDomain(name string) (domain, remoteName string) { + maybeDomain, maybeRemoteName, ok := strings.Cut(name, "/") + if !ok { + // Fast-path for single element ("familiar" names), such as "ubuntu" + // or "ubuntu:latest". Familiar names must be handled separately, to + // prevent them from being handled as "hostname:port". + // + // Canonicalize them as "docker.io/library/name[:tag]" + + // FIXME(thaJeztah): account for bare "localhost" or "example.com" names, which SHOULD be considered a domain. + return defaultDomain, officialRepoPrefix + name } - if domain == legacyDefaultDomain { - domain = defaultDomain + + switch { + case maybeDomain == localhost: + // localhost is a reserved namespace and always considered a domain. + domain, remoteName = maybeDomain, maybeRemoteName + case maybeDomain == legacyDefaultDomain: + // canonicalize the Docker Hub and legacy "Docker Index" domains. + domain, remoteName = defaultDomain, maybeRemoteName + case strings.ContainsAny(maybeDomain, ".:"): + // Likely a domain or IP-address: + // + // - contains a "." 
(e.g., "example.com" or "127.0.0.1") + // - contains a ":" (e.g., "example:5000", "::1", or "[::1]:5000") + domain, remoteName = maybeDomain, maybeRemoteName + case strings.ToLower(maybeDomain) != maybeDomain: + // Uppercase namespaces are not allowed, so if the first element + // is not lowercase, we assume it to be a domain-name. + domain, remoteName = maybeDomain, maybeRemoteName + default: + // None of the above: it's not a domain, so use the default, and + // use the name input the remote-name. + domain, remoteName = defaultDomain, name } - if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { - remainder = officialRepoPrefix + remainder + + if domain == defaultDomain && !strings.ContainsRune(remoteName, '/') { + // Canonicalize "familiar" names, but only on Docker Hub, not + // on other domains: + // + // "docker.io/ubuntu[:tag]" => "docker.io/library/ubuntu[:tag]" + remoteName = officialRepoPrefix + remoteName } - return + + return domain, remoteName } // familiarizeName returns a shortened version of the name familiar diff --git a/vendor/github.com/distribution/reference/reference.go b/vendor/github.com/distribution/reference/reference.go index e98c44daa..900398bde 100644 --- a/vendor/github.com/distribution/reference/reference.go +++ b/vendor/github.com/distribution/reference/reference.go @@ -35,8 +35,13 @@ import ( ) const ( + // RepositoryNameTotalLengthMax is the maximum total number of characters in a repository name. + RepositoryNameTotalLengthMax = 255 + // NameTotalLengthMax is the maximum total number of characters in a repository name. - NameTotalLengthMax = 255 + // + // Deprecated: use [RepositoryNameTotalLengthMax] instead. + NameTotalLengthMax = RepositoryNameTotalLengthMax ) var ( @@ -55,8 +60,8 @@ var ( // ErrNameEmpty is returned for empty, invalid repository names. ErrNameEmpty = errors.New("repository name must have at least one component") - // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. - ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) + // ErrNameTooLong is returned when a repository name is longer than RepositoryNameTotalLengthMax. + ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", RepositoryNameTotalLengthMax) // ErrNameNotCanonical is returned when a name is not canonical. ErrNameNotCanonical = errors.New("repository name must be canonical") @@ -165,6 +170,9 @@ func Path(named Named) (name string) { return path } +// splitDomain splits a named reference into a hostname and path string. +// If no valid hostname is found, the hostname is empty and the full value +// is returned as name func splitDomain(name string) (string, string) { match := anchoredNameRegexp.FindStringSubmatch(name) if len(match) != 3 { @@ -173,19 +181,6 @@ func splitDomain(name string) (string, string) { return match[1], match[2] } -// SplitHostname splits a named reference into a -// hostname and name string. If no valid hostname is -// found, the hostname is empty and the full value -// is returned as name -// -// Deprecated: Use [Domain] or [Path]. -func SplitHostname(named Named) (string, string) { - if r, ok := named.(namedRepository); ok { - return r.Domain(), r.Path() - } - return splitDomain(named.Name()) -} - // Parse parses s and returns a syntactically valid Reference. // If an error was encountered it is returned, along with a nil Reference. 
func Parse(s string) (Reference, error) { @@ -200,10 +195,6 @@ func Parse(s string) (Reference, error) { return nil, ErrReferenceInvalidFormat } - if len(matches[1]) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - var repo repository nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) @@ -215,6 +206,10 @@ func Parse(s string) (Reference, error) { repo.path = matches[1] } + if len(repo.path) > RepositoryNameTotalLengthMax { + return nil, ErrNameTooLong + } + ref := reference{ namedRepository: repo, tag: matches[2], @@ -253,14 +248,15 @@ func ParseNamed(s string) (Named, error) { // WithName returns a named object representing the given string. If the input // is invalid ErrReferenceInvalidFormat will be returned. func WithName(name string) (Named, error) { - if len(name) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - match := anchoredNameRegexp.FindStringSubmatch(name) if match == nil || len(match) != 3 { return nil, ErrReferenceInvalidFormat } + + if len(match[2]) > RepositoryNameTotalLengthMax { + return nil, ErrNameTooLong + } + return repository{ domain: match[1], path: match[2], diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS index 48d04f9a9..36315d429 100644 --- a/vendor/github.com/docker/docker/AUTHORS +++ b/vendor/github.com/docker/docker/AUTHORS @@ -669,6 +669,7 @@ Erik Hollensbe Erik Inge Bolsø Erik Kristensen Erik Sipsma +Erik Sjölund Erik St. Martin Erik Weathers Erno Hopearuoho @@ -731,6 +732,7 @@ Feroz Salam Ferran Rodenas Filipe Brandenburger Filipe Oliveira +Filipe Pina Flavio Castelli Flavio Crisciani Florian @@ -875,6 +877,8 @@ Hsing-Yu (David) Chen hsinko <21551195@zju.edu.cn> Hu Keping Hu Tao +Huajin Tong +huang-jl <1046678590@qq.com> HuanHuan Ye Huanzhong Zhang Huayi Zhang @@ -969,6 +973,7 @@ Jannick Fahlbusch Januar Wayong Jared Biel Jared Hocutt +Jaroslav Jindrak Jaroslaw Zabiello Jasmine Hegman Jason A. Donenfeld @@ -1012,6 +1017,7 @@ Jeffrey Bolle Jeffrey Morgan Jeffrey van Gogh Jenny Gebske +Jeongseok Kang Jeremy Chambers Jeremy Grosser Jeremy Huntwork @@ -1029,6 +1035,7 @@ Jezeniel Zapanta Jhon Honce Ji.Zhilong Jian Liao +Jian Zeng Jian Zhang Jiang Jinyang Jianyong Wu @@ -1967,6 +1974,7 @@ Sergey Evstifeev Sergii Kabashniuk Sergio Lopez Serhat Gülçiçek +Serhii Nakon SeungUkLee Sevki Hasirci Shane Canon @@ -2253,6 +2261,7 @@ VladimirAus Vladislav Kolesnikov Vlastimil Zeman Vojtech Vitek (V-Teq) +voloder <110066198+voloder@users.noreply.github.com> Walter Leibbrandt Walter Stanish Wang Chao diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go index 37e553d41..b11c2fe02 100644 --- a/vendor/github.com/docker/docker/api/common.go +++ b/vendor/github.com/docker/docker/api/common.go @@ -2,8 +2,17 @@ package api // import "github.com/docker/docker/api" // Common constants for daemon and client. const ( - // DefaultVersion of Current REST API - DefaultVersion = "1.44" + // DefaultVersion of the current REST API. + DefaultVersion = "1.45" + + // MinSupportedAPIVersion is the minimum API version that can be supported + // by the API server, specified as "major.minor". Note that the daemon + // may be configured with a different minimum API version, as returned + // in [github.com/docker/docker/api/types.Version.MinAPIVersion]. + // + // API requests for API versions lower than the configured version produce + // an error. 
+ MinSupportedAPIVersion = "1.24" // NoBaseImageSpecifier is the symbol used by the FROM // command to specify that no base image is to be used. diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index 201b54906..5677340db 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -19,10 +19,10 @@ produces: consumes: - "application/json" - "text/plain" -basePath: "/v1.44" +basePath: "/v1.45" info: title: "Docker Engine API" - version: "1.44" + version: "1.45" x-logo: url: "https://docs.docker.com/assets/images/logo-docker-main.png" description: | @@ -55,8 +55,8 @@ info: the URL is not supported by the daemon, a HTTP `400 Bad Request` error message is returned. - If you omit the version-prefix, the current version of the API (v1.44) is used. - For example, calling `/info` is the same as calling `/v1.44/info`. Using the + If you omit the version-prefix, the current version of the API (v1.45) is used. + For example, calling `/info` is the same as calling `/v1.45/info`. Using the API without a version-prefix is deprecated and will be removed in a future release. Engine releases in the near future should support this version of the API, @@ -427,6 +427,10 @@ definitions: type: "object" additionalProperties: type: "string" + Subpath: + description: "Source path inside the volume. Must be relative without any back traversals." + type: "string" + example: "dir-inside-volume/subdirectory" TmpfsOptions: description: "Optional configuration for the `tmpfs` type." type: "object" @@ -8770,8 +8774,7 @@ paths:
- > **Deprecated**: This field is deprecated and will always - > be "false" in future. + > **Deprecated**: This field is deprecated and will always be "false". type: "boolean" example: false name: @@ -8814,13 +8817,8 @@ paths: description: | A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: - - `is-automated=(true|false)` (deprecated, see below) - `is-official=(true|false)` - `stars=` Matches images that has at least 'number' stars. - - The `is-automated` filter is deprecated. The `is_automated` field has - been deprecated by Docker Hub's search API. Consequently, searching - for `is-automated=true` will yield no results. type: "string" tags: ["Image"] /images/prune: diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go index 24b00a275..882201f0e 100644 --- a/vendor/github.com/docker/docker/api/types/client.go +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -157,42 +157,12 @@ type ImageBuildResponse struct { OSType string } -// ImageCreateOptions holds information to create images. -type ImageCreateOptions struct { - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry. - Platform string // Platform is the target platform of the image if it needs to be pulled from the registry. -} - // ImageImportSource holds source information for ImageImport type ImageImportSource struct { Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this. SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute. } -// ImageImportOptions holds information to import images from the client host. -type ImageImportOptions struct { - Tag string // Tag is the name to tag this image with. This attribute is deprecated. - Message string // Message is the message to tag the image with - Changes []string // Changes are the raw changes to apply to this image - Platform string // Platform is the target platform of the image -} - -// ImageListOptions holds parameters to list images with. -type ImageListOptions struct { - // All controls whether all images in the graph are filtered, or just - // the heads. - All bool - - // Filters is a JSON-encoded set of filter arguments. - Filters filters.Args - - // SharedSize indicates whether the shared size of images should be computed. - SharedSize bool - - // ContainerCount indicates whether container count should be computed. - ContainerCount bool -} - // ImageLoadResponse returns information to the client about a load process. type ImageLoadResponse struct { // Body must be closed to avoid a resource leak @@ -200,14 +170,6 @@ type ImageLoadResponse struct { JSON bool } -// ImagePullOptions holds information to pull images. -type ImagePullOptions struct { - All bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - PrivilegeFunc RequestPrivilegeFunc - Platform string -} - // RequestPrivilegeFunc is a function interface that // clients can supply to retry operations after // getting an authorization error. @@ -216,15 +178,6 @@ type ImagePullOptions struct { // if the privilege request fails. type RequestPrivilegeFunc func() (string, error) -// ImagePushOptions holds information to push images. -type ImagePushOptions ImagePullOptions - -// ImageRemoveOptions holds parameters to remove images. 
-type ImageRemoveOptions struct { - Force bool - PruneChildren bool -} - // ImageSearchOptions holds parameters to search images with. type ImageSearchOptions struct { RegistryAuth string diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go index be41d6315..86f46b74a 100644 --- a/vendor/github.com/docker/docker/api/types/container/config.go +++ b/vendor/github.com/docker/docker/api/types/container/config.go @@ -5,8 +5,8 @@ import ( "time" "github.com/docker/docker/api/types/strslice" - dockerspec "github.com/docker/docker/image/spec/specs-go/v1" "github.com/docker/go-connections/nat" + dockerspec "github.com/moby/docker-image-spec/specs-go/v1" ) // MinimumDuration puts a minimum on user configured duration. diff --git a/vendor/github.com/docker/docker/api/types/image/opts.go b/vendor/github.com/docker/docker/api/types/image/opts.go index 3cefecb0d..c6b1f351b 100644 --- a/vendor/github.com/docker/docker/api/types/image/opts.go +++ b/vendor/github.com/docker/docker/api/types/image/opts.go @@ -1,9 +1,57 @@ package image -import ocispec "github.com/opencontainers/image-spec/specs-go/v1" +import "github.com/docker/docker/api/types/filters" -// GetImageOpts holds parameters to inspect an image. -type GetImageOpts struct { - Platform *ocispec.Platform - Details bool +// ImportOptions holds information to import images from the client host. +type ImportOptions struct { + Tag string // Tag is the name to tag this image with. This attribute is deprecated. + Message string // Message is the message to tag the image with + Changes []string // Changes are the raw changes to apply to this image + Platform string // Platform is the target platform of the image +} + +// CreateOptions holds information to create images. +type CreateOptions struct { + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry. + Platform string // Platform is the target platform of the image if it needs to be pulled from the registry. +} + +// PullOptions holds information to pull images. +type PullOptions struct { + All bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + + // PrivilegeFunc is a function that clients can supply to retry operations + // after getting an authorization error. This function returns the registry + // authentication header value in base64 encoded format, or an error if the + // privilege request fails. + // + // Also see [github.com/docker/docker/api/types.RequestPrivilegeFunc]. + PrivilegeFunc func() (string, error) + Platform string +} + +// PushOptions holds information to push images. +type PushOptions PullOptions + +// ListOptions holds parameters to list images with. +type ListOptions struct { + // All controls whether all images in the graph are filtered, or just + // the heads. + All bool + + // Filters is a JSON-encoded set of filter arguments. + Filters filters.Args + + // SharedSize indicates whether the shared size of images should be computed. + SharedSize bool + + // ContainerCount indicates whether container count should be computed. + ContainerCount bool +} + +// RemoveOptions holds parameters to remove images. 
+type RemoveOptions struct { + Force bool + PruneChildren bool } diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go index 57edf2ef1..6fe04da25 100644 --- a/vendor/github.com/docker/docker/api/types/mount/mount.go +++ b/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -96,6 +96,7 @@ type BindOptions struct { type VolumeOptions struct { NoCopy bool `json:",omitempty"` Labels map[string]string `json:",omitempty"` + Subpath string `json:",omitempty"` DriverConfig *Driver `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go index 05cb31075..6bbae93ef 100644 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -94,7 +94,7 @@ type SearchResult struct { Name string `json:"name"` // IsAutomated indicates whether the result is automated. // - // Deprecated: the "is_automated" field is deprecated and will always be "false" in the future. + // Deprecated: the "is_automated" field is deprecated and will always be "false". IsAutomated bool `json:"is_automated"` // Description is a textual description of the repository Description string `json:"description"` diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index 56a8b77d4..ca07162a2 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -82,7 +82,7 @@ type ImageInspect struct { // Depending on how the image was created, this field may be empty. // // Deprecated: this field is omitted in API v1.45, but kept for backward compatibility. - Container string + Container string `json:",omitempty"` // ContainerConfig is an optional field containing the configuration of the // container that was last committed when creating the image. @@ -91,7 +91,7 @@ type ImageInspect struct { // and it is not in active use anymore. // // Deprecated: this field is omitted in API v1.45, but kept for backward compatibility. - ContainerConfig *container.Config + ContainerConfig *container.Config `json:",omitempty"` // DockerVersion is the version of Docker that was used to build the image. // diff --git a/vendor/github.com/docker/docker/api/types/types_deprecated.go b/vendor/github.com/docker/docker/api/types/types_deprecated.go index e332a7bb6..231a5cca4 100644 --- a/vendor/github.com/docker/docker/api/types/types_deprecated.go +++ b/vendor/github.com/docker/docker/api/types/types_deprecated.go @@ -1,138 +1,35 @@ package types import ( - "github.com/docker/docker/api/types/checkpoint" - "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/api/types/system" ) -// CheckpointCreateOptions holds parameters to create a checkpoint from a container. +// ImageImportOptions holds information to import images from the client host. // -// Deprecated: use [checkpoint.CreateOptions]. -type CheckpointCreateOptions = checkpoint.CreateOptions +// Deprecated: use [image.ImportOptions]. +type ImageImportOptions = image.ImportOptions -// CheckpointListOptions holds parameters to list checkpoints for a container +// ImageCreateOptions holds information to create images. // -// Deprecated: use [checkpoint.ListOptions]. 
-type CheckpointListOptions = checkpoint.ListOptions +// Deprecated: use [image.CreateOptions]. +type ImageCreateOptions = image.CreateOptions -// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container +// ImagePullOptions holds information to pull images. // -// Deprecated: use [checkpoint.DeleteOptions]. -type CheckpointDeleteOptions = checkpoint.DeleteOptions +// Deprecated: use [image.PullOptions]. +type ImagePullOptions = image.PullOptions -// Checkpoint represents the details of a checkpoint when listing endpoints. +// ImagePushOptions holds information to push images. // -// Deprecated: use [checkpoint.Summary]. -type Checkpoint = checkpoint.Summary +// Deprecated: use [image.PushOptions]. +type ImagePushOptions = image.PushOptions -// Info contains response of Engine API: -// GET "/info" +// ImageListOptions holds parameters to list images with. // -// Deprecated: use [system.Info]. -type Info = system.Info +// Deprecated: use [image.ListOptions]. +type ImageListOptions = image.ListOptions -// Commit holds the Git-commit (SHA1) that a binary was built from, as reported -// in the version-string of external tools, such as containerd, or runC. +// ImageRemoveOptions holds parameters to remove images. // -// Deprecated: use [system.Commit]. -type Commit = system.Commit - -// PluginsInfo is a temp struct holding Plugins name -// registered with docker daemon. It is used by [system.Info] struct -// -// Deprecated: use [system.PluginsInfo]. -type PluginsInfo = system.PluginsInfo - -// NetworkAddressPool is a temp struct used by [system.Info] struct. -// -// Deprecated: use [system.NetworkAddressPool]. -type NetworkAddressPool = system.NetworkAddressPool - -// Runtime describes an OCI runtime. -// -// Deprecated: use [system.Runtime]. -type Runtime = system.Runtime - -// SecurityOpt contains the name and options of a security option. -// -// Deprecated: use [system.SecurityOpt]. -type SecurityOpt = system.SecurityOpt - -// KeyValue holds a key/value pair. -// -// Deprecated: use [system.KeyValue]. -type KeyValue = system.KeyValue - -// ImageDeleteResponseItem image delete response item. -// -// Deprecated: use [image.DeleteResponse]. -type ImageDeleteResponseItem = image.DeleteResponse - -// ImageSummary image summary. -// -// Deprecated: use [image.Summary]. -type ImageSummary = image.Summary - -// ImageMetadata contains engine-local data about the image. -// -// Deprecated: use [image.Metadata]. -type ImageMetadata = image.Metadata - -// ServiceCreateResponse contains the information returned to a client -// on the creation of a new service. -// -// Deprecated: use [swarm.ServiceCreateResponse]. -type ServiceCreateResponse = swarm.ServiceCreateResponse - -// ServiceUpdateResponse service update response. -// -// Deprecated: use [swarm.ServiceUpdateResponse]. -type ServiceUpdateResponse = swarm.ServiceUpdateResponse - -// ContainerStartOptions holds parameters to start containers. -// -// Deprecated: use [container.StartOptions]. -type ContainerStartOptions = container.StartOptions - -// ResizeOptions holds parameters to resize a TTY. -// It can be used to resize container TTYs and -// exec process TTYs too. -// -// Deprecated: use [container.ResizeOptions]. -type ResizeOptions = container.ResizeOptions - -// ContainerAttachOptions holds parameters to attach to a container. -// -// Deprecated: use [container.AttachOptions]. -type ContainerAttachOptions = container.AttachOptions - -// ContainerCommitOptions holds parameters to commit changes into a container. 
-// -// Deprecated: use [container.CommitOptions]. -type ContainerCommitOptions = container.CommitOptions - -// ContainerListOptions holds parameters to list containers with. -// -// Deprecated: use [container.ListOptions]. -type ContainerListOptions = container.ListOptions - -// ContainerLogsOptions holds parameters to filter logs with. -// -// Deprecated: use [container.LogsOptions]. -type ContainerLogsOptions = container.LogsOptions - -// ContainerRemoveOptions holds parameters to remove containers. -// -// Deprecated: use [container.RemoveOptions]. -type ContainerRemoveOptions = container.RemoveOptions - -// DecodeSecurityOptions decodes a security options string slice to a type safe -// [system.SecurityOpt]. -// -// Deprecated: use [system.DecodeSecurityOptions]. -func DecodeSecurityOptions(opts []string) ([]system.SecurityOpt, error) { - return system.DecodeSecurityOptions(opts) -} +// Deprecated: use [image.RemoveOptions]. +type ImageRemoveOptions = image.RemoveOptions diff --git a/vendor/github.com/docker/docker/api/types/versions/README.md b/vendor/github.com/docker/docker/api/types/versions/README.md deleted file mode 100644 index 1ef911edb..000000000 --- a/vendor/github.com/docker/docker/api/types/versions/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# Legacy API type versions - -This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`. - -Consider moving a type here when you need to keep backwards compatibility in the API. This legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`. - -## Package name conventions - -The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: - -1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`. -2. We cannot use `_` because golint complains about it. The code is actually valid, but it looks probably more weird: `v1_20.CallFunction`. - -For instance, if you want to modify a type that was available in the version `1.21` of the API but it will have different fields in the version `1.22`, you want to create a new package under `api/types/versions/v1p21`. diff --git a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go index 55fc5d389..bbd9ff0b8 100644 --- a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go +++ b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go @@ -238,13 +238,13 @@ type TopologyRequirement struct { // If requisite is specified, all topologies in preferred list MUST // also be present in the list of requisite topologies. // - // If the SP is unable to to make the provisioned volume available + // If the SP is unable to make the provisioned volume available // from any of the preferred topologies, the SP MAY choose a topology // from the list of requisite topologies. // If the list of requisite topologies is not specified, then the SP // MAY choose from the list of all possible topologies. 
// If the list of requisite topologies is specified and the SP is - // unable to to make the provisioned volume available from any of the + // unable to make the provisioned volume available from any of the // requisite topologies it MUST fail the CreateVolume call. // // Example 1: @@ -254,7 +254,7 @@ type TopologyRequirement struct { // {"region": "R1", "zone": "Z3"} // preferred = // {"region": "R1", "zone": "Z3"} - // then the the SP SHOULD first attempt to make the provisioned volume + // then the SP SHOULD first attempt to make the provisioned volume // available from "zone" "Z3" in the "region" "R1" and fall back to // "zone" "Z2" in the "region" "R1" if that is not possible. // @@ -268,7 +268,7 @@ type TopologyRequirement struct { // preferred = // {"region": "R1", "zone": "Z4"}, // {"region": "R1", "zone": "Z2"} - // then the the SP SHOULD first attempt to make the provisioned volume + // then the SP SHOULD first attempt to make the provisioned volume // accessible from "zone" "Z4" in the "region" "R1" and fall back to // "zone" "Z2" in the "region" "R1" if that is not possible. If that // is not possible, the SP may choose between either the "zone" @@ -287,7 +287,7 @@ type TopologyRequirement struct { // preferred = // {"region": "R1", "zone": "Z5"}, // {"region": "R1", "zone": "Z3"} - // then the the SP SHOULD first attempt to make the provisioned volume + // then the SP SHOULD first attempt to make the provisioned volume // accessible from the combination of the two "zones" "Z5" and "Z3" in // the "region" "R1". If that's not possible, it should fall back to // a combination of "Z5" and other possibilities from the list of diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go index 68ef31b78..68e6ec5ed 100644 --- a/vendor/github.com/docker/docker/client/distribution_inspect.go +++ b/vendor/github.com/docker/docker/client/distribution_inspect.go @@ -10,11 +10,11 @@ import ( ) // DistributionInspect returns the image digest with the full manifest. 
-func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error) { +func (cli *Client) DistributionInspect(ctx context.Context, imageRef, encodedRegistryAuth string) (registry.DistributionInspect, error) { // Contact the registry to retrieve digest and platform information var distributionInspect registry.DistributionInspect - if image == "" { - return distributionInspect, objectNotFoundError{object: "distribution", id: image} + if imageRef == "" { + return distributionInspect, objectNotFoundError{object: "distribution", id: imageRef} } if err := cli.NewVersionError(ctx, "1.30", "distribution inspect"); err != nil { @@ -28,7 +28,7 @@ func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegist } } - resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers) + resp, err := cli.get(ctx, "/distribution/"+imageRef+"/json", url.Values{}, headers) defer ensureReaderClosed(resp) if err != nil { return distributionInspect, err diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go index 29cd0b437..7c7873dca 100644 --- a/vendor/github.com/docker/docker/client/image_create.go +++ b/vendor/github.com/docker/docker/client/image_create.go @@ -8,13 +8,13 @@ import ( "strings" "github.com/distribution/reference" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/registry" ) // ImageCreate creates a new image based on the parent options. // It returns the JSON content in the response body. -func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { +func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options image.CreateOptions) (io.ReadCloser, error) { ref, err := reference.ParseNormalizedNamed(parentReference) if err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go index cd376a14e..5a890b0c5 100644 --- a/vendor/github.com/docker/docker/client/image_import.go +++ b/vendor/github.com/docker/docker/client/image_import.go @@ -8,11 +8,12 @@ import ( "github.com/distribution/reference" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/image" ) // ImageImport creates a new image based on the source options. // It returns the JSON content in the response body. -func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { +func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error) { if ref != "" { // Check if the given image name can be resolved if _, err := reference.ParseNormalizedNamed(ref); err != nil { diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go index fa6aecfc6..a9cc1e21e 100644 --- a/vendor/github.com/docker/docker/client/image_list.go +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -5,14 +5,13 @@ import ( "encoding/json" "net/url" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/versions" ) // ImageList returns a list of images in the docker host. 
-func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]image.Summary, error) { +func (cli *Client) ImageList(ctx context.Context, options image.ListOptions) ([]image.Summary, error) { var images []image.Summary // Make sure we negotiated (if the client is configured to do so), diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go index d92049d58..6438cf6a9 100644 --- a/vendor/github.com/docker/docker/client/image_pull.go +++ b/vendor/github.com/docker/docker/client/image_pull.go @@ -7,7 +7,7 @@ import ( "strings" "github.com/distribution/reference" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/image" "github.com/docker/docker/errdefs" ) @@ -19,7 +19,7 @@ import ( // FIXME(vdemeester): there is currently used in a few way in docker/docker // - if not in trusted content, ref is used to pass the whole reference, and tag is empty // - if in trusted content, ref is used to pass the reference name, and tag for the digest -func (cli *Client) ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) { +func (cli *Client) ImagePull(ctx context.Context, refStr string, options image.PullOptions) (io.ReadCloser, error) { ref, err := reference.ParseNormalizedNamed(refStr) if err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go index 6839a89e0..e6a6b11ee 100644 --- a/vendor/github.com/docker/docker/client/image_push.go +++ b/vendor/github.com/docker/docker/client/image_push.go @@ -8,7 +8,7 @@ import ( "net/url" "github.com/distribution/reference" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/errdefs" ) @@ -17,7 +17,7 @@ import ( // It executes the privileged function if the operation is unauthorized // and it tries one more time. // It's up to the caller to handle the io.ReadCloser and close it properly. -func (cli *Client) ImagePush(ctx context.Context, image string, options types.ImagePushOptions) (io.ReadCloser, error) { +func (cli *Client) ImagePush(ctx context.Context, image string, options image.PushOptions) (io.ReadCloser, error) { ref, err := reference.ParseNormalizedNamed(image) if err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go index b936d2083..652d1bfa3 100644 --- a/vendor/github.com/docker/docker/client/image_remove.go +++ b/vendor/github.com/docker/docker/client/image_remove.go @@ -5,12 +5,11 @@ import ( "encoding/json" "net/url" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/image" ) // ImageRemove removes an image from the docker host. 
-func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]image.DeleteResponse, error) { +func (cli *Client) ImageRemove(ctx context.Context, imageID string, options image.RemoveOptions) ([]image.DeleteResponse, error) { query := url.Values{} if options.Force { diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go index 302f5fb13..45d233f25 100644 --- a/vendor/github.com/docker/docker/client/interface.go +++ b/vendor/github.com/docker/docker/client/interface.go @@ -90,15 +90,15 @@ type ImageAPIClient interface { ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) BuildCancel(ctx context.Context, id string) error - ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) + ImageCreate(ctx context.Context, parentReference string, options image.CreateOptions) (io.ReadCloser, error) ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) - ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) + ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error) ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) - ImageList(ctx context.Context, options types.ImageListOptions) ([]image.Summary, error) + ImageList(ctx context.Context, options image.ListOptions) ([]image.Summary, error) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) - ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) - ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) - ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]image.DeleteResponse, error) + ImagePull(ctx context.Context, ref string, options image.PullOptions) (io.ReadCloser, error) + ImagePush(ctx context.Context, ref string, options image.PushOptions) (io.ReadCloser, error) + ImageRemove(ctx context.Context, image string, options image.RemoveOptions) ([]image.DeleteResponse, error) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) ImageTag(ctx context.Context, image, ref string) error diff --git a/vendor/github.com/go-openapi/analysis/.golangci.yml b/vendor/github.com/go-openapi/analysis/.golangci.yml index e24a6c14e..22f8d21cc 100644 --- a/vendor/github.com/go-openapi/analysis/.golangci.yml +++ b/vendor/github.com/go-openapi/analysis/.golangci.yml @@ -4,53 +4,58 @@ linters-settings: golint: min-confidence: 0 gocyclo: - min-complexity: 40 - gocognit: - min-complexity: 40 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 150 + threshold: 200 goconst: min-len: 2 - min-occurrences: 4 + min-occurrences: 3 linters: enable-all: true disable: - maligned + - unparam - lll - - gochecknoglobals - gochecknoinits - # scopelint is useful, but also reports false positives - # that unfortunately can't be disabled. So we disable the - # linter rather than changing code that works. 
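The docker/docker client hunks above move the image-related option types from api/types into api/types/image (PullOptions, PushOptions, ListOptions, RemoveOptions, CreateOptions, ImportOptions), keeping the old names only as deprecated aliases. A minimal caller-side sketch of the new signatures, not taken from this repository; the client constructor helpers (NewClientWithOpts, FromEnv, WithAPIVersionNegotiation) come from the docker client package and do not appear in this diff:

```go
package dockerexample

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types/image"
	"github.com/docker/docker/client"
)

// pullAndPrune shows the post-migration option types: image.PullOptions and
// image.RemoveOptions replace types.ImagePullOptions / types.ImageRemoveOptions.
func pullAndPrune(ctx context.Context, ref string) error {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		return err
	}
	defer cli.Close()

	rc, err := cli.ImagePull(ctx, ref, image.PullOptions{})
	if err != nil {
		return err
	}
	defer rc.Close()
	if _, err := io.Copy(os.Stdout, rc); err != nil { // stream pull progress JSON
		return err
	}

	// Remove the image again, forcing removal of the tag.
	_, err = cli.ImageRemove(ctx, ref, image.RemoveOptions{Force: true})
	return err
}
```

Because the deprecated aliases remain in types_deprecated.go, existing callers keep compiling and can switch to the image package incrementally.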
- # see: https://github.com/kyoh86/scopelint/issues/4 - - scopelint + - gochecknoglobals + - funlen - godox - gocognit - #- whitespace + - whitespace - wsl - - funlen - - testpackage - wrapcheck - #- nlreturn + - testpackage + - nlreturn - gomnd - - goerr113 - exhaustivestruct - #- errorlint - #- nestif - - gofumpt + - goerr113 + - errorlint + - nestif - godot - - gci - - dogsled + - gofumpt - paralleltest - tparallel - thelper - ifshort - - forbidigo - - cyclop - - varnamelen - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint - nosnakecase diff --git a/vendor/github.com/go-openapi/analysis/README.md b/vendor/github.com/go-openapi/analysis/README.md index aad6da10f..e005d4b37 100644 --- a/vendor/github.com/go-openapi/analysis/README.md +++ b/vendor/github.com/go-openapi/analysis/README.md @@ -1,8 +1,5 @@ -# OpenAPI initiative analysis +# OpenAPI analysis [![Build Status](https://github.com/go-openapi/analysis/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/analysis/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/analysis/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/analysis) -[![Build Status](https://travis-ci.org/go-openapi/analysis.svg?branch=master)](https://travis-ci.org/go-openapi/analysis) -[![Build status](https://ci.appveyor.com/api/projects/status/x377t5o9ennm847o/branch/master?svg=true)](https://ci.appveyor.com/project/casualjim/go-openapi/analysis/branch/master) -[![codecov](https://codecov.io/gh/go-openapi/analysis/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/analysis) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE) [![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/analysis.svg)](https://pkg.go.dev/github.com/go-openapi/analysis) @@ -13,12 +10,12 @@ A foundational library to analyze an OAI specification document for easier reaso ## What's inside? -* A analyzer providing methods to walk the functional content of a specification +* An analyzer providing methods to walk the functional content of a specification * A spec flattener producing a self-contained document bundle, while preserving `$ref`s * A spec merger ("mixin") to merge several spec documents into a primary spec * A spec "fixer" ensuring that response descriptions are non empty -[Documentation](https://godoc.org/github.com/go-openapi/analysis) +[Documentation](https://pkg.go.dev/github.com/go-openapi/analysis) ## FAQ @@ -28,4 +25,3 @@ A foundational library to analyze an OAI specification document for easier reaso > This package currently only supports OpenAPI 2.0 (aka Swagger 2.0). > There is no plan to make it evolve toward supporting OpenAPI 3.x. > This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story. 
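The go-openapi/analysis README above summarizes the package: an analyzer, a spec flattener, a mixin merger and a response-description fixer. As a hypothetical illustration of the mixin entry point, whose signature (analysis.Mixin(primary, mixins ...) returning the skipped items) appears in the mixin.go hunks further down in this diff:

```go
package analysisexample

import (
	"log"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/loads"
)

// mergeSpecs folds a secondary Swagger 2.0 document into a primary one.
// Paths that already exist in the primary are skipped and reported back;
// colliding operationIDs are renamed with a "Mixin<index>" suffix.
func mergeSpecs(primaryPath, secondaryPath string) error {
	primary, err := loads.Spec(primaryPath)
	if err != nil {
		return err
	}
	secondary, err := loads.Spec(secondaryPath)
	if err != nil {
		return err
	}

	skipped := analysis.Mixin(primary.Spec(), secondary.Spec())
	for _, msg := range skipped {
		log.Println("mixin skipped:", msg)
	}
	return nil
}
```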
-> diff --git a/vendor/github.com/go-openapi/analysis/appveyor.yml b/vendor/github.com/go-openapi/analysis/appveyor.yml deleted file mode 100644 index c2f6fd733..000000000 --- a/vendor/github.com/go-openapi/analysis/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: "0.1.{build}" - -clone_folder: C:\go-openapi\analysis -shallow_clone: true # for startup speed -pull_requests: - do_not_increment_build_number: true - -#skip_tags: true -#skip_branch_with_pr: true - -# appveyor.yml -build: off - -environment: - GOPATH: c:\gopath - -stack: go 1.16 - -test_script: - - go test -v -timeout 20m ./... - -deploy: off - -notifications: - - provider: Slack - incoming_webhook: https://hooks.slack.com/services/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 - auth_token: - secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4= - channel: bots - on_build_success: false - on_build_failure: true - on_build_status_changed: true diff --git a/vendor/github.com/go-openapi/analysis/doc.go b/vendor/github.com/go-openapi/analysis/doc.go index d5294c095..e8d9f9b13 100644 --- a/vendor/github.com/go-openapi/analysis/doc.go +++ b/vendor/github.com/go-openapi/analysis/doc.go @@ -16,27 +16,27 @@ Package analysis provides methods to work with a Swagger specification document from package go-openapi/spec. -Analyzing a specification +## Analyzing a specification An analysed specification object (type Spec) provides methods to work with swagger definition. -Flattening or expanding a specification +## Flattening or expanding a specification Flattening a specification bundles all remote $ref in the main spec document. Depending on flattening options, additional preprocessing may take place: - full flattening: replacing all inline complex constructs by a named entry in #/definitions - expand: replace all $ref's in the document by their expanded content -Merging several specifications +## Merging several specifications Mixin several specifications merges all Swagger constructs, and warns about found conflicts. -Fixing a specification +## Fixing a specification Unmarshalling a specification with golang json unmarshalling may lead to some unwanted result on present but empty fields. -Analyzing a Swagger schema +## Analyzing a Swagger schema Swagger schemas are analyzed to determine their complexity and qualify their content. */ diff --git a/vendor/github.com/go-openapi/analysis/flatten.go b/vendor/github.com/go-openapi/analysis/flatten.go index 0576220fb..ebedcc9df 100644 --- a/vendor/github.com/go-openapi/analysis/flatten.go +++ b/vendor/github.com/go-openapi/analysis/flatten.go @@ -62,28 +62,26 @@ func newContext() *context { // // There is a minimal and a full flattening mode. 
// -// // Minimally flattening a spec means: -// - Expanding parameters, responses, path items, parameter items and header items (references to schemas are left -// unscathed) -// - Importing external (http, file) references so they become internal to the document -// - Moving every JSON pointer to a $ref to a named definition (i.e. the reworked spec does not contain pointers -// like "$ref": "#/definitions/myObject/allOfs/1") +// - Expanding parameters, responses, path items, parameter items and header items (references to schemas are left +// unscathed) +// - Importing external (http, file) references so they become internal to the document +// - Moving every JSON pointer to a $ref to a named definition (i.e. the reworked spec does not contain pointers +// like "$ref": "#/definitions/myObject/allOfs/1") // // A minimally flattened spec thus guarantees the following properties: -// - all $refs point to a local definition (i.e. '#/definitions/...') -// - definitions are unique +// - all $refs point to a local definition (i.e. '#/definitions/...') +// - definitions are unique // // NOTE: arbitrary JSON pointers (other than $refs to top level definitions) are rewritten as definitions if they // represent a complex schema or express commonality in the spec. // Otherwise, they are simply expanded. // Self-referencing JSON pointers cannot resolve to a type and trigger an error. // -// // Minimal flattening is necessary and sufficient for codegen rendering using go-swagger. // // Fully flattening a spec means: -// - Moving every complex inline schema to be a definition with an auto-generated name in a depth-first fashion. +// - Moving every complex inline schema to be a definition with an auto-generated name in a depth-first fashion. // // By complex, we mean every JSON object with some properties. // Arrays, when they do not define a tuple, @@ -93,22 +91,21 @@ func newContext() *context { // have been created. // // Available flattening options: -// - Minimal: stops flattening after minimal $ref processing, leaving schema constructs untouched -// - Expand: expand all $ref's in the document (inoperant if Minimal set to true) -// - Verbose: croaks about name conflicts detected -// - RemoveUnused: removes unused parameters, responses and definitions after expansion/flattening +// - Minimal: stops flattening after minimal $ref processing, leaving schema constructs untouched +// - Expand: expand all $ref's in the document (inoperant if Minimal set to true) +// - Verbose: croaks about name conflicts detected +// - RemoveUnused: removes unused parameters, responses and definitions after expansion/flattening // // NOTE: expansion removes all $ref save circular $ref, which remain in place // // TODO: additional options -// - ProgagateNameExtensions: ensure that created entries properly follow naming rules when their parent have set a -// x-go-name extension -// - LiftAllOfs: -// - limit the flattening of allOf members when simple objects -// - merge allOf with validation only -// - merge allOf with extensions only -// - ... -// +// - ProgagateNameExtensions: ensure that created entries properly follow naming rules when their parent have set a +// x-go-name extension +// - LiftAllOfs: +// - limit the flattening of allOf members when simple objects +// - merge allOf with validation only +// - merge allOf with extensions only +// - ... 
func Flatten(opts FlattenOpts) error { debugLog("FlattenOpts: %#v", opts) @@ -270,6 +267,12 @@ func nameInlinedSchemas(opts *FlattenOpts) error { } func removeUnused(opts *FlattenOpts) { + for removeUnusedSinglePass(opts) { + // continue until no unused definition remains + } +} + +func removeUnusedSinglePass(opts *FlattenOpts) (hasRemoved bool) { expected := make(map[string]struct{}) for k := range opts.Swagger().Definitions { expected[path.Join(definitionsPath, jsonpointer.Escape(k))] = struct{}{} @@ -280,6 +283,7 @@ func removeUnused(opts *FlattenOpts) { } for k := range expected { + hasRemoved = true debugLog("removing unused definition %s", path.Base(k)) if opts.Verbose { log.Printf("info: removing unused definition: %s", path.Base(k)) @@ -288,6 +292,8 @@ func removeUnused(opts *FlattenOpts) { } opts.Spec.reload() // re-analyze + + return hasRemoved } func importKnownRef(entry sortref.RefRevIdx, refStr, newName string, opts *FlattenOpts) error { @@ -334,7 +340,7 @@ func importNewRef(entry sortref.RefRevIdx, refStr string, opts *FlattenOpts) err } // generate a unique name - isOAIGen means that a naming conflict was resolved by changing the name - newName, isOAIGen = uniqifyName(opts.Swagger().Definitions, nameFromRef(entry.Ref)) + newName, isOAIGen = uniqifyName(opts.Swagger().Definitions, nameFromRef(entry.Ref, opts)) debugLog("new name for [%s]: %s - with name conflict:%t", strings.Join(entry.Keys, ", "), newName, isOAIGen) opts.flattenContext.resolved[refStr] = newName @@ -488,9 +494,9 @@ func stripPointersAndOAIGen(opts *FlattenOpts) error { // stripOAIGen strips the spec from unnecessary OAIGen constructs, initially created to dedupe flattened definitions. // // A dedupe is deemed unnecessary whenever: -// - the only conflict is with its (single) parent: OAIGen is merged into its parent (reinlining) -// - there is a conflict with multiple parents: merge OAIGen in first parent, the rewrite other parents to point to -// the first parent. +// - the only conflict is with its (single) parent: OAIGen is merged into its parent (reinlining) +// - there is a conflict with multiple parents: merge OAIGen in first parent, the rewrite other parents to point to +// the first parent. // // This function returns true whenever it re-inlined a complex schema, so the caller may chose to iterate // pointer and name resolution again. 
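The flatten.go hunks above rewrap the Flatten documentation, make removeUnused loop until no unused definition remains, and thread FlattenOpts into nameFromRef so the new KeepNames option can skip name mangling. A sketch of driving Flatten from caller code; the Spec, BasePath and Minimal fields are assumed from the package's public API and are not shown in this diff:

```go
package flattenexample

import (
	"encoding/json"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/loads"
)

// flattenSpec minimally flattens a spec and prunes unused definitions,
// relying on the fixpoint loop added to removeUnused above.
func flattenSpec(path string) ([]byte, error) {
	doc, err := loads.Spec(path)
	if err != nil {
		return nil, err
	}

	err = analysis.Flatten(analysis.FlattenOpts{
		Spec:         doc.Analyzer, // analyzed spec produced by loads (assumed wiring)
		BasePath:     path,         // base for resolving relative $refs (assumed field)
		Minimal:      true,         // keep schema constructs, only rework $refs (assumed field)
		RemoveUnused: true,         // now repeated until nothing is left to remove
		KeepNames:    true,         // new option: do not jsonify names taken from $refs
	})
	if err != nil {
		return nil, err
	}
	return json.MarshalIndent(doc.Spec(), "", "  ")
}
```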
@@ -652,6 +658,7 @@ func namePointers(opts *FlattenOpts) error { refsToReplace := make(map[string]SchemaRef, len(opts.Spec.references.schemas)) for k, ref := range opts.Spec.references.allRefs { + debugLog("name pointers: %q => %#v", k, ref) if path.Dir(ref.String()) == definitionsPath { // this a ref to a top-level definition: ok continue @@ -769,6 +776,10 @@ func flattenAnonPointer(key string, v SchemaRef, refsToReplace map[string]Schema // identifying edge case when the namer did nothing because we point to a non-schema object // no definition is created and we expand the $ref for all callers + debugLog("decide what to do with the schema pointed to: asch.IsSimpleSchema=%t, len(callers)=%d, parts.IsSharedParam=%t, parts.IsSharedResponse=%t", + asch.IsSimpleSchema, len(callers), parts.IsSharedParam(), parts.IsSharedResponse(), + ) + if (!asch.IsSimpleSchema || len(callers) > 1) && !parts.IsSharedParam() && !parts.IsSharedResponse() { debugLog("replace JSON pointer at [%s] by definition: %s", key, v.Ref.String()) if err := namer.Name(v.Ref.String(), v.Schema, asch); err != nil { @@ -791,6 +802,7 @@ func flattenAnonPointer(key string, v SchemaRef, refsToReplace map[string]Schema return nil } + // everything that is a simple schema and not factorizable is expanded debugLog("expand JSON pointer for key=%s", key) if err := replace.UpdateRefWithSchema(opts.Swagger(), key, v.Schema); err != nil { diff --git a/vendor/github.com/go-openapi/analysis/flatten_name.go b/vendor/github.com/go-openapi/analysis/flatten_name.go index 3ad2ccfbf..c7d7938eb 100644 --- a/vendor/github.com/go-openapi/analysis/flatten_name.go +++ b/vendor/github.com/go-openapi/analysis/flatten_name.go @@ -33,12 +33,14 @@ func (isn *InlineSchemaNamer) Name(key string, schema *spec.Schema, aschema *Ana } // create unique name - newName, isOAIGen := uniqifyName(isn.Spec.Definitions, swag.ToJSONName(name)) + mangle := mangler(isn.opts) + newName, isOAIGen := uniqifyName(isn.Spec.Definitions, mangle(name)) // clone schema sch := schutils.Clone(schema) // replace values on schema + debugLog("rewriting schema to ref: key=%s with new name: %s", key, newName) if err := replace.RewriteSchemaToRef(isn.Spec, key, spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil { return fmt.Errorf("error while creating definition %q from inline schema: %w", newName, err) @@ -149,13 +151,15 @@ func namesFromKey(parts sortref.SplitKey, aschema *AnalyzedSchema, operations ma startIndex int ) - if parts.IsOperation() { + switch { + case parts.IsOperation(): baseNames, startIndex = namesForOperation(parts, operations) - } - - // definitions - if parts.IsDefinition() { + case parts.IsDefinition(): baseNames, startIndex = namesForDefinition(parts) + default: + // this a non-standard pointer: build a name by concatenating its parts + baseNames = [][]string{parts} + startIndex = len(baseNames) + 1 } result := make([]string, 0, len(baseNames)) @@ -169,6 +173,7 @@ func namesFromKey(parts sortref.SplitKey, aschema *AnalyzedSchema, operations ma } sort.Strings(result) + debugLog("names from parts: %v => %v", parts, result) return result } @@ -256,10 +261,20 @@ func partAdder(aschema *AnalyzedSchema) sortref.PartAdder { } } -func nameFromRef(ref spec.Ref) string { +func mangler(o *FlattenOpts) func(string) string { + if o.KeepNames { + return func(in string) string { return in } + } + + return swag.ToJSONName +} + +func nameFromRef(ref spec.Ref, o *FlattenOpts) string { + mangle := mangler(o) + u := ref.GetURL() if u.Fragment != "" { - return 
swag.ToJSONName(path.Base(u.Fragment)) + return mangle(path.Base(u.Fragment)) } if u.Path != "" { @@ -267,19 +282,19 @@ func nameFromRef(ref spec.Ref) string { if bn != "" && bn != "/" { ext := path.Ext(bn) if ext != "" { - return swag.ToJSONName(bn[:len(bn)-len(ext)]) + return mangle(bn[:len(bn)-len(ext)]) } - return swag.ToJSONName(bn) + return mangle(bn) } } - return swag.ToJSONName(strings.ReplaceAll(u.Host, ".", " ")) + return mangle(strings.ReplaceAll(u.Host, ".", " ")) } // GenLocation indicates from which section of the specification (models or operations) a definition has been created. // -// This is reflected in the output spec with a "x-go-gen-location" extension. At the moment, this is is provided +// This is reflected in the output spec with a "x-go-gen-location" extension. At the moment, this is provided // for information only. func GenLocation(parts sortref.SplitKey) string { switch { diff --git a/vendor/github.com/go-openapi/analysis/flatten_options.go b/vendor/github.com/go-openapi/analysis/flatten_options.go index c5bb97b0a..c943fe1e8 100644 --- a/vendor/github.com/go-openapi/analysis/flatten_options.go +++ b/vendor/github.com/go-openapi/analysis/flatten_options.go @@ -26,6 +26,7 @@ type FlattenOpts struct { Verbose bool // enable some reporting on possible name conflicts detected RemoveUnused bool // When true, remove unused parameters, responses and definitions after expansion/flattening ContinueOnError bool // Continue when spec expansion issues are found + KeepNames bool // Do not attempt to jsonify names from references when flattening /* Extra keys */ _ struct{} // require keys diff --git a/vendor/github.com/go-openapi/analysis/internal/debug/debug.go b/vendor/github.com/go-openapi/analysis/internal/debug/debug.go index ec0fec022..39f55a97b 100644 --- a/vendor/github.com/go-openapi/analysis/internal/debug/debug.go +++ b/vendor/github.com/go-openapi/analysis/internal/debug/debug.go @@ -29,7 +29,7 @@ var ( // GetLogger provides a prefix debug logger func GetLogger(prefix string, debug bool) func(string, ...interface{}) { if debug { - logger := log.New(output, fmt.Sprintf("%s:", prefix), log.LstdFlags) + logger := log.New(output, prefix+":", log.LstdFlags) return func(msg string, args ...interface{}) { _, file1, pos1, _ := runtime.Caller(1) @@ -37,5 +37,5 @@ func GetLogger(prefix string, debug bool) func(string, ...interface{}) { } } - return func(msg string, args ...interface{}) {} + return func(_ string, _ ...interface{}) {} } diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go b/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go index 26c2a05a3..c0f43e728 100644 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go +++ b/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go @@ -1,6 +1,7 @@ package replace import ( + "encoding/json" "fmt" "net/url" "os" @@ -40,6 +41,8 @@ func RewriteSchemaToRef(sp *spec.Swagger, key string, ref spec.Ref) error { if refable.Schema != nil { refable.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} } + case map[string]interface{}: // this happens e.g. 
if a schema points to an extension unmarshaled as map[string]interface{} + return rewriteParentRef(sp, key, ref) default: return fmt.Errorf("no schema with ref found at %s for %T", key, value) } @@ -120,6 +123,9 @@ func rewriteParentRef(sp *spec.Swagger, key string, ref spec.Ref) error { case spec.SchemaProperties: container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + case *interface{}: + *container = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} + // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema default: @@ -318,8 +324,8 @@ type DeepestRefResult struct { } // DeepestRef finds the first definition ref, from a cascade of nested refs which are not definitions. -// - if no definition is found, returns the deepest ref. -// - pointers to external files are expanded +// - if no definition is found, returns the deepest ref. +// - pointers to external files are expanded // // NOTE: all external $ref's are assumed to be already expanded at this stage. func DeepestRef(sp *spec.Swagger, opts *spec.ExpandOptions, ref spec.Ref) (*DeepestRefResult, error) { @@ -385,8 +391,9 @@ DOWNREF: err := asSchema.UnmarshalJSON(asJSON) if err != nil { return nil, - fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema a, got: %T", - currentRef.String(), value) + fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema a, got: %T (%v)", + currentRef.String(), value, err, + ) } warnings = append(warnings, fmt.Sprintf("found $ref %q (response) interpreted as schema", currentRef.String())) @@ -402,8 +409,9 @@ DOWNREF: var asSchema spec.Schema if err := asSchema.UnmarshalJSON(asJSON); err != nil { return nil, - fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema a, got: %T", - currentRef.String(), value) + fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema a, got: %T (%v)", + currentRef.String(), value, err, + ) } warnings = append(warnings, fmt.Sprintf("found $ref %q (parameter) interpreted as schema", currentRef.String())) @@ -414,9 +422,25 @@ DOWNREF: currentRef = asSchema.Ref default: - return nil, - fmt.Errorf("unhandled type to resolve JSON pointer %s. Expected a Schema, got: %T", - currentRef.String(), value) + // fallback: attempts to resolve the pointer as a schema + if refable == nil { + break DOWNREF + } + + asJSON, _ := json.Marshal(refable) + var asSchema spec.Schema + if err := asSchema.UnmarshalJSON(asJSON); err != nil { + return nil, + fmt.Errorf("unhandled type to resolve JSON pointer %s. Expected a Schema, got: %T (%v)", + currentRef.String(), value, err, + ) + } + warnings = append(warnings, fmt.Sprintf("found $ref %q (%T) interpreted as schema", currentRef.String(), refable)) + + if asSchema.Ref.String() == "" { + break DOWNREF + } + currentRef = asSchema.Ref } } diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go index 18e552ead..ac80fc2e8 100644 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go +++ b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go @@ -69,7 +69,7 @@ func KeyParts(key string) SplitKey { return res } -// SplitKey holds of the parts of a /-separated key, soi that their location may be determined. +// SplitKey holds of the parts of a /-separated key, so that their location may be determined. 
type SplitKey []string // IsDefinition is true when the split key is in the #/definitions section of a spec diff --git a/vendor/github.com/go-openapi/analysis/mixin.go b/vendor/github.com/go-openapi/analysis/mixin.go index b25305264..7785a29b2 100644 --- a/vendor/github.com/go-openapi/analysis/mixin.go +++ b/vendor/github.com/go-openapi/analysis/mixin.go @@ -53,7 +53,7 @@ import ( // collisions. func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string { skipped := make([]string, 0, len(mixins)) - opIds := getOpIds(primary) + opIDs := getOpIDs(primary) initPrimary(primary) for i, m := range mixins { @@ -74,7 +74,7 @@ func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string { skipped = append(skipped, mergeDefinitions(primary, m)...) // merging paths requires a map of operationIDs to work with - skipped = append(skipped, mergePaths(primary, m, opIds, i)...) + skipped = append(skipped, mergePaths(primary, m, opIDs, i)...) skipped = append(skipped, mergeParameters(primary, m)...) @@ -84,9 +84,9 @@ func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string { return skipped } -// getOpIds extracts all the paths..operationIds from the given +// getOpIDs extracts all the paths..operationIds from the given // spec and returns them as the keys in a map with 'true' values. -func getOpIds(s *spec.Swagger) map[string]bool { +func getOpIDs(s *spec.Swagger) map[string]bool { rv := make(map[string]bool) if s.Paths == nil { return rv @@ -179,7 +179,7 @@ func mergeDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string) return } -func mergePaths(primary *spec.Swagger, m *spec.Swagger, opIds map[string]bool, mixIndex int) (skipped []string) { +func mergePaths(primary *spec.Swagger, m *spec.Swagger, opIDs map[string]bool, mixIndex int) (skipped []string) { if m.Paths != nil { for k, v := range m.Paths.Paths { if _, exists := primary.Paths.Paths[k]; exists { @@ -198,10 +198,10 @@ func mergePaths(primary *spec.Swagger, m *spec.Swagger, opIds map[string]bool, m // all the proivded specs are already unique. piops := pathItemOps(v) for _, piop := range piops { - if opIds[piop.ID] { + if opIDs[piop.ID] { piop.ID = fmt.Sprintf("%v%v%v", piop.ID, "Mixin", mixIndex) } - opIds[piop.ID] = true + opIDs[piop.ID] = true } primary.Paths.Paths[k] = v } @@ -367,7 +367,7 @@ func mergeSwaggerProps(primary *spec.Swagger, m *spec.Swagger) []string { return skipped } -// nolint: unparam +//nolint:unparam func mergeExternalDocs(primary *spec.ExternalDocumentation, m *spec.ExternalDocumentation) []string { if primary.Description == "" { primary.Description = m.Description diff --git a/vendor/github.com/go-openapi/analysis/schema.go b/vendor/github.com/go-openapi/analysis/schema.go index fc055095c..ab190db5b 100644 --- a/vendor/github.com/go-openapi/analysis/schema.go +++ b/vendor/github.com/go-openapi/analysis/schema.go @@ -1,7 +1,7 @@ package analysis import ( - "fmt" + "errors" "github.com/go-openapi/spec" "github.com/go-openapi/strfmt" @@ -19,7 +19,7 @@ type SchemaOpts struct { // patterns. func Schema(opts SchemaOpts) (*AnalyzedSchema, error) { if opts.Schema == nil { - return nil, fmt.Errorf("no schema to analyze") + return nil, errors.New("no schema to analyze") } a := &AnalyzedSchema{ @@ -247,10 +247,10 @@ func (a *AnalyzedSchema) isArrayType() bool { // isAnalyzedAsComplex determines if an analyzed schema is eligible to flattening (i.e. it is "complex"). 
// // Complex means the schema is any of: -// - a simple type (primitive) -// - an array of something (items are possibly complex ; if this is the case, items will generate a definition) -// - a map of something (additionalProperties are possibly complex ; if this is the case, additionalProperties will -// generate a definition) +// - a simple type (primitive) +// - an array of something (items are possibly complex ; if this is the case, items will generate a definition) +// - a map of something (additionalProperties are possibly complex ; if this is the case, additionalProperties will +// generate a definition) func (a *AnalyzedSchema) isAnalyzedAsComplex() bool { return !a.IsSimpleSchema && !a.IsArray && !a.IsMap } diff --git a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml new file mode 100644 index 000000000..22f8d21cc --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml @@ -0,0 +1,61 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 45 + maligned: + suggest-new: true + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + +linters: + enable-all: true + disable: + - maligned + - unparam + - lll + - gochecknoinits + - gochecknoglobals + - funlen + - godox + - gocognit + - whitespace + - wsl + - wrapcheck + - testpackage + - nlreturn + - gomnd + - exhaustivestruct + - goerr113 + - errorlint + - nestif + - godot + - gofumpt + - paralleltest + - tparallel + - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md index 813788aff..0108f1d57 100644 --- a/vendor/github.com/go-openapi/jsonpointer/README.md +++ b/vendor/github.com/go-openapi/jsonpointer/README.md @@ -1,6 +1,10 @@ -# gojsonpointer [![Build Status](https://travis-ci.org/go-openapi/jsonpointer.svg?branch=master)](https://travis-ci.org/go-openapi/jsonpointer) [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +# gojsonpointer [![Build Status](https://github.com/go-openapi/jsonpointer/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/jsonpointer/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) + +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/jsonpointer.svg)](https://pkg.go.dev/github.com/go-openapi/jsonpointer) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/jsonpointer)](https://goreportcard.com/report/github.com/go-openapi/jsonpointer) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) 
[![GoDoc](https://godoc.org/github.com/go-openapi/jsonpointer?status.svg)](http://godoc.org/github.com/go-openapi/jsonpointer) An implementation of JSON Pointer - Go language ## Status diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go index 7df9853de..d970c7cf4 100644 --- a/vendor/github.com/go-openapi/jsonpointer/pointer.go +++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go @@ -26,6 +26,7 @@ package jsonpointer import ( + "encoding/json" "errors" "fmt" "reflect" @@ -40,6 +41,7 @@ const ( pointerSeparator = `/` invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator + notFound = `Can't find the pointer in the document` ) var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem() @@ -48,13 +50,13 @@ var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem() // JSONPointable is an interface for structs to implement when they need to customize the // json pointer process type JSONPointable interface { - JSONLookup(string) (interface{}, error) + JSONLookup(string) (any, error) } // JSONSetable is an interface for structs to implement when they need to customize the // json pointer process type JSONSetable interface { - JSONSet(string, interface{}) error + JSONSet(string, any) error } // New creates a new json pointer for the given string @@ -81,9 +83,7 @@ func (p *Pointer) parse(jsonPointerString string) error { err = errors.New(invalidStart) } else { referenceTokens := strings.Split(jsonPointerString, pointerSeparator) - for _, referenceToken := range referenceTokens[1:] { - p.referenceTokens = append(p.referenceTokens, referenceToken) - } + p.referenceTokens = append(p.referenceTokens, referenceTokens[1:]...) } } @@ -91,38 +91,58 @@ func (p *Pointer) parse(jsonPointerString string) error { } // Get uses the pointer to retrieve a value from a JSON document -func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) { +func (p *Pointer) Get(document any) (any, reflect.Kind, error) { return p.get(document, swag.DefaultJSONNameProvider) } // Set uses the pointer to set a value from a JSON document -func (p *Pointer) Set(document interface{}, value interface{}) (interface{}, error) { +func (p *Pointer) Set(document any, value any) (any, error) { return document, p.set(document, value, swag.DefaultJSONNameProvider) } // GetForToken gets a value for a json pointer token 1 level deep -func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) { +func GetForToken(document any, decodedToken string) (any, reflect.Kind, error) { return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider) } // SetForToken gets a value for a json pointer token 1 level deep -func SetForToken(document interface{}, decodedToken string, value interface{}) (interface{}, error) { +func SetForToken(document any, decodedToken string, value any) (any, error) { return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider) } -func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { +func isNil(input any) bool { + if input == nil { + return true + } + + kind := reflect.TypeOf(input).Kind() + switch kind { //nolint:exhaustive + case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan: + return reflect.ValueOf(input).IsNil() + default: + return false + } +} + +func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvider) (any, 
reflect.Kind, error) { rValue := reflect.Indirect(reflect.ValueOf(node)) kind := rValue.Kind() + if isNil(node) { + return nil, kind, fmt.Errorf("nil value has not field %q", decodedToken) + } - if rValue.Type().Implements(jsonPointableType) { - r, err := node.(JSONPointable).JSONLookup(decodedToken) + switch typed := node.(type) { + case JSONPointable: + r, err := typed.JSONLookup(decodedToken) if err != nil { return nil, kind, err } return r, kind, nil + case *any: // case of a pointer to interface, that is not resolved by reflect.Indirect + return getSingleImpl(*typed, decodedToken, nameProvider) } - switch kind { + switch kind { //nolint:exhaustive case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { @@ -159,7 +179,7 @@ func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.Nam } -func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *swag.NameProvider) error { +func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameProvider) error { rValue := reflect.Indirect(reflect.ValueOf(node)) if ns, ok := node.(JSONSetable); ok { // pointer impl @@ -170,7 +190,7 @@ func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *sw return node.(JSONSetable).JSONSet(decodedToken, data) } - switch rValue.Kind() { + switch rValue.Kind() { //nolint:exhaustive case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { @@ -210,7 +230,7 @@ func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *sw } -func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { +func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.Kind, error) { if nameProvider == nil { nameProvider = swag.DefaultJSONNameProvider @@ -231,8 +251,7 @@ func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interf if err != nil { return nil, knd, err } - node, kind = r, knd - + node = r } rValue := reflect.ValueOf(node) @@ -241,11 +260,11 @@ func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interf return node, kind, nil } -func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) error { +func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { knd := reflect.ValueOf(node).Kind() if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { - return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values") + return errors.New("only structs, pointers, maps and slices are supported for setting values") } if nameProvider == nil { @@ -284,7 +303,7 @@ func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) e continue } - switch kind { + switch kind { //nolint:exhaustive case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { @@ -363,6 +382,128 @@ func (p *Pointer) String() string { return pointerString } +func (p *Pointer) Offset(document string) (int64, error) { + dec := json.NewDecoder(strings.NewReader(document)) + var offset int64 + for _, ttk := range p.DecodedTokens() { + tk, err := dec.Token() + if err != nil { + return 0, err + } + switch tk := tk.(type) { + case json.Delim: + switch tk { + case '{': + offset, err = offsetSingleObject(dec, ttk) + if err != nil { + return 0, err + } + case '[': + offset, err = offsetSingleArray(dec, ttk) + if 
err != nil { + return 0, err + } + default: + return 0, fmt.Errorf("invalid token %#v", tk) + } + default: + return 0, fmt.Errorf("invalid token %#v", tk) + } + } + return offset, nil +} + +func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) { + for dec.More() { + offset := dec.InputOffset() + tk, err := dec.Token() + if err != nil { + return 0, err + } + switch tk := tk.(type) { + case json.Delim: + switch tk { + case '{': + if err = drainSingle(dec); err != nil { + return 0, err + } + case '[': + if err = drainSingle(dec); err != nil { + return 0, err + } + } + case string: + if tk == decodedToken { + return offset, nil + } + default: + return 0, fmt.Errorf("invalid token %#v", tk) + } + } + return 0, fmt.Errorf("token reference %q not found", decodedToken) +} + +func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) { + idx, err := strconv.Atoi(decodedToken) + if err != nil { + return 0, fmt.Errorf("token reference %q is not a number: %v", decodedToken, err) + } + var i int + for i = 0; i < idx && dec.More(); i++ { + tk, err := dec.Token() + if err != nil { + return 0, err + } + + if delim, isDelim := tk.(json.Delim); isDelim { + switch delim { + case '{': + if err = drainSingle(dec); err != nil { + return 0, err + } + case '[': + if err = drainSingle(dec); err != nil { + return 0, err + } + } + } + } + + if !dec.More() { + return 0, fmt.Errorf("token reference %q not found", decodedToken) + } + return dec.InputOffset(), nil +} + +// drainSingle drains a single level of object or array. +// The decoder has to guarantee the beginning delim (i.e. '{' or '[') has been consumed. +func drainSingle(dec *json.Decoder) error { + for dec.More() { + tk, err := dec.Token() + if err != nil { + return err + } + if delim, isDelim := tk.(json.Delim); isDelim { + switch delim { + case '{': + if err = drainSingle(dec); err != nil { + return err + } + case '[': + if err = drainSingle(dec); err != nil { + return err + } + } + } + } + + // Consumes the ending delim + if _, err := dec.Token(); err != nil { + return err + } + return nil +} + // Specific JSON pointer encoding here // ~0 => ~ // ~1 => / @@ -377,14 +518,14 @@ const ( // Unescape unescapes a json pointer reference token string to the original representation func Unescape(token string) string { - step1 := strings.Replace(token, encRefTok1, decRefTok1, -1) - step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1) + step1 := strings.ReplaceAll(token, encRefTok1, decRefTok1) + step2 := strings.ReplaceAll(step1, encRefTok0, decRefTok0) return step2 } // Escape escapes a pointer reference token string func Escape(token string) string { - step1 := strings.Replace(token, decRefTok0, encRefTok0, -1) - step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1) + step1 := strings.ReplaceAll(token, decRefTok0, encRefTok0) + step2 := strings.ReplaceAll(step1, decRefTok1, encRefTok1) return step2 } diff --git a/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/vendor/github.com/go-openapi/jsonreference/.golangci.yml index 013fc1943..22f8d21cc 100644 --- a/vendor/github.com/go-openapi/jsonreference/.golangci.yml +++ b/vendor/github.com/go-openapi/jsonreference/.golangci.yml @@ -1,50 +1,61 @@ linters-settings: govet: check-shadowing: true + golint: + min-confidence: 0 gocyclo: - min-complexity: 30 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 100 + threshold: 200 goconst: min-len: 2 - min-occurrences: 4 - paralleltest: - ignore-missing: true + min-occurrences: 3 + linters: 
enable-all: true disable: - maligned + - unparam - lll + - gochecknoinits - gochecknoglobals + - funlen - godox - gocognit - whitespace - wsl - - funlen - - gochecknoglobals - - gochecknoinits - - scopelint - wrapcheck - - exhaustivestruct - - exhaustive - - nlreturn - testpackage - - gci - - gofumpt - - goerr113 + - nlreturn - gomnd - - tparallel + - exhaustivestruct + - goerr113 + - errorlint - nestif - godot - - errorlint - - varcheck - - interfacer - - deadcode - - golint + - gofumpt + - paralleltest + - tparallel + - thelper - ifshort - - structcheck - - nosnakecase - - varnamelen - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md index b94753aa5..c7fc2049c 100644 --- a/vendor/github.com/go-openapi/jsonreference/README.md +++ b/vendor/github.com/go-openapi/jsonreference/README.md @@ -1,15 +1,19 @@ -# gojsonreference [![Build Status](https://travis-ci.org/go-openapi/jsonreference.svg?branch=master)](https://travis-ci.org/go-openapi/jsonreference) [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +# gojsonreference [![Build Status](https://github.com/go-openapi/jsonreference/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/jsonreference/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference) + +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/jsonreference.svg)](https://pkg.go.dev/github.com/go-openapi/jsonreference) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/jsonreference)](https://goreportcard.com/report/github.com/go-openapi/jsonreference) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonreference?status.svg)](http://godoc.org/github.com/go-openapi/jsonreference) An implementation of JSON Reference - Go language ## Status Feature complete. 
Stable API ## Dependencies -https://github.com/go-openapi/jsonpointer +* https://github.com/go-openapi/jsonpointer ## References -http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 -http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 +* http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 +* http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/vendor/github.com/go-openapi/loads/.golangci.yml b/vendor/github.com/go-openapi/loads/.golangci.yml index d48b4a515..22f8d21cc 100644 --- a/vendor/github.com/go-openapi/loads/.golangci.yml +++ b/vendor/github.com/go-openapi/loads/.golangci.yml @@ -4,41 +4,58 @@ linters-settings: golint: min-confidence: 0 gocyclo: - min-complexity: 30 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 100 + threshold: 200 goconst: min-len: 2 - min-occurrences: 4 + min-occurrences: 3 linters: enable-all: true disable: - maligned + - unparam - lll - - gochecknoglobals - gochecknoinits + - gochecknoglobals + - funlen - godox - gocognit - whitespace - wsl - - funlen - - gochecknoglobals - - gochecknoinits - - scopelint - wrapcheck - - exhaustivestruct - - exhaustive - - nlreturn - testpackage - - gci - - gofumpt - - goerr113 + - nlreturn - gomnd - - tparallel + - exhaustivestruct + - goerr113 + - errorlint - nestif - godot - - errorlint + - gofumpt - paralleltest + - tparallel + - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/loads/README.md b/vendor/github.com/go-openapi/loads/README.md index df1f62646..f8bd440df 100644 --- a/vendor/github.com/go-openapi/loads/README.md +++ b/vendor/github.com/go-openapi/loads/README.md @@ -1,4 +1,4 @@ -# Loads OAI specs [![Build Status](https://travis-ci.org/go-openapi/loads.svg?branch=master)](https://travis-ci.org/go-openapi/loads) [![codecov](https://codecov.io/gh/go-openapi/loads/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/loads) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![Actions/Go Test Status](https://github.com/go-openapi/loads/workflows/Go%20Test/badge.svg)](https://github.com/go-openapi/loads/actions?query=workflow%3A"Go+Test") +# Loads OAI specs [![Build Status](https://github.com/go-openapi/loads/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/loads/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/loads/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/loads) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads) [![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/loads)](https://goreportcard.com/report/github.com/go-openapi/loads) diff --git a/vendor/github.com/go-openapi/loads/doc.go b/vendor/github.com/go-openapi/loads/doc.go index 3046da4ce..5bcaef5db 100644 --- a/vendor/github.com/go-openapi/loads/doc.go +++ b/vendor/github.com/go-openapi/loads/doc.go @@ -12,10 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-/* -Package loads provides document loading methods for swagger (OAI) specifications. - -It is used by other go-openapi packages to load and run analysis on local or remote spec documents. - -*/ +// Package loads provides document loading methods for swagger (OAI) specifications. +// +// It is used by other go-openapi packages to load and run analysis on local or remote spec documents. package loads diff --git a/vendor/github.com/go-openapi/loads/loaders.go b/vendor/github.com/go-openapi/loads/loaders.go index 44bd32b5b..b2d1e034c 100644 --- a/vendor/github.com/go-openapi/loads/loaders.go +++ b/vendor/github.com/go-openapi/loads/loaders.go @@ -21,7 +21,7 @@ var ( func init() { jsonLoader := &loader{ DocLoaderWithMatch: DocLoaderWithMatch{ - Match: func(pth string) bool { + Match: func(_ string) bool { return true }, Fn: JSONDoc, @@ -86,7 +86,7 @@ func (l *loader) Load(path string) (json.RawMessage, error) { return nil, erp } - var lastErr error = errors.New("no loader matched") // default error if no match was found + lastErr := errors.New("no loader matched") // default error if no match was found for ldr := l; ldr != nil; ldr = ldr.Next { if ldr.Match != nil && !ldr.Match(path) { continue @@ -118,9 +118,8 @@ func JSONDoc(path string) (json.RawMessage, error) { // This sets the configuration at the package level. // // NOTE: -// * this updates the default loader used by github.com/go-openapi/spec -// * since this sets package level globals, you shouln't call this concurrently -// +// - this updates the default loader used by github.com/go-openapi/spec +// - since this sets package level globals, you shouln't call this concurrently func AddLoader(predicate DocMatcher, load DocLoader) { loaders = loaders.WithHead(&loader{ DocLoaderWithMatch: DocLoaderWithMatch{ diff --git a/vendor/github.com/go-openapi/loads/spec.go b/vendor/github.com/go-openapi/loads/spec.go index 93c8d4b89..c9039cd5d 100644 --- a/vendor/github.com/go-openapi/loads/spec.go +++ b/vendor/github.com/go-openapi/loads/spec.go @@ -38,8 +38,8 @@ type Document struct { specFilePath string origSpec *spec.Swagger schema *spec.Schema - raw json.RawMessage pathLoader *loader + raw json.RawMessage } // JSONSpec loads a spec from a json document @@ -49,7 +49,14 @@ func JSONSpec(path string, options ...LoaderOption) (*Document, error) { return nil, err } // convert to json - return Analyzed(data, "", options...) + doc, err := Analyzed(data, "", options...) + if err != nil { + return nil, err + } + + doc.specFilePath = path + + return doc, nil } // Embedded returns a Document based on embedded specs. No analysis is required @@ -71,7 +78,6 @@ func Embedded(orig, flat json.RawMessage, options ...LoaderOption) (*Document, e // Spec loads a new spec document from a local or remote path func Spec(path string, options ...LoaderOption) (*Document, error) { - ldr := loaderFromOptions(options) b, err := ldr.Load(path) @@ -84,12 +90,10 @@ func Spec(path string, options ...LoaderOption) (*Document, error) { return nil, err } - if document != nil { - document.specFilePath = path - document.pathLoader = ldr - } + document.specFilePath = path + document.pathLoader = ldr - return document, err + return document, nil } // Analyzed creates a new analyzed spec document for a root json.RawMessage. 
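// A minimal usage sketch for the loads changes above, assuming a placeholder
// "./swagger.json" file (the path is invented, not part of this diff). It exercises
// loads.Spec, which the hunk above now always stamps with the spec file path and the
// path loader instead of doing so conditionally.
package main

import (
    "fmt"
    "log"

    "github.com/go-openapi/loads"
)

func main() {
    // Load a spec from a local or remote path; the returned Document remembers where it
    // came from, which later drives relative $ref resolution.
    doc, err := loads.Spec("./swagger.json")
    if err != nil {
        log.Fatal(err)
    }

    fmt.Println("swagger version:", doc.Version())
    fmt.Println("base path:", doc.BasePath())
}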
@@ -117,7 +121,7 @@ func Analyzed(data json.RawMessage, version string, options ...LoaderOption) (*D } d := &Document{ - Analyzer: analysis.New(swspec), + Analyzer: analysis.New(swspec), // NOTE: at this moment, analysis does not follow $refs to documents outside the root doc schema: spec.MustLoadSwagger20Schema(), spec: swspec, raw: raw, @@ -152,9 +156,8 @@ func trimData(in json.RawMessage) (json.RawMessage, error) { return d, nil } -// Expanded expands the ref fields in the spec document and returns a new spec document +// Expanded expands the $ref fields in the spec document and returns a new spec document func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) { - swspec := new(spec.Swagger) if err := json.Unmarshal(d.raw, swspec); err != nil { return nil, err @@ -163,6 +166,9 @@ func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) { var expandOptions *spec.ExpandOptions if len(options) > 0 { expandOptions = options[0] + if expandOptions.RelativeBase == "" { + expandOptions.RelativeBase = d.specFilePath + } } else { expandOptions = &spec.ExpandOptions{ RelativeBase: d.specFilePath, @@ -194,7 +200,7 @@ func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) { return dd, nil } -// BasePath the base path for this spec +// BasePath the base path for the API specified by this spec func (d *Document) BasePath() string { return d.spec.BasePath } @@ -242,8 +248,11 @@ func (d *Document) ResetDefinitions() *Document { // Pristine creates a new pristine document instance based on the input data func (d *Document) Pristine() *Document { - dd, _ := Analyzed(d.Raw(), d.Version()) + raw, _ := json.Marshal(d.Spec()) + dd, _ := Analyzed(raw, d.Version()) dd.pathLoader = d.pathLoader + dd.specFilePath = d.specFilePath + return dd } diff --git a/vendor/github.com/go-openapi/runtime/.golangci.yml b/vendor/github.com/go-openapi/runtime/.golangci.yml index b1aa7928a..1c75557ba 100644 --- a/vendor/github.com/go-openapi/runtime/.golangci.yml +++ b/vendor/github.com/go-openapi/runtime/.golangci.yml @@ -1,44 +1,62 @@ linters-settings: govet: - # Using err repeatedly considered as shadowing. 
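// A short sketch of the Document.Expanded and Pristine behavior adjusted in the spec.go
// hunks above, assuming the same placeholder "./swagger.json" file. When an ExpandOptions
// value is passed without a RelativeBase, the base now falls back to the file the document
// was loaded from, so relative $ref targets resolve against the spec's own location.
package main

import (
    "fmt"
    "log"

    "github.com/go-openapi/loads"
    "github.com/go-openapi/spec"
)

func main() {
    doc, err := loads.Spec("./swagger.json")
    if err != nil {
        log.Fatal(err)
    }

    // RelativeBase is deliberately left empty: it now defaults to the document's own path.
    expanded, err := doc.Expanded(&spec.ExpandOptions{SkipSchemas: false})
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("expanded definitions:", len(expanded.Spec().Definitions))

    // Pristine rebuilds an un-expanded Document from the current spec rather than the raw
    // bytes, and now also carries over the original file path and loader.
    _ = doc.Pristine()
}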
- check-shadowing: false + check-shadowing: true golint: min-confidence: 0 gocyclo: - min-complexity: 30 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 100 + threshold: 200 goconst: min-len: 2 - min-occurrences: 4 + min-occurrences: 3 + linters: + enable-all: true disable: + - nilerr # nilerr crashes on this repo - maligned + - unparam - lll + - gochecknoinits - gochecknoglobals + - funlen - godox - gocognit - whitespace - wsl - - funlen - - gochecknoglobals - - gochecknoinits - - scopelint - wrapcheck - - exhaustivestruct - - exhaustive - - nlreturn - testpackage - - gci - - gofumpt - - goerr113 + - nlreturn - gomnd - - tparallel + - exhaustivestruct + - goerr113 + - errorlint - nestif - godot - - errorlint - - noctx + - gofumpt + - paralleltest + - tparallel + - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode - interfacer - - nilerr + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/runtime/README.md b/vendor/github.com/go-openapi/runtime/README.md index 5b1ec6494..b07e0ad9d 100644 --- a/vendor/github.com/go-openapi/runtime/README.md +++ b/vendor/github.com/go-openapi/runtime/README.md @@ -1,7 +1,10 @@ -# runtime [![Build Status](https://travis-ci.org/go-openapi/runtime.svg?branch=client-context)](https://travis-ci.org/go-openapi/runtime) [![codecov](https://codecov.io/gh/go-openapi/runtime/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/runtime) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +# runtime [![Build Status](https://github.com/go-openapi/runtime/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/runtime/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/runtime/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/runtime) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/runtime/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/runtime?status.svg)](http://godoc.org/github.com/go-openapi/runtime) +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/runtime/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/runtime.svg)](https://pkg.go.dev/github.com/go-openapi/runtime) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/runtime)](https://goreportcard.com/report/github.com/go-openapi/runtime) -# golang Open-API toolkit - runtime +# go OpenAPI toolkit runtime -The runtime component for use in codegeneration or as untyped usage. +The runtime component for use in code generation or as untyped usage. 
diff --git a/vendor/github.com/go-openapi/runtime/bytestream.go b/vendor/github.com/go-openapi/runtime/bytestream.go index 6eb6ceb5c..f8fb48223 100644 --- a/vendor/github.com/go-openapi/runtime/bytestream.go +++ b/vendor/github.com/go-openapi/runtime/bytestream.go @@ -38,9 +38,16 @@ type byteStreamOpts struct { Close bool } -// ByteStreamConsumer creates a consumer for byte streams, -// takes a Writer/BinaryUnmarshaler interface or binary slice by reference, -// and reads from the provided reader +// ByteStreamConsumer creates a consumer for byte streams. +// +// The consumer consumes from a provided reader into the data passed by reference. +// +// Supported output underlying types and interfaces, prioritized in this order: +// - io.ReaderFrom (for maximum control) +// - io.Writer (performs io.Copy) +// - encoding.BinaryUnmarshaler +// - *string +// - *[]byte func ByteStreamConsumer(opts ...byteStreamOpt) Consumer { var vals byteStreamOpts for _, opt := range opts { @@ -51,44 +58,70 @@ func ByteStreamConsumer(opts ...byteStreamOpt) Consumer { if reader == nil { return errors.New("ByteStreamConsumer requires a reader") // early exit } + if data == nil { + return errors.New("nil destination for ByteStreamConsumer") + } - close := defaultCloser + closer := defaultCloser if vals.Close { - if cl, ok := reader.(io.Closer); ok { - close = cl.Close + if cl, isReaderCloser := reader.(io.Closer); isReaderCloser { + closer = cl.Close } } - //nolint:errcheck // closing a reader wouldn't fail. - defer close() + defer func() { + _ = closer() + }() - if wrtr, ok := data.(io.Writer); ok { - _, err := io.Copy(wrtr, reader) + if readerFrom, isReaderFrom := data.(io.ReaderFrom); isReaderFrom { + _, err := readerFrom.ReadFrom(reader) return err } - buf := new(bytes.Buffer) + if writer, isDataWriter := data.(io.Writer); isDataWriter { + _, err := io.Copy(writer, reader) + return err + } + + // buffers input before writing to data + var buf bytes.Buffer _, err := buf.ReadFrom(reader) if err != nil { return err } b := buf.Bytes() - if bu, ok := data.(encoding.BinaryUnmarshaler); ok { - return bu.UnmarshalBinary(b) - } + switch destinationPointer := data.(type) { + case encoding.BinaryUnmarshaler: + return destinationPointer.UnmarshalBinary(b) + case *any: + switch (*destinationPointer).(type) { + case string: + *destinationPointer = string(b) + + return nil + + case []byte: + *destinationPointer = b - if data != nil { - if str, ok := data.(*string); ok { - *str = string(b) return nil } - } + default: + // check for the underlying type to be pointer to []byte or string, + if ptr := reflect.TypeOf(data); ptr.Kind() != reflect.Ptr { + return errors.New("destination must be a pointer") + } - if t := reflect.TypeOf(data); data != nil && t.Kind() == reflect.Ptr { v := reflect.Indirect(reflect.ValueOf(data)) - if t = v.Type(); t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 { + t := v.Type() + + switch { + case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8: v.SetBytes(b) return nil + + case t.Kind() == reflect.String: + v.SetString(string(b)) + return nil } } @@ -97,67 +130,87 @@ func ByteStreamConsumer(opts ...byteStreamOpt) Consumer { }) } -// ByteStreamProducer creates a producer for byte streams, -// takes a Reader/BinaryMarshaler interface or binary slice, -// and writes to a writer (essentially a pipe) +// ByteStreamProducer creates a producer for byte streams. +// +// The producer takes input data then writes to an output writer (essentially as a pipe). 
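// A round-trip sketch for the rewritten byte-stream producer and consumer above; the full
// prioritized list of inputs the producer accepts continues in the next hunk. The example
// sticks to shapes named in the diff: a plain string input and a *[]byte destination.
package main

import (
    "bytes"
    "fmt"
    "log"

    "github.com/go-openapi/runtime"
)

func main() {
    // Produce: a string input is written verbatim to the output writer.
    var wire bytes.Buffer
    if err := runtime.ByteStreamProducer().Produce(&wire, "hello, bytestream"); err != nil {
        log.Fatal(err)
    }

    // Consume: a *[]byte destination receives the raw bytes read from the input reader.
    var payload []byte
    if err := runtime.ByteStreamConsumer().Consume(&wire, &payload); err != nil {
        log.Fatal(err)
    }

    fmt.Println(string(payload)) // hello, bytestream
}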
+// +// Supported input underlying types and interfaces, prioritized in this order: +// - io.WriterTo (for maximum control) +// - io.Reader (performs io.Copy). A ReadCloser is closed before exiting. +// - encoding.BinaryMarshaler +// - error (writes as a string) +// - []byte +// - string +// - struct, other slices: writes as JSON func ByteStreamProducer(opts ...byteStreamOpt) Producer { var vals byteStreamOpts for _, opt := range opts { opt(&vals) } + return ProducerFunc(func(writer io.Writer, data interface{}) error { if writer == nil { return errors.New("ByteStreamProducer requires a writer") // early exit } - close := defaultCloser + if data == nil { + return errors.New("nil data for ByteStreamProducer") + } + + closer := defaultCloser if vals.Close { - if cl, ok := writer.(io.Closer); ok { - close = cl.Close + if cl, isWriterCloser := writer.(io.Closer); isWriterCloser { + closer = cl.Close } } - //nolint:errcheck // TODO: closing a writer would fail. - defer close() + defer func() { + _ = closer() + }() - if rc, ok := data.(io.ReadCloser); ok { + if rc, isDataCloser := data.(io.ReadCloser); isDataCloser { defer rc.Close() } - if rdr, ok := data.(io.Reader); ok { - _, err := io.Copy(writer, rdr) + switch origin := data.(type) { + case io.WriterTo: + _, err := origin.WriteTo(writer) return err - } - if bm, ok := data.(encoding.BinaryMarshaler); ok { - bytes, err := bm.MarshalBinary() + case io.Reader: + _, err := io.Copy(writer, origin) + return err + + case encoding.BinaryMarshaler: + bytes, err := origin.MarshalBinary() if err != nil { return err } _, err = writer.Write(bytes) return err - } - if data != nil { - if str, ok := data.(string); ok { - _, err := writer.Write([]byte(str)) - return err - } - - if e, ok := data.(error); ok { - _, err := writer.Write([]byte(e.Error())) - return err - } + case error: + _, err := writer.Write([]byte(origin.Error())) + return err + default: v := reflect.Indirect(reflect.ValueOf(data)) - if t := v.Type(); t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 { + t := v.Type() + + switch { + case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8: _, err := writer.Write(v.Bytes()) return err - } - if t := v.Type(); t.Kind() == reflect.Struct || t.Kind() == reflect.Slice { + + case t.Kind() == reflect.String: + _, err := writer.Write([]byte(v.String())) + return err + + case t.Kind() == reflect.Struct || t.Kind() == reflect.Slice: b, err := swag.WriteJSON(data) if err != nil { return err } + _, err = writer.Write(b) return err } diff --git a/vendor/github.com/go-openapi/runtime/client_operation.go b/vendor/github.com/go-openapi/runtime/client_operation.go index fa21eacf3..5a5d63563 100644 --- a/vendor/github.com/go-openapi/runtime/client_operation.go +++ b/vendor/github.com/go-openapi/runtime/client_operation.go @@ -30,12 +30,12 @@ type ClientOperation struct { AuthInfo ClientAuthInfoWriter Params ClientRequestWriter Reader ClientResponseReader - Context context.Context + Context context.Context //nolint:containedctx // we precisely want this type to contain the request context Client *http.Client } // A ClientTransport implementor knows how to submit Request objects to some destination type ClientTransport interface { - //Submit(string, RequestWriter, ResponseReader, AuthInfoWriter) (interface{}, error) + // Submit(string, RequestWriter, ResponseReader, AuthInfoWriter) (interface{}, error) Submit(*ClientOperation) (interface{}, error) } diff --git a/vendor/github.com/go-openapi/runtime/client_request.go 
b/vendor/github.com/go-openapi/runtime/client_request.go index d4d2b58f2..4ebb2deab 100644 --- a/vendor/github.com/go-openapi/runtime/client_request.go +++ b/vendor/github.com/go-openapi/runtime/client_request.go @@ -37,8 +37,8 @@ type ClientRequestWriter interface { } // ClientRequest is an interface for things that know how to -// add information to a swagger client request -type ClientRequest interface { +// add information to a swagger client request. +type ClientRequest interface { //nolint:interfacebloat // a swagger-capable request is quite rich, hence the many getter/setters SetHeaderParam(string, ...string) error GetHeaderParams() http.Header diff --git a/vendor/github.com/go-openapi/runtime/csv.go b/vendor/github.com/go-openapi/runtime/csv.go index d807bd915..c9597bcd6 100644 --- a/vendor/github.com/go-openapi/runtime/csv.go +++ b/vendor/github.com/go-openapi/runtime/csv.go @@ -16,62 +16,335 @@ package runtime import ( "bytes" + "context" + "encoding" "encoding/csv" "errors" + "fmt" "io" + "reflect" + + "golang.org/x/sync/errgroup" ) -// CSVConsumer creates a new CSV consumer -func CSVConsumer() Consumer { +// CSVConsumer creates a new CSV consumer. +// +// The consumer consumes CSV records from a provided reader into the data passed by reference. +// +// CSVOpts options may be specified to alter the default CSV behavior on the reader and the writer side (e.g. separator, skip header, ...). +// The defaults are those of the standard library's csv.Reader and csv.Writer. +// +// Supported output underlying types and interfaces, prioritized in this order: +// - *csv.Writer +// - CSVWriter (writer options are ignored) +// - io.Writer (as raw bytes) +// - io.ReaderFrom (as raw bytes) +// - encoding.BinaryUnmarshaler (as raw bytes) +// - *[][]string (as a collection of records) +// - *[]byte (as raw bytes) +// - *string (a raw bytes) +// +// The consumer prioritizes situations where buffering the input is not required. 
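// A small sketch of the CSVConsumer contract documented above, using one of the listed
// destinations (*[][]string). The CSV payload is made up for illustration.
package main

import (
    "fmt"
    "log"
    "strings"

    "github.com/go-openapi/runtime"
)

func main() {
    input := strings.NewReader("a,b,c\n1,2,3\n")

    // The destination is passed by reference and receives one []string per CSV record.
    var records [][]string
    if err := runtime.CSVConsumer().Consume(input, &records); err != nil {
        log.Fatal(err)
    }

    fmt.Println(records) // [[a b c] [1 2 3]]
}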
+func CSVConsumer(opts ...CSVOpt) Consumer { + o := csvOptsWithDefaults(opts) + return ConsumerFunc(func(reader io.Reader, data interface{}) error { if reader == nil { return errors.New("CSVConsumer requires a reader") } + if data == nil { + return errors.New("nil destination for CSVConsumer") + } csvReader := csv.NewReader(reader) - writer, ok := data.(io.Writer) - if !ok { - return errors.New("data type must be io.Writer") - } - csvWriter := csv.NewWriter(writer) - records, err := csvReader.ReadAll() - if err != nil { - return err - } - for _, r := range records { - if err := csvWriter.Write(r); err != nil { - return err + o.applyToReader(csvReader) + closer := defaultCloser + if o.closeStream { + if cl, isReaderCloser := reader.(io.Closer); isReaderCloser { + closer = cl.Close + } + } + defer func() { + _ = closer() + }() + + switch destination := data.(type) { + case *csv.Writer: + csvWriter := destination + o.applyToWriter(csvWriter) + + return pipeCSV(csvWriter, csvReader, o) + + case CSVWriter: + csvWriter := destination + // no writer options available + + return pipeCSV(csvWriter, csvReader, o) + + case io.Writer: + csvWriter := csv.NewWriter(destination) + o.applyToWriter(csvWriter) + + return pipeCSV(csvWriter, csvReader, o) + + case io.ReaderFrom: + var buf bytes.Buffer + csvWriter := csv.NewWriter(&buf) + o.applyToWriter(csvWriter) + if err := bufferedCSV(csvWriter, csvReader, o); err != nil { + return err + } + _, err := destination.ReadFrom(&buf) + + return err + + case encoding.BinaryUnmarshaler: + var buf bytes.Buffer + csvWriter := csv.NewWriter(&buf) + o.applyToWriter(csvWriter) + if err := bufferedCSV(csvWriter, csvReader, o); err != nil { + return err + } + + return destination.UnmarshalBinary(buf.Bytes()) + + default: + // support *[][]string, *[]byte, *string + if ptr := reflect.TypeOf(data); ptr.Kind() != reflect.Ptr { + return errors.New("destination must be a pointer") + } + + v := reflect.Indirect(reflect.ValueOf(data)) + t := v.Type() + + switch { + case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Slice && t.Elem().Elem().Kind() == reflect.String: + csvWriter := &csvRecordsWriter{} + // writer options are ignored + if err := pipeCSV(csvWriter, csvReader, o); err != nil { + return err + } + + v.Grow(len(csvWriter.records)) + v.SetCap(len(csvWriter.records)) // in case Grow was unnessary, trim down the capacity + v.SetLen(len(csvWriter.records)) + reflect.Copy(v, reflect.ValueOf(csvWriter.records)) + + return nil + + case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8: + var buf bytes.Buffer + csvWriter := csv.NewWriter(&buf) + o.applyToWriter(csvWriter) + if err := bufferedCSV(csvWriter, csvReader, o); err != nil { + return err + } + v.SetBytes(buf.Bytes()) + + return nil + + case t.Kind() == reflect.String: + var buf bytes.Buffer + csvWriter := csv.NewWriter(&buf) + o.applyToWriter(csvWriter) + if err := bufferedCSV(csvWriter, csvReader, o); err != nil { + return err + } + v.SetString(buf.String()) + + return nil + + default: + return fmt.Errorf("%v (%T) is not supported by the CSVConsumer, %s", + data, data, "can be resolved by supporting CSVWriter/Writer/BinaryUnmarshaler interface", + ) } } - csvWriter.Flush() - return nil }) } -// CSVProducer creates a new CSV producer -func CSVProducer() Producer { +// CSVProducer creates a new CSV producer. +// +// The producer takes input data then writes as CSV to an output writer (essentially as a pipe). 
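// The mirror-image sketch for the CSVProducer described above; its full list of accepted
// inputs follows in the next hunk, and [][]string (used here with made-up records) is one
// of them. The producer writes the records as CSV to the supplied writer.
package main

import (
    "bytes"
    "fmt"
    "log"

    "github.com/go-openapi/runtime"
)

func main() {
    records := [][]string{
        {"id", "name"},
        {"1", "widget"},
    }

    var out bytes.Buffer
    if err := runtime.CSVProducer().Produce(&out, records); err != nil {
        log.Fatal(err)
    }

    fmt.Print(out.String())
    // Output:
    // id,name
    // 1,widget
}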
+// +// Supported input underlying types and interfaces, prioritized in this order: +// - *csv.Reader +// - CSVReader (reader options are ignored) +// - io.Reader +// - io.WriterTo +// - encoding.BinaryMarshaler +// - [][]string +// - []byte +// - string +// +// The producer prioritizes situations where buffering the input is not required. +func CSVProducer(opts ...CSVOpt) Producer { + o := csvOptsWithDefaults(opts) + return ProducerFunc(func(writer io.Writer, data interface{}) error { if writer == nil { return errors.New("CSVProducer requires a writer") } - - dataBytes, ok := data.([]byte) - if !ok { - return errors.New("data type must be byte array") + if data == nil { + return errors.New("nil data for CSVProducer") } - csvReader := csv.NewReader(bytes.NewBuffer(dataBytes)) - records, err := csvReader.ReadAll() - if err != nil { - return err - } csvWriter := csv.NewWriter(writer) - for _, r := range records { - if err := csvWriter.Write(r); err != nil { - return err + o.applyToWriter(csvWriter) + closer := defaultCloser + if o.closeStream { + if cl, isWriterCloser := writer.(io.Closer); isWriterCloser { + closer = cl.Close + } + } + defer func() { + _ = closer() + }() + + if rc, isDataCloser := data.(io.ReadCloser); isDataCloser { + defer rc.Close() + } + + switch origin := data.(type) { + case *csv.Reader: + csvReader := origin + o.applyToReader(csvReader) + + return pipeCSV(csvWriter, csvReader, o) + + case CSVReader: + csvReader := origin + // no reader options available + + return pipeCSV(csvWriter, csvReader, o) + + case io.Reader: + csvReader := csv.NewReader(origin) + o.applyToReader(csvReader) + + return pipeCSV(csvWriter, csvReader, o) + + case io.WriterTo: + // async piping of the writes performed by WriteTo + r, w := io.Pipe() + csvReader := csv.NewReader(r) + o.applyToReader(csvReader) + + pipe, _ := errgroup.WithContext(context.Background()) + pipe.Go(func() error { + _, err := origin.WriteTo(w) + _ = w.Close() + return err + }) + + pipe.Go(func() error { + defer func() { + _ = r.Close() + }() + + return pipeCSV(csvWriter, csvReader, o) + }) + + return pipe.Wait() + + case encoding.BinaryMarshaler: + buf, err := origin.MarshalBinary() + if err != nil { + return err + } + rdr := bytes.NewBuffer(buf) + csvReader := csv.NewReader(rdr) + + return bufferedCSV(csvWriter, csvReader, o) + + default: + // support [][]string, []byte, string (or pointers to those) + v := reflect.Indirect(reflect.ValueOf(data)) + t := v.Type() + + switch { + case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Slice && t.Elem().Elem().Kind() == reflect.String: + csvReader := &csvRecordsWriter{ + records: make([][]string, v.Len()), + } + reflect.Copy(reflect.ValueOf(csvReader.records), v) + + return pipeCSV(csvWriter, csvReader, o) + + case t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8: + buf := bytes.NewBuffer(v.Bytes()) + csvReader := csv.NewReader(buf) + o.applyToReader(csvReader) + + return bufferedCSV(csvWriter, csvReader, o) + + case t.Kind() == reflect.String: + buf := bytes.NewBufferString(v.String()) + csvReader := csv.NewReader(buf) + o.applyToReader(csvReader) + + return bufferedCSV(csvWriter, csvReader, o) + + default: + return fmt.Errorf("%v (%T) is not supported by the CSVProducer, %s", + data, data, "can be resolved by supporting CSVReader/Reader/BinaryMarshaler interface", + ) } } - csvWriter.Flush() - return nil }) } + +// pipeCSV copies CSV records from a CSV reader to a CSV writer +func pipeCSV(csvWriter CSVWriter, csvReader CSVReader, opts csvOpts) error { + 
for ; opts.skippedLines > 0; opts.skippedLines-- { + _, err := csvReader.Read() + if err != nil { + if errors.Is(err, io.EOF) { + return nil + } + + return err + } + } + + for { + record, err := csvReader.Read() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + + return err + } + + if err := csvWriter.Write(record); err != nil { + return err + } + } + + csvWriter.Flush() + + return csvWriter.Error() +} + +// bufferedCSV copies CSV records from a CSV reader to a CSV writer, +// by first reading all records then writing them at once. +func bufferedCSV(csvWriter *csv.Writer, csvReader *csv.Reader, opts csvOpts) error { + for ; opts.skippedLines > 0; opts.skippedLines-- { + _, err := csvReader.Read() + if err != nil { + if errors.Is(err, io.EOF) { + return nil + } + + return err + } + } + + records, err := csvReader.ReadAll() + if err != nil { + return err + } + + return csvWriter.WriteAll(records) +} diff --git a/vendor/github.com/go-openapi/runtime/csv_options.go b/vendor/github.com/go-openapi/runtime/csv_options.go new file mode 100644 index 000000000..c16464c57 --- /dev/null +++ b/vendor/github.com/go-openapi/runtime/csv_options.go @@ -0,0 +1,121 @@ +package runtime + +import ( + "encoding/csv" + "io" +) + +// CSVOpts alter the behavior of the CSV consumer or producer. +type CSVOpt func(*csvOpts) + +type csvOpts struct { + csvReader csv.Reader + csvWriter csv.Writer + skippedLines int + closeStream bool +} + +// WithCSVReaderOpts specifies the options to csv.Reader +// when reading CSV. +func WithCSVReaderOpts(reader csv.Reader) CSVOpt { + return func(o *csvOpts) { + o.csvReader = reader + } +} + +// WithCSVWriterOpts specifies the options to csv.Writer +// when writing CSV. +func WithCSVWriterOpts(writer csv.Writer) CSVOpt { + return func(o *csvOpts) { + o.csvWriter = writer + } +} + +// WithCSVSkipLines will skip header lines. 
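// A sketch combining the CSVOpt helpers introduced in this new csv_options.go file:
// WithCSVReaderOpts switches the field separator and WithCSVSkipLines (declared right
// after this block) drops a header line. The semicolon-separated payload is invented.
package main

import (
    "encoding/csv"
    "fmt"
    "log"
    "strings"

    "github.com/go-openapi/runtime"
)

func main() {
    input := strings.NewReader("col_a;col_b\n1;2\n")

    consumer := runtime.CSVConsumer(
        runtime.WithCSVReaderOpts(csv.Reader{Comma: ';'}), // only the fields set here are applied
        runtime.WithCSVSkipLines(1),                       // skip the header record
    )

    var records [][]string
    if err := consumer.Consume(input, &records); err != nil {
        log.Fatal(err)
    }

    fmt.Println(records) // [[1 2]]
}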
+func WithCSVSkipLines(skipped int) CSVOpt { + return func(o *csvOpts) { + o.skippedLines = skipped + } +} + +func WithCSVClosesStream() CSVOpt { + return func(o *csvOpts) { + o.closeStream = true + } +} + +func (o csvOpts) applyToReader(in *csv.Reader) { + if o.csvReader.Comma != 0 { + in.Comma = o.csvReader.Comma + } + if o.csvReader.Comment != 0 { + in.Comment = o.csvReader.Comment + } + if o.csvReader.FieldsPerRecord != 0 { + in.FieldsPerRecord = o.csvReader.FieldsPerRecord + } + + in.LazyQuotes = o.csvReader.LazyQuotes + in.TrimLeadingSpace = o.csvReader.TrimLeadingSpace + in.ReuseRecord = o.csvReader.ReuseRecord +} + +func (o csvOpts) applyToWriter(in *csv.Writer) { + if o.csvWriter.Comma != 0 { + in.Comma = o.csvWriter.Comma + } + in.UseCRLF = o.csvWriter.UseCRLF +} + +func csvOptsWithDefaults(opts []CSVOpt) csvOpts { + var o csvOpts + for _, apply := range opts { + apply(&o) + } + + return o +} + +type CSVWriter interface { + Write([]string) error + Flush() + Error() error +} + +type CSVReader interface { + Read() ([]string, error) +} + +var ( + _ CSVWriter = &csvRecordsWriter{} + _ CSVReader = &csvRecordsWriter{} +) + +// csvRecordsWriter is an internal container to move CSV records back and forth +type csvRecordsWriter struct { + i int + records [][]string +} + +func (w *csvRecordsWriter) Write(record []string) error { + w.records = append(w.records, record) + + return nil +} + +func (w *csvRecordsWriter) Read() ([]string, error) { + if w.i >= len(w.records) { + return nil, io.EOF + } + defer func() { + w.i++ + }() + + return w.records[w.i], nil +} + +func (w *csvRecordsWriter) Flush() {} + +func (w *csvRecordsWriter) Error() error { + return nil +} diff --git a/vendor/github.com/go-openapi/runtime/request.go b/vendor/github.com/go-openapi/runtime/request.go index 078fda173..9e3e1ecb1 100644 --- a/vendor/github.com/go-openapi/runtime/request.go +++ b/vendor/github.com/go-openapi/runtime/request.go @@ -16,6 +16,8 @@ package runtime import ( "bufio" + "context" + "errors" "io" "net/http" "strings" @@ -96,10 +98,16 @@ func (p *peekingReader) Read(d []byte) (int, error) { if p == nil { return 0, io.EOF } + if p.underlying == nil { + return 0, io.ErrUnexpectedEOF + } return p.underlying.Read(d) } func (p *peekingReader) Close() error { + if p.underlying == nil { + return errors.New("reader already closed") + } p.underlying = nil if p.orig != nil { return p.orig.Close() @@ -107,9 +115,11 @@ func (p *peekingReader) Close() error { return nil } -// JSONRequest creates a new http request with json headers set +// JSONRequest creates a new http request with json headers set. +// +// It uses context.Background. 
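// A one-line usage sketch for JSONRequest, whose hunk below switches it to
// http.NewRequestWithContext with context.Background. The URL is a placeholder.
package main

import (
    "log"
    "net/http"
    "strings"

    "github.com/go-openapi/runtime"
)

func main() {
    body := strings.NewReader(`{"name": "example"}`)

    // The returned *http.Request carries the JSON headers and a background context.
    req, err := runtime.JSONRequest(http.MethodPost, "https://api.example.com/items", body)
    if err != nil {
        log.Fatal(err)
    }

    log.Println(req.Method, req.URL, req.Header.Get("Content-Type"))
}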
func JSONRequest(method, urlStr string, body io.Reader) (*http.Request, error) { - req, err := http.NewRequest(method, urlStr, body) + req, err := http.NewRequestWithContext(context.Background(), method, urlStr, body) if err != nil { return nil, err } diff --git a/vendor/github.com/go-openapi/spec/.gitignore b/vendor/github.com/go-openapi/spec/.gitignore index dd91ed6a0..f47cb2045 100644 --- a/vendor/github.com/go-openapi/spec/.gitignore +++ b/vendor/github.com/go-openapi/spec/.gitignore @@ -1,2 +1 @@ -secrets.yml -coverage.out +*.out diff --git a/vendor/github.com/go-openapi/spec/.golangci.yml b/vendor/github.com/go-openapi/spec/.golangci.yml index 835d55e74..22f8d21cc 100644 --- a/vendor/github.com/go-openapi/spec/.golangci.yml +++ b/vendor/github.com/go-openapi/spec/.golangci.yml @@ -11,7 +11,7 @@ linters-settings: threshold: 200 goconst: min-len: 2 - min-occurrences: 2 + min-occurrences: 3 linters: enable-all: true @@ -40,3 +40,22 @@ linters: - tparallel - thelper - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md index 18782c6da..7fd2810c6 100644 --- a/vendor/github.com/go-openapi/spec/README.md +++ b/vendor/github.com/go-openapi/spec/README.md @@ -1,8 +1,5 @@ -# OAI object model +# OpenAPI v2 object model [![Build Status](https://github.com/go-openapi/spec/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/spec/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) -[![Build Status](https://travis-ci.org/go-openapi/spec.svg?branch=master)](https://travis-ci.org/go-openapi/spec) - -[![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) [![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/spec.svg)](https://pkg.go.dev/github.com/go-openapi/spec) @@ -32,3 +29,26 @@ The object model for OpenAPI specification documents. > This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story. > > An early attempt to support Swagger 3 may be found at: https://github.com/go-openapi/spec3 + +* Does the unmarshaling support YAML? + +> Not directly. The exposed types know only how to unmarshal from JSON. +> +> In order to load a YAML document as a Swagger spec, you need to use the loaders provided by +> github.com/go-openapi/loads +> +> Take a look at the example there: https://pkg.go.dev/github.com/go-openapi/loads#example-Spec +> +> See also https://github.com/go-openapi/spec/issues/164 + +* How can I validate a spec? + +> Validation is provided by [the validate package](http://github.com/go-openapi/validate) + +* Why do we have an `ID` field for `Schema` which is not part of the swagger spec? + +> We found jsonschema compatibility more important: since `id` in jsonschema influences +> how `$ref` are resolved. +> This `id` does not conflict with any property named `id`. 
+> +> See also https://github.com/go-openapi/spec/issues/23 diff --git a/vendor/github.com/go-openapi/spec/appveyor.yml b/vendor/github.com/go-openapi/spec/appveyor.yml deleted file mode 100644 index 090359391..000000000 --- a/vendor/github.com/go-openapi/spec/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: "0.1.{build}" - -clone_folder: C:\go-openapi\spec -shallow_clone: true # for startup speed -pull_requests: - do_not_increment_build_number: true - -#skip_tags: true -#skip_branch_with_pr: true - -# appveyor.yml -build: off - -environment: - GOPATH: c:\gopath - -stack: go 1.15 - -test_script: - - go test -v -timeout 20m ./... - -deploy: off - -notifications: - - provider: Slack - incoming_webhook: https://hooks.slack.com/services/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 - auth_token: - secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4= - channel: bots - on_build_success: false - on_build_failure: true - on_build_status_changed: true diff --git a/vendor/github.com/go-openapi/spec/bindata.go b/vendor/github.com/go-openapi/spec/bindata.go deleted file mode 100644 index afc83850c..000000000 --- a/vendor/github.com/go-openapi/spec/bindata.go +++ /dev/null @@ -1,297 +0,0 @@ -// Code generated by go-bindata. DO NOT EDIT. 
-// sources: -// schemas/jsonschema-draft-04.json (4.357kB) -// schemas/v2/schema.json (40.248kB) - -package spec - -import ( - "bytes" - "compress/gzip" - "crypto/sha256" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" -) - -func bindataRead(data []byte, name string) ([]byte, error) { - gz, err := gzip.NewReader(bytes.NewBuffer(data)) - if err != nil { - return nil, fmt.Errorf("read %q: %v", name, err) - } - - var buf bytes.Buffer - _, err = io.Copy(&buf, gz) - clErr := gz.Close() - - if err != nil { - return nil, fmt.Errorf("read %q: %v", name, err) - } - if clErr != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -type asset struct { - bytes []byte - info os.FileInfo - digest [sha256.Size]byte -} - -type bindataFileInfo struct { - name string - size int64 - mode os.FileMode - modTime time.Time -} - -func (fi bindataFileInfo) Name() string { - return fi.name -} -func (fi bindataFileInfo) Size() int64 { - return fi.size -} -func (fi bindataFileInfo) Mode() os.FileMode { - return fi.mode -} -func (fi bindataFileInfo) ModTime() time.Time { - return fi.modTime -} -func (fi bindataFileInfo) IsDir() bool { - return false -} -func (fi bindataFileInfo) Sys() interface{} { - return nil -} - -var _jsonschemaDraft04Json = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x57\x3d\x6f\xdb\x3c\x10\xde\xf3\x2b\x08\x26\x63\xf2\x2a\x2f\xd0\xc9\x5b\xd1\x2e\x01\x5a\x34\x43\x37\x23\x03\x6d\x9d\x6c\x06\x14\xa9\x50\x54\x60\xc3\xd0\x7f\x2f\x28\x4a\x14\x29\x91\x92\x2d\xa7\x8d\x97\x28\xbc\xaf\xe7\x8e\xf7\xc5\xd3\x0d\x42\x08\x61\x9a\xe2\x15\xc2\x7b\xa5\x8a\x55\x92\xbc\x96\x82\x3f\x94\xdb\x3d\xe4\xe4\x3f\x21\x77\x49\x2a\x49\xa6\x1e\x1e\xbf\x24\xe6\xec\x16\xdf\x1b\xa1\x3b\xf3\xff\x02\xc9\x14\xca\xad\xa4\x85\xa2\x82\x6b\xe9\x6f\x42\x02\x32\x2c\x28\x07\x45\x5a\x15\x3d\x77\x46\x39\xd5\xcc\x25\x5e\x21\x83\xb8\x21\x18\xb6\xaf\x52\x92\xa3\x47\x68\x88\xea\x58\x80\x56\x4e\x1a\xf2\xbd\x4f\xcc\x29\x7f\x52\x90\x6b\x7d\xff\x0f\x48\xb4\x3d\x3f\x21\x7c\x27\x21\xd3\x2a\x6e\x31\xaa\x2d\x53\xdd\xf3\xe3\x42\x94\x54\xd1\x77\x78\xe2\x0a\x76\x20\xe3\x20\x68\xcb\x30\x86\x41\xf3\x2a\xc7\x2b\xf4\x78\x8e\xfe\xef\x90\x91\x8a\xa9\xc7\xb1\x1d\xc2\xd8\x2f\x0d\x75\xed\xc1\x4e\x9c\xc8\x25\x43\xac\xa8\xbe\xd7\xcc\xa9\xd1\xa9\x21\xa0\x1a\xbd\x04\x61\x94\x34\x2f\x18\xfc\x3e\x16\x50\x8e\x4d\x03\x6f\x1c\x58\xdb\x48\x23\xbc\x11\x82\x01\xe1\xfa\xd3\x3a\x8e\x30\xaf\x18\x33\x7f\xf3\x8d\x39\x11\x9b\x57\xd8\x2a\xfd\x55\x2a\x49\xf9\x0e\xc7\xec\x37\xd4\x25\xf7\xec\x5c\x66\xc7\xd7\x99\xaa\xcf\x4f\x89\x8a\xd3\xb7\x0a\x3a\xaa\x92\x15\xf4\x30\x6f\x1c\xb0\xd6\x46\xe7\x98\x39\x2d\xa4\x28\x40\x2a\x3a\x88\x9e\x29\xba\x88\x37\x2d\xca\x60\x38\xfa\xba\x5b\x20\xac\xa8\x62\xb0\x4c\xd4\xaf\xda\x45\x0a\xba\x5c\x3b\xb9\xc7\x79\xc5\x14\x2d\x18\x34\x19\x1c\x51\xdb\x25\x4d\xb4\x7e\x06\x14\x38\x6c\x59\x55\xd2\x77\xf8\x69\x59\xfc\x7b\x73\xed\x93\x43\xcb\x32\x6d\x3c\x28\xdc\x1b\x9a\xd3\x62\xab\xc2\x27\xf7\x41\xc9\x08\x2b\x23\x08\xad\x13\x57\x21\x9c\xd3\x72\x0d\x42\x72\xf8\x01\x7c\xa7\xf6\x83\xce\x39\xd7\x82\x3c\x1f\x2f\xd6\x60\x1b\xa2\xdf\x35\x89\x52\x20\xe7\x73\x74\xe0\x66\x26\x64\x4e\xb4\x97\x58\xc2\x0e\x0e\xe1\x60\x92\x34\x6d\xa0\x10\xd6\xb5\x83\x61\x27\xe6\x47\xd3\x89\xbd\x63\xfd\x3b\x8d\x03\x3d\x6c\x42\x2d\x5b\x70\xee\xe8\xdf\x4b\xf4\x66\x4e\xe1\x01\x45\x17\x80\x74\xad\x4f\xc3\xf3\xae\xc6\x1d\xc6\xd7\xc2\xce\xc9\xe1\x29\x30\x86\x2f\x4a\xa6\x4b\x15\x84\x73\xc9\x6f\xfd\x7f\xa5\x6e\x9e\xbd\xf1\xb0\xd4\xdd\x45\x5a\xc2\x3e\x4b\x78\xab\xa8\x84\x74\x4a\x91\x3b\x92\x23\x05\xf2\x1c\x1e\x7b\xf3\x09\xf8\xcf\xab\x24\xb6\x60\xa2\xe8\x4c\x9f\x75\x
77\xaa\x8c\xe6\x01\x45\x36\x86\xcf\xc3\x63\x3a\xea\xd4\x8d\x7e\x06\xac\x14\x0a\xe0\x29\xf0\xed\x07\x22\x1a\x65\xda\x44\xae\xa2\x73\x1a\xe6\x90\x69\xa2\x8c\x46\xb2\x2f\xde\x49\x38\x08\xed\xfe\xfd\x41\xaf\x9f\xa9\x55\xd7\xdd\x22\x8d\xfa\x45\x63\xc5\x0f\x80\xf3\xb4\x08\xd6\x79\x30\x9e\x93\xee\x59\xa6\xd0\x4b\xee\x22\xe3\x33\xc1\x3a\x27\x68\x36\x78\x7e\x87\x0a\x06\xd5\x2e\x20\xd3\xaf\x15\xfb\xd8\x3b\x73\x14\xbb\x92\xed\x05\x5d\x2e\x29\x38\x2c\x94\xe4\x42\x45\x5e\xd3\xb5\x7d\xdf\x47\xca\x38\xb4\x5c\xaf\xfb\x7d\xdd\x6d\xf4\xa1\x2d\x77\xdd\x2f\xce\x6d\xc4\x7b\x8b\x4e\x67\xa9\x6f\xfe\x04\x00\x00\xff\xff\xb1\xd1\x27\x78\x05\x11\x00\x00") - -func jsonschemaDraft04JsonBytes() ([]byte, error) { - return bindataRead( - _jsonschemaDraft04Json, - "jsonschema-draft-04.json", - ) -} - -func jsonschemaDraft04Json() (*asset, error) { - bytes, err := jsonschemaDraft04JsonBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4357, mode: os.FileMode(0640), modTime: time.Unix(1568963823, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe1, 0x48, 0x9d, 0xb, 0x47, 0x55, 0xf0, 0x27, 0x93, 0x30, 0x25, 0x91, 0xd3, 0xfc, 0xb8, 0xf0, 0x7b, 0x68, 0x93, 0xa8, 0x2a, 0x94, 0xf2, 0x48, 0x95, 0xf8, 0xe4, 0xed, 0xf1, 0x1b, 0x82, 0xe2}} - return a, nil -} - -var _v2SchemaJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5d\x4f\x93\xdb\x36\xb2\xbf\xfb\x53\xa0\x14\x57\xd9\xae\xd8\x92\xe3\xf7\x2e\xcf\x97\xd4\xbc\xd8\x49\x66\x37\x5e\x4f\x79\x26\xbb\x87\x78\x5c\x05\x91\x2d\x09\x09\x09\x30\x00\x38\x33\x5a\xef\x7c\xf7\x2d\xf0\x9f\x08\x02\x20\x41\x8a\xd2\xc8\x0e\x0f\xa9\x78\x28\xa0\xd1\xdd\x68\x34\x7e\xdd\xf8\xf7\xf9\x11\x42\x33\x49\x64\x04\xb3\xd7\x68\x76\x86\xfe\x76\xf9\xfe\x1f\xe8\x32\xd8\x40\x8c\xd1\x8a\x71\x74\x79\x8b\xd7\x6b\xe0\xe8\xd5\xfc\x25\x3a\xbb\x38\x9f\xcf\x9e\xab\x0a\x24\x54\xa5\x37\x52\x26\xaf\x17\x0b\x91\x17\x99\x13\xb6\xb8\x79\xb5\x10\x59\xdd\xf9\xef\x82\xd1\x6f\xf2\xc2\x8f\xf3\x4f\xb5\x1a\xea\xc7\x17\x45\x41\xc6\xd7\x8b\x90\xe3\x95\x7c\xf1\xf2\x7f\x8b\xca\x45\x3d\xb9\x4d\x32\xa6\xd8\xf2\x77\x08\x64\xfe\x8d\xc3\x9f\x29\xe1\xa0\x9a\xff\xed\x11\x42\x08\xcd\x8a\xd6\xb3\x9f\x15\x67\x74\xc5\xca\x7f\x27\x58\x6e\xc4\xec\x11\x42\xd7\x59\x5d\x1c\x86\x44\x12\x46\x71\x74\xc1\x59\x02\x5c\x12\x10\xb3\xd7\x68\x85\x23\x01\x59\x81\x04\x4b\x09\x9c\x6a\xbf\x7e\xce\x49\x7d\xba\x7b\x51\xfd\xa1\x44\xe2\xb0\x52\xac\x7d\xb3\x08\x61\x45\x68\x46\x56\x2c\x6e\x80\x86\x8c\xbf\xbd\x93\x40\x05\x61\x74\x96\x95\xbe\x7f\x84\xd0\x7d\x4e\xde\x42\xb7\xe4\xbe\x46\xbb\x14\x5b\x48\x4e\xe8\xba\x90\x05\xa1\x19\xd0\x34\xae\xc4\xce\xbe\xbc\x9a\xbf\x9c\x15\x7f\x5d\x57\xc5\x42\x10\x01\x27\x89\xe2\x48\x51\xb9\xda\x40\xd5\x87\x37\xc0\x15\x5f\x88\xad\x90\xdc\x10\x81\x42\x16\xa4\x31\x50\x39\x2f\x38\xad\xab\xb0\x53\xd8\xac\x94\x56\x6f\xc3\x84\xf4\x11\xa4\x50\xb3\xfa\xe9\xd3\x6f\x9f\x3e\xdf\x2f\xd0\xeb\x8f\x1f\x3f\x7e\xbc\xfe\xf6\xe9\xf7\xaf\x5f\x7f\xfc\x18\x7e\xfb\xec\xfb\xc7\xb3\x36\x79\x54\x43\xe8\x29\xc5\x31\x20\xc6\x11\x49\x9e\xe5\x12\x41\x66\xa0\xe8\xed\x1d\x8e\x93\x08\x5e\xa3\x27\x3b\xc3\x7c\xa2\x73\xba\xc4\x02\x2e\xb0\xdc\xf4\xe5\x76\xd1\xca\x96\xa2\x8a\x94\xcd\x21\xc9\x6c\xec\x2c\x70\x42\x9e\x34\x74\x9d\x19\x7c\xcd\x20\x9c\xea\x2e\x0a\xfe\x42\x84\xd4\x29\x04\x8c\x8a\xb4\x41\xa2\xc1\xdc\x19\x8a\x88\x90\x4a\x49\xef\xce\xdf\xbd\x45\x4a\x52\x81\x70\x10\x40\x22\x21\x44\xcb\x6d\xc5\xec\x4e\x3c\x1c\x45\xef\x57\x9a\xb5\x7d\xae\xfe\xe5\xe4\x31\x86\x90\xe0\xab\x6d\x02\x3b\x2e\xcb\x11\x90\xd9\xa8\xc6\x77\xc2\x59\x98\x06\xfd\xf9\x2e\x78\x45\x01\xa6\xa8\xa0\x71\x5c\xbe\x33\xa7\xd2\xd9\
x5f\x95\xef\xd9\xd5\xac\xfd\xdc\x5d\xbf\x5e\xb8\xd1\x3e\xc7\x31\x48\xe0\x5e\x4c\x14\x65\xdf\xb8\xa8\x71\x10\x09\xa3\xc2\xc7\x02\xcb\xa2\x4e\x5a\x02\x82\x94\x13\xb9\xf5\x30\xe6\xb2\xa4\xb5\xfe\x9b\x3e\x7a\xb2\x55\xd2\xa8\x4a\xbc\x16\xb6\x71\x8e\x39\xc7\xdb\x9d\xe1\x10\x09\x71\xbd\x9c\xb3\x41\x89\xd7\xa5\x89\xdc\x57\xb5\x53\x4a\xfe\x4c\xe1\xbc\xa0\x21\x79\x0a\x1a\x0f\x70\xa7\x5c\x08\x8e\xde\xb0\xc0\x43\x24\xad\x74\x63\x0e\xb1\xd9\x90\xe1\xb0\x2d\x13\xa7\x6d\x78\xfd\x04\x14\x38\x8e\x90\xaa\xce\x63\xac\x3e\x23\xbc\x64\xa9\xb4\xf8\x03\x63\xde\xcd\xbe\x16\x13\x4a\x55\xac\x82\x12\xc6\xac\xd4\x35\xf7\x22\xd4\x3a\xff\x22\x73\x0e\x6e\x51\xa0\x75\x1e\xae\x8f\xe8\x5d\xc7\x59\xe6\xe4\x9a\x18\x8d\xd6\x1c\x53\x84\x4d\xb7\x67\x28\x37\x09\x84\x69\x88\x12\x0e\x01\x11\x80\x32\xa2\xf5\xb9\xaa\xc6\xd9\x73\x53\xab\xfb\xb4\x2e\x20\xc6\x54\x92\xa0\x9a\xf3\x69\x1a\x2f\x81\x77\x37\xae\x53\x1a\xce\x40\xc4\xa8\x82\x1c\xb5\xef\xda\x24\x7d\xb9\x61\x69\x14\xa2\x25\xa0\x90\xac\x56\xc0\x81\x4a\xb4\xe2\x2c\xce\x4a\x64\x7a\x9a\x23\xf4\x13\x91\x3f\xa7\x4b\xf4\x63\x84\x6f\x18\x87\x10\xbd\xc3\xfc\x8f\x90\xdd\x52\x44\x04\xc2\x51\xc4\x6e\x21\x74\x48\x21\x81\xc7\xe2\xfd\xea\x12\xf8\x0d\x09\xf6\xe9\x47\x35\xaf\x67\xc4\x14\xf7\x22\x27\x97\xe1\xe2\x76\x2d\x06\x8c\x4a\x1c\x48\x3f\x73\x2d\x0b\x5b\x29\x45\x24\x00\x2a\x0c\x11\xec\x94\xca\xc2\xa6\xc1\x37\x21\x43\x83\x3b\x5f\x97\xf1\x43\x5e\x53\x73\x19\xa5\x36\xd8\x2d\x05\x2e\x34\x0b\xeb\x39\xfc\x1d\x63\x51\x01\xbd\x3d\xbb\x90\x84\x40\x25\x59\x6d\x09\x5d\xa3\x1c\x37\xe6\x5c\x16\x9a\x40\x09\x70\xc1\xe8\x82\xf1\x35\xa6\xe4\xdf\x99\x5c\x8e\x9e\x4d\x79\xb4\x27\x2f\xbf\x7e\xf8\x05\x25\x8c\x50\xa9\x98\x29\x90\x62\x60\xea\x75\xae\x13\xca\xbf\x2b\x1a\x29\x27\x76\xd6\x20\xc6\x64\x5f\xe6\x32\x1a\x08\x87\x21\x07\x21\xbc\xb4\xe4\xe0\x32\x67\xa6\xcd\xf3\x1e\xcd\xd9\x6b\xb6\x6f\x8e\x27\xa7\xed\xdb\xe7\xbc\xcc\x1a\x07\xce\x6f\x87\x33\xf0\xba\x51\x17\x22\x66\x78\x79\x8e\xce\xe5\x13\x81\x80\x06\x2c\xe5\x78\x0d\xa1\xb2\xb8\x54\xa8\x79\x09\xbd\xbf\x3c\x47\x01\x8b\x13\x2c\xc9\x32\xaa\xaa\x1d\xd5\xee\xab\x36\xbd\x6c\xfd\x54\x6c\xc8\x08\x01\x3c\xbd\xe7\x07\x88\xb0\x24\x37\x79\x90\x28\x4a\x1d\x10\x1a\x92\x1b\x12\xa6\x38\x42\x40\xc3\x4c\x43\x62\x8e\xae\x36\xb0\x45\x71\x2a\xa4\x9a\x23\x79\x59\xb1\xa8\xf2\xa4\x0c\x60\x9f\xcc\x8d\x40\xf5\x80\xca\xa8\x99\xc3\xa7\x85\x1f\x31\x25\xa9\x82\xc5\x6d\xbd\xd8\x36\x76\x7c\x02\x28\x97\xf6\x1d\x74\x3b\x11\x7e\x91\xae\x32\xf8\x6c\xf4\xe6\x7b\x9a\xa5\x1f\x62\xc6\x21\xcf\x9a\xe5\xed\x8b\x02\xf3\x2c\x33\x33\xdf\x00\xca\xc9\x09\xb4\x04\xf5\xa5\x08\xd7\xc3\x02\x18\x66\xf1\xab\x1e\x83\x37\x4c\xcd\x12\xc1\x1d\x50\xf6\xaa\xbd\xfe\xe2\x73\x48\x38\x08\xa0\x32\x9b\x18\x44\x86\x0b\x6a\xc1\xaa\x26\x96\x2d\x96\x3c\xa0\x54\x65\x73\xe3\x08\xb5\x8b\x99\xbd\x82\xbc\x9e\xc2\xe8\x53\x46\x83\x3f\x33\x54\x2b\x5b\xad\x92\x79\xd9\x8f\x5d\x93\x98\xf2\xe6\xc6\x1c\xe6\x9a\x9e\xfc\x43\x82\x31\x66\x8e\x53\x77\xfe\x90\xe7\xf3\xf6\xe9\x62\x23\x3f\x10\x93\x18\xae\x72\x1a\x9d\xf9\x48\xcb\xcc\x5a\x65\xc7\x4a\x04\xf0\xf3\xd5\xd5\x05\x8a\x41\x08\xbc\x86\x86\x43\x51\x6c\xe0\x46\x57\xf6\x44\x40\x0d\xfb\xff\xa2\xc3\x7c\x3d\x39\x84\xdc\x09\x22\x64\x4f\x12\xd9\xba\xaa\xf6\xe3\xbd\x56\xdd\x91\x25\x6a\x14\x9c\x89\x34\x8e\x31\xdf\xee\x15\x7e\x2f\x39\x81\x15\x2a\x28\x95\x66\x51\xf5\xfd\x83\xc5\xfe\x15\x07\xcf\xf7\x08\xee\x1d\x8e\xb6\xc5\x52\xcc\x8c\x5a\x93\x66\xc5\xd8\x79\x38\x46\xd6\xa7\x88\x37\xc9\x2e\xe3\xd2\xa5\x7b\x4b\x3a\xdc\xa1\xdc\x9e\x29\xf1\x8c\x8a\x99\x16\x47\x8d\xd4\x78\x8b\xf6\x1c\xe9\x71\x54\x1b\x69\xa8\x4a\x93\x37\xe5\xb2\x2c\x4f\x0c\x92\xab\xa0\x73\x32\x72\x59\xd3\xf0\x2d\x8d\xed\xca\x37\x16\x19\x9e\xdb\x1c
\xab\x17\x49\xc3\x0f\x37\xdc\x88\xb1\xb4\xd4\x42\xcb\x58\x5e\x6a\x52\x0b\x15\x10\x0a\xb0\x04\xe7\xf8\x58\x32\x16\x01\xa6\xcd\x01\xb2\xc2\x69\x24\x35\x38\x6f\x30\x6a\xae\x1b\xb4\x71\xaa\xad\x1d\xa0\xd6\x20\x2d\x8b\x3c\xc6\x82\x62\x27\x34\x6d\x15\x84\x7b\x43\xb1\x35\x78\xa6\x24\x77\x28\xc1\x6e\xfc\xe9\x48\x74\xf4\x15\xe3\xe1\x84\x42\x88\x40\x7a\x26\x49\x3b\x48\xb1\xa4\x19\x8e\x0c\xa7\xb5\x01\x6c\x0c\x97\x61\x8a\xc2\x32\xd8\x8c\x44\x69\x24\xbf\x65\x1d\x74\xd6\xe5\x44\xef\xec\x48\x5e\xb7\x8a\xa3\x29\x8e\x41\x64\xce\x1f\x88\xdc\x00\x47\x4b\x40\x98\x6e\xd1\x0d\x8e\x48\x98\x63\x5c\x21\xb1\x4c\x05\x0a\x58\x98\xc5\x6d\x4f\x0a\x77\x53\x4f\x8b\xc4\x44\x1f\xb2\xdf\x8d\x3b\xea\x9f\xfe\xf6\xf2\xc5\xff\x5d\x7f\xfe\x9f\xfb\x67\x8f\xff\xf3\xe9\x69\xd1\xfe\xb3\xc7\xfd\x3c\xf8\x3f\x71\x94\x82\x23\xd1\x72\x00\xb7\x42\x99\x6c\xc0\x60\x7b\x0f\x79\xea\xa8\x53\x4b\x56\x31\xfa\x0b\x52\x9f\x96\xdb\xcd\x2f\xd7\x67\xcd\x04\x19\x85\xfe\xdb\x02\x9a\x59\x03\xad\x63\x3c\xea\xff\x2e\x18\xfd\x00\xd9\xe2\x56\x60\x59\x93\xb9\xb6\xb2\x3e\x3c\x2c\xab\x0f\xa7\xb2\x89\x43\xc7\xf6\xd5\xce\x2e\xad\xa6\xa9\xed\xa6\xc6\x5a\xb4\xa6\x67\xdf\x8c\x26\x7b\x50\x5a\x91\x08\x2e\x6d\xd4\x3a\xc1\x9d\xf2\xdb\xde\x1e\xb2\x2c\x6c\xa5\x64\xc9\x16\xb4\x90\xaa\x4a\xb7\x0c\xde\x13\xc3\x2a\x9a\x11\x9b\x7a\x1b\x3d\x95\x97\x37\x31\x6b\x69\x7e\x34\xc0\x67\x1f\x66\x19\x49\xef\xf1\x25\xf5\xac\x0e\xea\x0a\x28\x8d\x4d\x7e\xd9\x57\x4b\x49\xe5\xc6\xb3\x25\xfd\xe6\x57\x42\x25\xac\xcd\xcf\x36\x74\x8e\xca\x24\x47\xe7\x80\xa8\x92\x72\xbd\x3d\x84\x2d\x65\xe2\x82\x1a\x9c\xc4\x44\x92\x1b\x10\x79\x8a\xc4\x4a\x2f\x60\x51\x04\x81\xaa\xf0\xa3\x95\x27\xd7\x12\x7b\xa3\x96\x03\x45\x96\xc1\x8a\x07\xc9\xb2\xb0\x95\x52\x8c\xef\x48\x9c\xc6\x7e\x94\xca\xc2\x0e\x07\x12\x44\xa9\x20\x37\xf0\xae\x0f\x49\xa3\x96\x9d\x4b\x42\x7b\x70\x59\x14\xee\xe0\xb2\x0f\x49\xa3\x96\x4b\x97\xbf\x00\x5d\x4b\x4f\xfc\xbb\x2b\xee\x92\xb9\x17\xb5\xaa\xb8\x0b\x97\x17\x9b\x43\xfd\xd6\xc2\xb2\xc2\x2e\x29\xcf\xfd\x87\x4a\x55\xda\x25\x63\x1f\x5a\x65\x69\x2b\x2d\x3d\x67\xe9\x41\xae\x5e\xc1\x6e\x2b\xd4\xdb\x3e\xa8\xd3\x26\xd2\x48\x92\x24\xca\x61\x86\x8f\x8c\xbb\xf2\x8e\x91\xdf\x1f\x06\x19\x33\xf3\x03\x4d\xba\xcd\xe2\x2d\xfb\x69\xe9\x16\x15\x13\xd5\x56\x85\x4e\x3c\x5b\x8a\xbf\x25\x72\x83\xee\x5e\x20\x22\xf2\xc8\xaa\x7b\xdb\x8e\xe4\x29\x58\xca\x38\xb7\x3f\x2e\x59\xb8\xbd\xa8\x16\x16\xf7\xdb\x79\x51\x9f\x5a\xb4\x8d\x87\x3a\x6e\xbc\x3e\xc5\xb4\xcd\x58\xf9\xf5\x3c\xb9\x6f\x49\xaf\x57\xc1\xfa\x1c\x5d\x6d\x88\x8a\x8b\xd3\x28\xcc\xb7\xef\x10\x8a\x4a\x74\xa9\x4a\xa7\x62\xbf\x0d\x76\x23\x6f\x59\xd9\x31\xee\x40\x11\xfb\x28\xec\x8d\x22\x1c\x13\x5a\x64\x94\x23\x16\x60\xbb\xd2\x7c\xa0\x98\xb2\xe5\x6e\xbc\x54\x33\xe0\x3e\xb9\x52\x17\xdb\xb7\x1b\xc8\x12\x20\x8c\x23\xca\x64\x7e\x78\xa3\x62\x5b\x75\x56\xd9\x9e\x2a\x91\x27\xb0\x70\x34\x1f\x90\x89\xb5\x86\x73\x7e\x71\xda\x1e\xfb\x3a\x72\xdc\x5e\x79\x88\xcb\x74\x79\xd9\x64\xe4\xd4\xc2\x9e\xce\xb1\xfe\x85\x5a\xc0\xe9\x0c\x34\x3d\xd0\x43\xce\xa1\x36\x39\xd5\xa1\x4e\xf5\xf8\xb1\xa9\x23\x08\x75\x84\xac\x53\x6c\x3a\xc5\xa6\x53\x6c\x3a\xc5\xa6\x7f\xc5\xd8\xf4\x51\xfd\xff\x25\x4e\xfa\x33\x05\xbe\x9d\x60\xd2\x04\x93\x6a\x5f\x33\x9b\x98\x50\xd2\xe1\x50\x52\xc6\xcc\xdb\x38\x91\xdb\xe6\xaa\xa2\x8f\xa1\x6a\xa6\xd4\xc6\x56\xd6\x8c\x40\x02\x68\x48\xe8\x1a\xe1\x9a\xd9\x2e\xb7\x05\xc3\x34\xda\x2a\xbb\xcd\x12\x36\x98\x22\x50\x4c\xa1\x1b\xc5\xd5\x84\xf0\xbe\x24\x84\xf7\x2f\x22\x37\xef\x94\xd7\x9f\xa0\xde\x04\xf5\x26\xa8\x37\x41\x3d\x64\x40\x3d\xe5\xf2\xde\x60\x89\x27\xb4\x37\xa1\xbd\xda\xd7\xd2\x2c\x26\xc0\x37\x01\x3e\x1b\xef\x5f\x06\xe0\x6b\x7c\x5c\x91\x08\x26\x10\x38\x81\xc0\x09\x04\x76\x4a\x3d\x8
1\xc0\xbf\x12\x08\x4c\xb0\xdc\x7c\x99\x00\xd0\x75\x70\xb4\xf8\x5a\x7c\xea\xde\x3e\x39\x08\x30\x5a\x27\x35\xed\xb4\x65\xad\x69\x74\x10\x88\x79\xe2\x30\x52\x19\xd6\x04\x21\xa7\x95\xd5\x0e\x03\xf8\xda\x20\xd7\x84\xb4\x26\xa4\x35\x21\xad\x09\x69\x21\x03\x69\x51\x46\xff\xff\x18\x9b\x54\xed\x87\x47\x06\x9d\x4e\x73\x6e\x9a\xb3\xa9\xce\x83\x5e\x4b\xc6\x71\x20\x45\xd7\x72\xf5\x40\x72\x0e\x34\x6c\xf4\x6c\xf3\xba\x5e\x4b\x97\x0e\x52\xb8\xbe\x8b\x79\xa0\x10\x86\xa1\x75\xb0\x6f\xec\xc8\xf4\x3d\x4d\x7b\x86\xc2\x02\x31\x12\x51\xbf\x07\x94\xad\x10\xd6\x2e\x79\xcf\xe9\x1c\xf5\x1e\x31\x23\x5c\x18\xfb\x9c\xfb\x70\xe0\x62\xbd\xf7\xb5\x94\xcf\xf3\xf6\xfa\xc5\x4e\x9c\x85\x76\x1d\xae\x37\xbc\xde\xa3\x41\xcb\x29\xd0\x5e\x70\x67\x50\x93\x6d\x98\xa8\xd3\x67\x0f\x68\xb1\xeb\x38\x47\x07\x10\x1b\xd2\xe2\x18\x68\x6d\x40\xbb\xa3\x40\xba\x21\xf2\x8e\x81\xfb\xf6\x92\x77\x2f\x70\xe8\xdb\xb2\x36\xbf\x30\x91\xc5\x21\xe7\x45\xcc\x34\x0c\x48\x8e\xd0\xf2\x9b\x7c\x3c\xbd\x1c\x04\x3e\x07\xe8\x7c\x2f\x84\x7a\x48\x4d\x1f\xba\xe1\x76\x45\x7b\x60\xe0\x01\xca\xee\x04\xca\x31\xbe\x73\x5f\xa3\x70\x0c\xad\x1f\xa5\xf5\x76\xd5\xbb\xd2\x7e\xfb\x30\x90\xcf\xfa\x67\x7a\xe6\xc3\x37\x42\x19\xe2\xc9\x9c\x61\x4c\xe7\xd1\x77\x55\x86\x6e\x8f\x7b\x85\x42\x33\xa3\xaa\x57\xae\xfd\xd5\xcc\x9c\x56\x68\xe2\xde\x0e\xa8\x2c\xa9\xb0\x7d\xf0\x54\x2d\x80\xf2\x48\x39\x3d\x98\x1a\x6d\x0b\x9d\xba\x53\xfb\xce\xf8\xd1\x7e\xbb\x60\x4f\x06\xf5\xce\xda\xab\xeb\xca\xcb\xd5\xac\x20\xda\x72\x3b\xa2\x4b\x38\xd7\xb5\x89\xbe\x42\xd9\xb9\x73\xc4\x0c\x6d\xb7\xd9\xf8\x8d\xbd\x3e\x9c\xf5\x53\x68\x48\x14\x36\x8f\x09\xc5\x92\xf1\x21\xd1\x09\x07\x1c\xbe\xa7\x91\xf3\x6a\xc8\xc1\x57\xb0\xdd\xc5\xc6\x1d\xad\x76\x1d\xa8\x82\x0e\x4c\x38\xfe\xa5\x8c\xc5\x0a\x40\x5d\xa1\xbb\x98\xd1\xfb\x74\x61\xed\x1a\x98\xaf\x3c\x8c\x1e\xe3\xc2\x92\x29\x74\x3e\x99\xd0\xf9\x41\x50\xd0\x38\x4b\x57\x7e\x5b\x7a\x0e\xe6\xce\x4e\xd7\x19\x35\x57\xbb\x3c\x3c\xd2\x5e\x4f\x4b\x4c\xf7\x0f\x4d\x2b\x91\x5d\x94\xa6\x95\xc8\x69\x25\x72\x5a\x89\x7c\xb8\x95\xc8\x07\x80\x8c\xda\x9c\x64\x7b\xb7\x71\xdf\x57\x12\x4b\x9a\x1f\x72\x0c\x13\x03\xad\x3c\xd5\x4e\xde\x8e\x57\x13\x6d\x34\x86\xcf\x97\xe6\xa4\x68\xc4\xb0\xf6\xc9\xc2\xeb\x8d\x0b\xd7\xcd\xfe\xba\xa6\xf5\x30\xeb\x30\x33\xbe\xc7\x56\x27\xab\x08\xd9\x6d\xbb\x09\xee\x7c\x2d\xcf\xee\x87\x38\xac\xc8\xdd\x90\x9a\x58\x4a\x4e\x96\xa9\x79\x79\xf3\xde\x20\xf0\x96\xe3\x24\x19\xeb\xba\xf2\x53\x19\xab\x12\xaf\x47\xb3\xa0\x3e\xef\x9b\x8d\x6d\x6d\x7b\xde\x3b\x3b\x1a\xc0\x3f\x95\x7e\xed\x78\xfb\x76\xb8\xaf\xb3\xdd\xc5\xeb\x95\xed\x5a\x62\x41\x82\xb3\x54\x6e\x80\x4a\x92\x6f\x36\xbd\x34\xae\xde\x6f\xa4\xc0\xbc\x08\xe3\x84\xfc\x1d\xb6\xe3\xd0\x62\x38\x95\x9b\x57\xe7\x71\x12\x91\x80\xc8\x31\x69\x5e\x60\x21\x6e\x19\x0f\xc7\xa4\x79\x96\x28\x3e\x47\x54\x65\x41\x36\x08\x40\x88\x1f\x58\x08\x56\xaa\xd5\xbf\xaf\xad\x96\xd7\xd6\xcf\x87\xf5\x34\x0f\x71\x93\x6e\x26\xed\x98\x5b\x9f\x4f\xcf\x95\x34\xc6\xd7\x11\xfa\xb0\x81\x22\x1a\xdb\xdf\x8e\xdc\xc3\xb9\xf8\xdd\x5d\x3c\x74\xe6\xea\xb7\x8b\xbf\xf5\x6e\xb3\x46\x2e\x64\xf4\xab\x3c\x4e\xcf\x36\x1d\xfe\xfa\xb8\x36\xba\x8a\xd8\xad\xf6\xc6\x41\x2a\x37\x8c\x17\x0f\xda\xfe\xda\xe7\x65\xbc\x71\x2c\x36\x57\x8a\x47\x12\x4c\xf1\xbd\x77\x6b\xa4\x50\x7e\x77\x7b\x22\x60\x89\xef\xcd\xf5\xb9\x0c\x97\x79\x0d\x2b\x35\x43\xcb\x3d\x24\xf1\x78\xfc\xf8\xcb\x1f\x15\x06\xe2\x78\xd8\x51\x21\xd9\x1f\xf0\xf5\x8f\x86\xa4\x50\xfa\xb1\x47\x43\xa5\xdd\x69\x14\xe8\xa3\xc0\x86\x91\xa7\x81\x50\xb4\x7c\xc0\x81\x80\x77\x7a\x9f\xc6\xc2\xa9\x8c\x05\x33\xb0\x3b\x31\xa4\xf4\xd7\x1b\x26\x55\x97\x7c\x65\xf8\x69\x1a\x84\x8e\x41\x78\xd9\xec\xc5\x11\x16\x1e\x74\x91\xf5\x56\xf5\x57\x49\x47\x5c\x92\x
a9\x1e\x99\x36\xf4\xdb\xb1\x0e\xd3\x78\x02\xb0\x9b\x25\xcb\xe9\xe9\x1d\x0d\x44\x01\x42\x08\x91\x64\xd9\xdd\x37\x08\x17\xef\xf9\xe5\x0f\xbd\x46\x91\xf5\xf9\x89\x92\x37\xdd\x89\x59\x44\x1f\x9c\xee\x34\x1e\xbe\x47\x83\x32\x72\x8e\x37\xdf\xac\x69\x38\xef\x75\xb0\xda\xdb\xac\x83\x94\x2f\x39\xa6\x62\x05\x1c\x25\x9c\x49\x16\xb0\xa8\x3c\xc7\x7e\x76\x71\x3e\x6f\xb5\x24\xe7\xe8\xb7\xb9\xc7\x6c\x43\x92\xee\x21\xd4\x17\xa1\x7f\xba\x35\xfe\xae\x39\xbc\xde\xba\x69\xd9\x8e\xe1\x62\xde\x64\x7d\x16\x88\x1b\xed\x29\x11\xfd\x4f\xa9\xff\x99\x90\xc4\xf6\xf4\xf9\x6e\xe9\x28\x23\xd7\xca\xe5\xee\xee\x9f\x63\xb1\x5b\xfb\x10\xd7\x2f\x1d\xf2\xe3\xbf\xb9\xb5\x6f\xa4\x6d\x7d\x25\x79\xfb\x24\x31\xea\x56\xbe\x5d\x53\xcd\x2d\x36\xa3\x6d\xdf\xab\x1c\xb8\x6d\x6f\xc0\x98\xa7\xdd\xaa\x86\x8c\x1d\x39\xa3\x9d\x70\x2b\x9b\x68\xd9\xfd\x33\xfe\xa9\xb6\x4a\x2e\x63\x0f\xcf\x68\x27\xd9\x4c\xb9\x46\x6d\xcb\xbe\xa1\xa8\xd6\x5f\xc6\xd6\x9f\xf1\x4f\xf4\xd4\xb4\x78\xd0\xd6\xf4\x13\x3c\x3b\xac\xd0\xdc\x90\x34\xda\xc9\xb4\x9a\x1a\x8d\xbd\x93\x87\xd4\xe2\x21\x1b\xb3\x2b\xd1\xbe\xe7\x69\xd4\x53\x67\xd5\x40\xa0\xe3\x19\x3f\x6d\x1a\xbc\x0e\x86\x3c\x10\xb4\x3d\x2a\xcd\x78\x32\xe6\xab\xbd\x36\xc9\xf4\x3a\x58\xae\xc3\xf4\x47\xea\xbf\xfb\x47\xff\x0d\x00\x00\xff\xff\xd2\x32\x5a\x28\x38\x9d\x00\x00") - -func v2SchemaJsonBytes() ([]byte, error) { - return bindataRead( - _v2SchemaJson, - "v2/schema.json", - ) -} - -func v2SchemaJson() (*asset, error) { - bytes, err := v2SchemaJsonBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "v2/schema.json", size: 40248, mode: os.FileMode(0640), modTime: time.Unix(1568964748, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xab, 0x88, 0x5e, 0xf, 0xbf, 0x17, 0x74, 0x0, 0xb2, 0x5a, 0x7f, 0xbc, 0x58, 0xcd, 0xc, 0x25, 0x73, 0xd5, 0x29, 0x1c, 0x7a, 0xd0, 0xce, 0x79, 0xd4, 0x89, 0x31, 0x27, 0x90, 0xf2, 0xff, 0xe6}} - return a, nil -} - -// Asset loads and returns the asset for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func Asset(name string) ([]byte, error) { - canonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[canonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) - } - return a.bytes, nil - } - return nil, fmt.Errorf("Asset %s not found", name) -} - -// AssetString returns the asset contents as a string (instead of a []byte). -func AssetString(name string) (string, error) { - data, err := Asset(name) - return string(data), err -} - -// MustAsset is like Asset but panics when Asset would return an error. -// It simplifies safe initialization of global variables. -func MustAsset(name string) []byte { - a, err := Asset(name) - if err != nil { - panic("asset: Asset(" + name + "): " + err.Error()) - } - - return a -} - -// MustAssetString is like AssetString but panics when Asset would return an -// error. It simplifies safe initialization of global variables. -func MustAssetString(name string) string { - return string(MustAsset(name)) -} - -// AssetInfo loads and returns the asset info for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. 
-func AssetInfo(name string) (os.FileInfo, error) { - canonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[canonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) - } - return a.info, nil - } - return nil, fmt.Errorf("AssetInfo %s not found", name) -} - -// AssetDigest returns the digest of the file with the given name. It returns an -// error if the asset could not be found or the digest could not be loaded. -func AssetDigest(name string) ([sha256.Size]byte, error) { - canonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[canonicalName]; ok { - a, err := f() - if err != nil { - return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err) - } - return a.digest, nil - } - return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name) -} - -// Digests returns a map of all known files and their checksums. -func Digests() (map[string][sha256.Size]byte, error) { - mp := make(map[string][sha256.Size]byte, len(_bindata)) - for name := range _bindata { - a, err := _bindata[name]() - if err != nil { - return nil, err - } - mp[name] = a.digest - } - return mp, nil -} - -// AssetNames returns the names of the assets. -func AssetNames() []string { - names := make([]string, 0, len(_bindata)) - for name := range _bindata { - names = append(names, name) - } - return names -} - -// _bindata is a table, holding each asset generator, mapped to its name. -var _bindata = map[string]func() (*asset, error){ - "jsonschema-draft-04.json": jsonschemaDraft04Json, - - "v2/schema.json": v2SchemaJson, -} - -// AssetDir returns the file names below a certain -// directory embedded in the file by go-bindata. -// For example if you run go-bindata on data/... and data contains the -// following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png -// then AssetDir("data") would return []string{"foo.txt", "img"}, -// AssetDir("data/img") would return []string{"a.png", "b.png"}, -// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and -// AssetDir("") will return []string{"data"}. -func AssetDir(name string) ([]string, error) { - node := _bintree - if len(name) != 0 { - canonicalName := strings.Replace(name, "\\", "/", -1) - pathList := strings.Split(canonicalName, "/") - for _, p := range pathList { - node = node.Children[p] - if node == nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - } - } - if node.Func != nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - rv := make([]string, 0, len(node.Children)) - for childName := range node.Children { - rv = append(rv, childName) - } - return rv, nil -} - -type bintree struct { - Func func() (*asset, error) - Children map[string]*bintree -} - -var _bintree = &bintree{nil, map[string]*bintree{ - "jsonschema-draft-04.json": {jsonschemaDraft04Json, map[string]*bintree{}}, - "v2": {nil, map[string]*bintree{ - "schema.json": {v2SchemaJson, map[string]*bintree{}}, - }}, -}} - -// RestoreAsset restores an asset under the given directory. 
-func RestoreAsset(dir, name string) error { - data, err := Asset(name) - if err != nil { - return err - } - info, err := AssetInfo(name) - if err != nil { - return err - } - err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) - if err != nil { - return err - } - err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) - if err != nil { - return err - } - return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) -} - -// RestoreAssets restores an asset under the given directory recursively. -func RestoreAssets(dir, name string) error { - children, err := AssetDir(name) - // File - if err != nil { - return RestoreAsset(dir, name) - } - // Dir - for _, child := range children { - err = RestoreAssets(dir, filepath.Join(name, child)) - if err != nil { - return err - } - } - return nil -} - -func _filePath(dir, name string) string { - canonicalName := strings.Replace(name, "\\", "/", -1) - return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...) -} diff --git a/vendor/github.com/go-openapi/spec/embed.go b/vendor/github.com/go-openapi/spec/embed.go new file mode 100644 index 000000000..1f4284750 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/embed.go @@ -0,0 +1,17 @@ +package spec + +import ( + "embed" + "path" +) + +//go:embed schemas/*.json schemas/*/*.json +var assets embed.FS + +func jsonschemaDraft04JSONBytes() ([]byte, error) { + return assets.ReadFile(path.Join("schemas", "jsonschema-draft-04.json")) +} + +func v2SchemaJSONBytes() ([]byte, error) { + return assets.ReadFile(path.Join("schemas", "v2", "schema.json")) +} diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go index d4ea889d4..b81a5699a 100644 --- a/vendor/github.com/go-openapi/spec/expander.go +++ b/vendor/github.com/go-openapi/spec/expander.go @@ -27,7 +27,6 @@ import ( // all relative $ref's will be resolved from there. // // PathLoader injects a document loading method. By default, this resolves to the function provided by the SpecLoader package variable. -// type ExpandOptions struct { RelativeBase string // the path to the root document to expand. This is a file, not a directory SkipSchemas bool // do not expand schemas, just paths, parameters and responses @@ -58,7 +57,7 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error { if !options.SkipSchemas { for key, definition := range spec.Definitions { parentRefs := make([]string, 0, 10) - parentRefs = append(parentRefs, fmt.Sprintf("#/definitions/%s", key)) + parentRefs = append(parentRefs, "#/definitions/"+key) def, err := expandSchema(definition, parentRefs, resolver, specBasePath) if resolver.shouldStopOnError(err) { @@ -103,15 +102,21 @@ const rootBase = ".root" // baseForRoot loads in the cache the root document and produces a fake ".root" base path entry // for further $ref resolution -// -// Setting the cache is optional and this parameter may safely be left to nil. 
func baseForRoot(root interface{}, cache ResolutionCache) string { - if root == nil { - return "" - } - // cache the root document to resolve $ref's normalizedBase := normalizeBase(rootBase) + + if root == nil { + // ensure that we never leave a nil root: always cache the root base pseudo-document + cachedRoot, found := cache.Get(normalizedBase) + if found && cachedRoot != nil { + // the cache is already preloaded with a root + return normalizedBase + } + + root = map[string]interface{}{} + } + cache.Set(normalizedBase, root) return normalizedBase @@ -208,7 +213,19 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba } if target.Ref.String() != "" { - return expandSchemaRef(target, parentRefs, resolver, basePath) + if !resolver.options.SkipSchemas { + return expandSchemaRef(target, parentRefs, resolver, basePath) + } + + // when "expand" with SkipSchema, we just rebase the existing $ref without replacing + // the full schema. + rebasedRef, err := NewRef(normalizeURI(target.Ref.String(), basePath)) + if err != nil { + return nil, err + } + target.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) + + return &target, nil } for k := range target.Definitions { @@ -520,21 +537,25 @@ func getRefAndSchema(input interface{}) (*Ref, *Schema, error) { } func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePath string) error { - ref, _, err := getRefAndSchema(input) + ref, sch, err := getRefAndSchema(input) if err != nil { return err } - if ref == nil { + if ref == nil && sch == nil { // nothing to do return nil } parentRefs := make([]string, 0, 10) - if err = resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) { - return err + if ref != nil { + // dereference this $ref + if err = resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) { + return err + } + + ref, sch, _ = getRefAndSchema(input) } - ref, sch, _ := getRefAndSchema(input) if ref.String() != "" { transitiveResolver := resolver.transitiveResolver(basePath, *ref) basePath = resolver.updateBasePath(transitiveResolver, basePath) @@ -546,6 +567,7 @@ func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePa if ref != nil { *ref = Ref{} } + return nil } @@ -555,38 +577,29 @@ func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePa return ern } - switch { - case resolver.isCircular(&rebasedRef, basePath, parentRefs...): + if resolver.isCircular(&rebasedRef, basePath, parentRefs...) 
{ // this is a circular $ref: stop expansion if !resolver.options.AbsoluteCircularRef { sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) } else { sch.Ref = rebasedRef } - case !resolver.options.SkipSchemas: - // schema expanded to a $ref in another root - sch.Ref = rebasedRef - debugLog("rebased to: %s", sch.Ref.String()) - default: - // skip schema expansion but rebase $ref to schema - sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) } } + // $ref expansion or rebasing is performed by expandSchema below if ref != nil { *ref = Ref{} } // expand schema - if !resolver.options.SkipSchemas { - s, err := expandSchema(*sch, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return err - } - if s == nil { - // guard for when continuing on error - return nil - } + // yes, we do it even if options.SkipSchema is true: we have to go down that rabbit hole and rebase nested $ref) + s, err := expandSchema(*sch, parentRefs, resolver, basePath) + if resolver.shouldStopOnError(err) { + return err + } + + if s != nil { // guard for when continuing on error *sch = *s } diff --git a/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go index 2df072315..f19f1a8fb 100644 --- a/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go +++ b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go @@ -40,5 +40,5 @@ func repairURI(in string) (*url.URL, string) { return u, "" } -func fixWindowsURI(u *url.URL, in string) { +func fixWindowsURI(_ *url.URL, _ string) { } diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go index 995ce6acb..a69cca881 100644 --- a/vendor/github.com/go-openapi/spec/operation.go +++ b/vendor/github.com/go-openapi/spec/operation.go @@ -217,9 +217,12 @@ func (o *Operation) AddParam(param *Parameter) *Operation { for i, p := range o.Parameters { if p.Name == param.Name && p.In == param.In { - params := append(o.Parameters[:i], *param) + params := make([]Parameter, 0, len(o.Parameters)+1) + params = append(params, o.Parameters[:i]...) + params = append(params, *param) params = append(params, o.Parameters[i+1:]...) o.Parameters = params + return o } } diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go index 2b2b89b67..bd4f1cdb0 100644 --- a/vendor/github.com/go-openapi/spec/parameter.go +++ b/vendor/github.com/go-openapi/spec/parameter.go @@ -84,27 +84,27 @@ type ParamProps struct { // Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). // // There are five possible parameter types. -// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part -// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, -// the path parameter is `itemId`. -// * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. -// * Header - Custom headers that are expected as part of the request. -// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be -// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for -// documentation purposes only. 
Since Form parameters are also in the payload, body and form parameters cannot exist -// together for the same operation. -// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or -// `multipart/form-data` are used as the content type of the request (in Swagger's definition, -// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used -// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be -// declared together with a body parameter for the same operation. Form parameters have a different format based on -// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). -// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. -// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple -// parameters that are being transferred. -// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. -// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is -// `submit-name`. This type of form parameters is more commonly used for file transfers. +// - Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part +// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, +// the path parameter is `itemId`. +// - Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. +// - Header - Custom headers that are expected as part of the request. +// - Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be +// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for +// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist +// together for the same operation. +// - Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or +// `multipart/form-data` are used as the content type of the request (in Swagger's definition, +// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used +// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be +// declared together with a body parameter for the same operation. Form parameters have a different format based on +// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). +// - `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. +// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple +// parameters that are being transferred. +// - `multipart/form-data` - each parameter takes a section in the payload with an internal header. +// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is +// `submit-name`. This type of form parameters is more commonly used for file transfers. 
// // For more information: http://goo.gl/8us55a#parameterObject type Parameter struct { diff --git a/vendor/github.com/go-openapi/spec/schema_loader.go b/vendor/github.com/go-openapi/spec/schema_loader.go index b81175afd..0059b99ae 100644 --- a/vendor/github.com/go-openapi/spec/schema_loader.go +++ b/vendor/github.com/go-openapi/spec/schema_loader.go @@ -168,14 +168,7 @@ func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) normalized := normalizeBase(pth) debugLog("loading doc from: %s", normalized) - unescaped, err := url.PathUnescape(normalized) - if err != nil { - return nil, url.URL{}, false, err - } - - u := url.URL{Path: unescaped} - - data, fromCache := r.cache.Get(u.RequestURI()) + data, fromCache := r.cache.Get(normalized) if fromCache { return data, toFetch, fromCache, nil } diff --git a/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json b/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json new file mode 100644 index 000000000..bcbb84743 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json @@ -0,0 +1,149 @@ +{ + "id": "http://json-schema.org/draft-04/schema#", + "$schema": "http://json-schema.org/draft-04/schema#", + "description": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "positiveInteger": { + "type": "integer", + "minimum": 0 + }, + "positiveIntegerDefault0": { + "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] + }, + "simpleTypes": { + "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "minItems": 1, + "uniqueItems": true + } + }, + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "$schema": { + "type": "string" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "minimum": 0, + "exclusiveMinimum": true + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "boolean", + "default": false + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "boolean", + "default": false + }, + "maxLength": { "$ref": "#/definitions/positiveInteger" }, + "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/positiveInteger" }, + "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxProperties": { "$ref": "#/definitions/positiveInteger" }, + "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + "type": "object", + 
"additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "dependencies": { + "exclusiveMaximum": [ "maximum" ], + "exclusiveMinimum": [ "minimum" ] + }, + "default": {} +} diff --git a/vendor/github.com/go-openapi/spec/schemas/v2/schema.json b/vendor/github.com/go-openapi/spec/schemas/v2/schema.json new file mode 100644 index 000000000..ebe10ed32 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/schemas/v2/schema.json @@ -0,0 +1,1607 @@ +{ + "title": "A JSON Schema for Swagger 2.0 API.", + "id": "http://swagger.io/v2/schema.json#", + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "required": [ + "swagger", + "info", + "paths" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "swagger": { + "type": "string", + "enum": [ + "2.0" + ], + "description": "The Swagger version of this document." + }, + "info": { + "$ref": "#/definitions/info" + }, + "host": { + "type": "string", + "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$", + "description": "The host (name or ip) of the API. Example: 'swagger.io'" + }, + "basePath": { + "type": "string", + "pattern": "^/", + "description": "The base path to the API. Example: '/api'." + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "consumes": { + "description": "A list of MIME types accepted by the API.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "paths": { + "$ref": "#/definitions/paths" + }, + "definitions": { + "$ref": "#/definitions/definitions" + }, + "parameters": { + "$ref": "#/definitions/parameterDefinitions" + }, + "responses": { + "$ref": "#/definitions/responseDefinitions" + }, + "security": { + "$ref": "#/definitions/security" + }, + "securityDefinitions": { + "$ref": "#/definitions/securityDefinitions" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." + }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed." + }, + "termsOfService": { + "type": "string", + "description": "The terms of service for the API." 
+ }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." + }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." + }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "paths": { + "type": "object", + "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + }, + "^/": { + "$ref": "#/definitions/pathItem" + } + }, + "additionalProperties": false + }, + "definitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "One or more JSON objects describing the schemas being consumed and produced by the API." + }, + "parameterDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + }, + "description": "One or more JSON representations for parameters" + }, + "responseDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/response" + }, + "description": "One or more JSON representations for responses" + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "examples": { + "type": "object", + "additionalProperties": true + }, + "mimeType": { + "type": "string", + "description": "The MIME type of the HTTP message." + }, + "operation": { + "type": "object", + "required": [ + "responses" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the operation." + }, + "description": { + "type": "string", + "description": "A longer description of the operation, GitHub Flavored Markdown is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string", + "description": "A unique identifier of the operation." 
+ }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "consumes": { + "description": "A list of MIME types the API can consume.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "parameters": { + "$ref": "#/definitions/parametersList" + }, + "responses": { + "$ref": "#/definitions/responses" + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "security": { + "$ref": "#/definitions/security" + } + } + }, + "pathItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "get": { + "$ref": "#/definitions/operation" + }, + "put": { + "$ref": "#/definitions/operation" + }, + "post": { + "$ref": "#/definitions/operation" + }, + "delete": { + "$ref": "#/definitions/operation" + }, + "options": { + "$ref": "#/definitions/operation" + }, + "head": { + "$ref": "#/definitions/operation" + }, + "patch": { + "$ref": "#/definitions/operation" + }, + "parameters": { + "$ref": "#/definitions/parametersList" + } + } + }, + "responses": { + "type": "object", + "description": "Response objects names can either be any valid HTTP status code or 'default'.", + "minProperties": 1, + "additionalProperties": false, + "patternProperties": { + "^([0-9]{3})$|^(default)$": { + "$ref": "#/definitions/responseValue" + }, + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "not": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + } + }, + "responseValue": { + "oneOf": [ + { + "$ref": "#/definitions/response" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "response": { + "type": "object", + "required": [ + "description" + ], + "properties": { + "description": { + "type": "string" + }, + "schema": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "$ref": "#/definitions/fileSchema" + } + ] + }, + "headers": { + "$ref": "#/definitions/headers" + }, + "examples": { + "$ref": "#/definitions/examples" + } + }, + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "headers": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/header" + } + }, + "header": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": 
"#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "vendorExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "bodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "schema" + ], + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "body" + ] + }, + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "schema": { + "$ref": "#/definitions/schema" + } + }, + "additionalProperties": false + }, + "headerParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "header" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "queryParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "query" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. 
This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "formDataParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "formData" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array", + "file" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "pathParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "required" + ], + "properties": { + "required": { + "type": "boolean", + "enum": [ + true + ], + "description": "Determines whether or not this parameter is required or optional." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "path" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "nonBodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "type" + ], + "oneOf": [ + { + "$ref": "#/definitions/headerParameterSubSchema" + }, + { + "$ref": "#/definitions/formDataParameterSubSchema" + }, + { + "$ref": "#/definitions/queryParameterSubSchema" + }, + { + "$ref": "#/definitions/pathParameterSubSchema" + } + ] + }, + "parameter": { + "oneOf": [ + { + "$ref": "#/definitions/bodyParameter" + }, + { + "$ref": "#/definitions/nonBodyParameter" + } + ] + }, + "schema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "maxProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "enum": { + "$ref": 
"http://json-schema.org/draft-04/schema#/properties/enum" + }, + "additionalProperties": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "type": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/type" + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "discriminator": { + "type": "string" + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "xml": { + "$ref": "#/definitions/xml" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "fileSchema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "type" + ], + "properties": { + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "type": { + "type": "string", + "enum": [ + "file" + ] + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "primitivesItems": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/securityRequirement" + }, + "uniqueItems": true + }, + "securityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + }, + "xml": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + 
"prefix": { + "type": "string" + }, + "attribute": { + "type": "boolean", + "default": false + }, + "wrapped": { + "type": "boolean", + "default": false + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "securityDefinitions": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/basicAuthenticationSecurity" + }, + { + "$ref": "#/definitions/apiKeySecurity" + }, + { + "$ref": "#/definitions/oauth2ImplicitSecurity" + }, + { + "$ref": "#/definitions/oauth2PasswordSecurity" + }, + { + "$ref": "#/definitions/oauth2ApplicationSecurity" + }, + { + "$ref": "#/definitions/oauth2AccessCodeSecurity" + } + ] + } + }, + "basicAuthenticationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "basic" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "apiKeySecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ImplicitSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "implicit" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2PasswordSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "password" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ApplicationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "application" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2AccessCodeSecurity": { + "type": "object", + 
"additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "accessCode" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2Scopes": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "mediaTypeList": { + "type": "array", + "items": { + "$ref": "#/definitions/mimeType" + }, + "uniqueItems": true + }, + "parametersList": { + "type": "array", + "description": "The parameters needed to send a valid API call.", + "additionalItems": false, + "items": { + "oneOf": [ + { + "$ref": "#/definitions/parameter" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "uniqueItems": true + }, + "schemesList": { + "type": "array", + "description": "The transfer protocol of the API.", + "items": { + "type": "string", + "enum": [ + "http", + "https", + "ws", + "wss" + ] + }, + "uniqueItems": true + }, + "collectionFormat": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes" + ], + "default": "csv" + }, + "collectionFormatWithMulti": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes", + "multi" + ], + "default": "csv" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + }, + "jsonReference": { + "type": "object", + "required": [ + "$ref" + ], + "additionalProperties": false, + "properties": { + "$ref": { + "type": "string" + } + } + } + } +} diff --git a/vendor/github.com/go-openapi/spec/spec.go b/vendor/github.com/go-openapi/spec/spec.go index 7d38b6e62..876aa1275 100644 --- a/vendor/github.com/go-openapi/spec/spec.go +++ b/vendor/github.com/go-openapi/spec/spec.go @@ -26,7 +26,7 @@ import ( const ( // SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs 
SwaggerSchemaURL = "http://swagger.io/v2/schema.json#" - // JSONSchemaURL the url for the json schema schema + // JSONSchemaURL the url for the json schema JSONSchemaURL = "http://json-schema.org/draft-04/schema#" ) @@ -41,7 +41,7 @@ func MustLoadJSONSchemaDraft04() *Schema { // JSONSchemaDraft04 loads the json schema document for json shema draft04 func JSONSchemaDraft04() (*Schema, error) { - b, err := Asset("jsonschema-draft-04.json") + b, err := jsonschemaDraft04JSONBytes() if err != nil { return nil, err } @@ -65,7 +65,7 @@ func MustLoadSwagger20Schema() *Schema { // Swagger20Schema loads the swagger 2.0 schema from the embedded assets func Swagger20Schema() (*Schema, error) { - b, err := Asset("v2/schema.json") + b, err := v2SchemaJSONBytes() if err != nil { return nil, err } diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go index 44722ffd5..1590fd175 100644 --- a/vendor/github.com/go-openapi/spec/swagger.go +++ b/vendor/github.com/go-openapi/spec/swagger.go @@ -253,7 +253,7 @@ func (s SchemaOrBool) MarshalJSON() ([]byte, error) { // UnmarshalJSON converts this bool or schema object from a JSON structure func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { var nw SchemaOrBool - if len(data) >= 4 { + if len(data) > 0 { if data[0] == '{' { var sch Schema if err := json.Unmarshal(data, &sch); err != nil { @@ -261,7 +261,7 @@ func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { } nw.Schema = &sch } - nw.Allows = !(data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e') + nw.Allows = !bytes.Equal(data, []byte("false")) } *s = nw return nil diff --git a/vendor/github.com/go-openapi/spec/url_go18.go b/vendor/github.com/go-openapi/spec/url_go18.go deleted file mode 100644 index 60b785153..000000000 --- a/vendor/github.com/go-openapi/spec/url_go18.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build !go1.19 -// +build !go1.19 - -package spec - -import "net/url" - -var parseURL = url.Parse diff --git a/vendor/github.com/go-openapi/spec/url_go19.go b/vendor/github.com/go-openapi/spec/url_go19.go index 392e3e639..5bdfe40bc 100644 --- a/vendor/github.com/go-openapi/spec/url_go19.go +++ b/vendor/github.com/go-openapi/spec/url_go19.go @@ -1,6 +1,3 @@ -//go:build go1.19 -// +build go1.19 - package spec import "net/url" diff --git a/vendor/github.com/go-openapi/swag/string_bytes.go b/vendor/github.com/go-openapi/swag/string_bytes.go index c52d6bf71..90745d5ca 100644 --- a/vendor/github.com/go-openapi/swag/string_bytes.go +++ b/vendor/github.com/go-openapi/swag/string_bytes.go @@ -2,21 +2,7 @@ package swag import "unsafe" -type internalString struct { - Data unsafe.Pointer - Len int -} - // hackStringBytes returns the (unsafe) underlying bytes slice of a string. 
-func hackStringBytes(str string) []byte { - p := (*internalString)(unsafe.Pointer(&str)).Data - return unsafe.Slice((*byte)(p), len(str)) -} - -/* - * go1.20 version (for when go mod moves to a go1.20 requirement): - func hackStringBytes(str string) []byte { return unsafe.Slice(unsafe.StringData(str), len(str)) } -*/ diff --git a/vendor/github.com/go-openapi/validate/.golangci.yml b/vendor/github.com/go-openapi/validate/.golangci.yml index 81818ca67..22f8d21cc 100644 --- a/vendor/github.com/go-openapi/validate/.golangci.yml +++ b/vendor/github.com/go-openapi/validate/.golangci.yml @@ -1,12 +1,14 @@ linters-settings: govet: check-shadowing: true + golint: + min-confidence: 0 gocyclo: - min-complexity: 50 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 100 + threshold: 200 goconst: min-len: 2 min-occurrences: 3 @@ -15,36 +17,45 @@ linters: enable-all: true disable: - maligned + - unparam - lll + - gochecknoinits + - gochecknoglobals + - funlen - godox - gocognit - whitespace - wsl - - funlen - - gochecknoglobals - - gochecknoinits - - scopelint - wrapcheck - - exhaustivestruct - - exhaustive - - nlreturn - testpackage - - gci - - gofumpt - - goerr113 + - nlreturn - gomnd - - tparallel + - exhaustivestruct + - goerr113 + - errorlint - nestif - godot - - tparallel + - gofumpt - paralleltest - - cyclop # because we have gocyclo already - # TODO: review the linters below. We disabled them to make the CI pass first. - - ireturn - - varnamelen - - forcetypeassert + - tparallel - thelper - # Disable deprecated linters. - # They will be removed from golangci-lint in future. + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode - interfacer - - golint \ No newline at end of file + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/validate/BENCHMARK.md b/vendor/github.com/go-openapi/validate/BENCHMARK.md new file mode 100644 index 000000000..79cf6a077 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/BENCHMARK.md @@ -0,0 +1,31 @@ +# Benchmark + +Validating the Kubernetes Swagger API + +## v0.22.6: 60,000,000 allocs +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/validate +cpu: AMD Ryzen 7 5800X 8-Core Processor +Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 8549863982 ns/op 7067424936 B/op 59583275 allocs/op +``` + +## After refact PR: minor but noticable improvements: 25,000,000 allocs +``` +go test -bench Spec +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/validate +cpu: AMD Ryzen 7 5800X 8-Core Processor +Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 4064535557 ns/op 3379715592 B/op 25320330 allocs/op +``` + +## After reduce GC pressure PR: 17,000,000 allocs +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/validate +cpu: AMD Ryzen 7 5800X 8-Core Processor +Benchmark_KubernetesSpec/validating_kubernetes_API-16 1 3758414145 ns/op 2593881496 B/op 17111373 allocs/op +``` diff --git a/vendor/github.com/go-openapi/validate/README.md b/vendor/github.com/go-openapi/validate/README.md index ea2d68cb6..e8e1bb218 100644 --- a/vendor/github.com/go-openapi/validate/README.md +++ b/vendor/github.com/go-openapi/validate/README.md @@ -1,7 +1,5 @@ -# Validation helpers -[![Build Status](https://travis-ci.org/go-openapi/validate.svg?branch=master)](https://travis-ci.org/go-openapi/validate) -[![Build 
status](https://ci.appveyor.com/api/projects/status/d6epy6vipueyh5fs/branch/master?svg=true)](https://ci.appveyor.com/project/fredbi/validate/branch/master) -[![codecov](https://codecov.io/gh/go-openapi/validate/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/validate) +# Validation helpers [![Build Status](https://github.com/go-openapi/validate/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/validate/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/validate/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/validate) + [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/validate/master/LICENSE) [![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/validate.svg)](https://pkg.go.dev/github.com/go-openapi/validate) @@ -24,7 +22,7 @@ Reference can be found here: https://github.com/OAI/OpenAPI-Specification/blob/m * Minimum, Maximum, MultipleOf * FormatOf -[Documentation](https://godoc.org/github.com/go-openapi/validate) +[Documentation](https://pkg.go.dev/github.com/go-openapi/validate) ## FAQ diff --git a/vendor/github.com/go-openapi/validate/appveyor.yml b/vendor/github.com/go-openapi/validate/appveyor.yml deleted file mode 100644 index 89e5bccb3..000000000 --- a/vendor/github.com/go-openapi/validate/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: "0.1.{build}" - -clone_folder: C:\go-openapi\validate -shallow_clone: true # for startup speed -pull_requests: - do_not_increment_build_number: true - -#skip_tags: true -#skip_branch_with_pr: true - -# appveyor.yml -build: off - -environment: - GOPATH: c:\gopath - -stack: go 1.15 - -test_script: - - go test -v -timeout 20m -args -enable-long ./... - -deploy: off - -notifications: - - provider: Slack - incoming_webhook: https://hooks.slack.com/services/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 - auth_token: - secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4= - channel: bots - on_build_success: false - on_build_failure: true - on_build_status_changed: true diff --git a/vendor/github.com/go-openapi/validate/default_validator.go b/vendor/github.com/go-openapi/validate/default_validator.go index bd14c2a26..e0dd93839 100644 --- a/vendor/github.com/go-openapi/validate/default_validator.go +++ b/vendor/github.com/go-openapi/validate/default_validator.go @@ -25,48 +25,55 @@ import ( // According to Swagger spec, default values MUST validate their schema. 
type defaultValidator struct { SpecValidator *SpecValidator - visitedSchemas map[string]bool + visitedSchemas map[string]struct{} + schemaOptions *SchemaValidatorOptions } // resetVisited resets the internal state of visited schemas func (d *defaultValidator) resetVisited() { - d.visitedSchemas = map[string]bool{} + if d.visitedSchemas == nil { + d.visitedSchemas = make(map[string]struct{}) + + return + } + + // TODO(go1.21): clear(ex.visitedSchemas) + for k := range d.visitedSchemas { + delete(d.visitedSchemas, k) + } } -func isVisited(path string, visitedSchemas map[string]bool) bool { - found := visitedSchemas[path] - if !found { - // search for overlapping paths - frags := strings.Split(path, ".") - if len(frags) < 2 { - // shortcut exit on smaller paths - return found +func isVisited(path string, visitedSchemas map[string]struct{}) bool { + _, found := visitedSchemas[path] + if found { + return true + } + + // search for overlapping paths + var ( + parent string + suffix string + ) + for i := len(path) - 2; i >= 0; i-- { + r := path[i] + if r != '.' { + continue } - last := len(frags) - 1 - var currentFragStr, parent string - for i := range frags { - if i == 0 { - currentFragStr = frags[last] - } else { - currentFragStr = strings.Join([]string{frags[last-i], currentFragStr}, ".") - } - if i < last { - parent = strings.Join(frags[0:last-i], ".") - } else { - parent = "" - } - if strings.HasSuffix(parent, currentFragStr) { - found = true - break - } + + parent = path[0:i] + suffix = path[i+1:] + + if strings.HasSuffix(parent, suffix) { + return true } } - return found + + return false } // beingVisited asserts a schema is being visited func (d *defaultValidator) beingVisited(path string) { - d.visitedSchemas[path] = true + d.visitedSchemas[path] = struct{}{} } // isVisited tells if a path has already been visited @@ -75,8 +82,9 @@ func (d *defaultValidator) isVisited(path string) bool { } // Validate validates the default values declared in the swagger spec -func (d *defaultValidator) Validate() (errs *Result) { - errs = new(Result) +func (d *defaultValidator) Validate() *Result { + errs := pools.poolOfResults.BorrowResult() // will redeem when merged + if d == nil || d.SpecValidator == nil { return errs } @@ -89,7 +97,7 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result { // every default value that is specified must validate against the schema for that property // headers, items, parameters, schema - res := new(Result) + res := pools.poolOfResults.BorrowResult() // will redeem when merged s := d.SpecValidator for method, pathItem := range s.expandedAnalyzer().Operations() { @@ -107,10 +115,12 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result { // default values provided must validate against their inline definition (no explicit schema) if param.Default != nil && param.Schema == nil { // check param default value is valid - red := NewParamValidator(¶m, s.KnownFormats).Validate(param.Default) //#nosec + red := newParamValidator(¶m, s.KnownFormats, d.schemaOptions).Validate(param.Default) //#nosec if red.HasErrorsOrWarnings() { res.AddErrors(defaultValueDoesNotValidateMsg(param.Name, param.In)) res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } @@ -120,6 +130,8 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result { if red.HasErrorsOrWarnings() { res.AddErrors(defaultValueItemsDoesNotValidateMsg(param.Name, param.In)) res.Merge(red) + } else if 
red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } @@ -129,6 +141,8 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result { if red.HasErrorsOrWarnings() { res.AddErrors(defaultValueDoesNotValidateMsg(param.Name, param.In)) res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } } @@ -154,7 +168,7 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result { // reset explored schemas to get depth-first recursive-proof exploration d.resetVisited() for nm, sch := range s.spec.Spec().Definitions { - res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("definitions.%s", nm), "body", &sch)) //#nosec + res.Merge(d.validateDefaultValueSchemaAgainstSchema("definitions."+nm, "body", &sch)) //#nosec } } return res @@ -170,17 +184,18 @@ func (d *defaultValidator) validateDefaultInResponse(resp *spec.Response, respon responseName, responseCodeAsStr := responseHelp.responseMsgVariants(responseType, responseCode) - // nolint: dupl if response.Headers != nil { // Safeguard for nm, h := range response.Headers { // reset explored schemas to get depth-first recursive-proof exploration d.resetVisited() if h.Default != nil { - red := NewHeaderValidator(nm, &h, s.KnownFormats).Validate(h.Default) //#nosec + red := newHeaderValidator(nm, &h, s.KnownFormats, d.schemaOptions).Validate(h.Default) //#nosec if red.HasErrorsOrWarnings() { res.AddErrors(defaultValueHeaderDoesNotValidateMsg(operationID, nm, responseName)) res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } @@ -190,6 +205,8 @@ func (d *defaultValidator) validateDefaultInResponse(resp *spec.Response, respon if red.HasErrorsOrWarnings() { res.AddErrors(defaultValueHeaderItemsDoesNotValidateMsg(operationID, nm, responseName)) res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } @@ -209,6 +226,8 @@ func (d *defaultValidator) validateDefaultInResponse(resp *spec.Response, respon // Additional message to make sure the context of the error is not lost res.AddErrors(defaultValueInDoesNotValidateMsg(operationID, responseName)) res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } return res @@ -220,11 +239,13 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri return nil } d.beingVisited(path) - res := new(Result) + res := pools.poolOfResults.BorrowResult() s := d.SpecValidator if schema.Default != nil { - res.Merge(NewSchemaValidator(schema, s.spec.Spec(), path+".default", s.KnownFormats, SwaggerSchema(true)).Validate(schema.Default)) + res.Merge( + newSchemaValidator(schema, s.spec.Spec(), path+".default", s.KnownFormats, d.schemaOptions).Validate(schema.Default), + ) } if schema.Items != nil { if schema.Items.Schema != nil { @@ -242,7 +263,7 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri } if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { // NOTE: we keep validating values, even though additionalItems is not supported by Swagger 2.0 (and 3.0 as well) - res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalItems", path), in, schema.AdditionalItems.Schema)) + res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+".additionalItems", in, schema.AdditionalItems.Schema)) } for propName, prop := range schema.Properties { res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec 
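The hunks above switch visitedSchemas from map[string]bool to map[string]struct{} and reset it in place with a delete loop, noting that the loop can become the clear() builtin once the module requires Go 1.21. A small self-contained sketch of that set idiom; the visited type and its method names are illustrative:

```go
package main

import "fmt"

// visited tracks schema paths with zero-byte values, which is slightly leaner
// than map[string]bool and makes membership checks explicit.
type visited map[string]struct{}

func (v visited) add(path string)      { v[path] = struct{}{} }
func (v visited) has(path string) bool { _, ok := v[path]; return ok }

// reset empties the map in place so the underlying allocation can be reused.
// On Go 1.21+ the loop can be replaced by the clear() builtin.
func (v visited) reset() {
	for k := range v {
		delete(v, k)
	}
}

func main() {
	v := visited{}
	v.add("definitions.Pet")
	fmt.Println(v.has("definitions.Pet"), v.has("definitions.Tag")) // true false
	v.reset()
	fmt.Println(len(v)) // 0
}
```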
@@ -251,7 +272,7 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec } if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { - res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalProperties", path), in, schema.AdditionalProperties.Schema)) + res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+".additionalProperties", in, schema.AdditionalProperties.Schema)) } if schema.AllOf != nil { for i, aoSch := range schema.AllOf { @@ -262,13 +283,15 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri } // TODO: Temporary duplicated code. Need to refactor with examples -// nolint: dupl + func (d *defaultValidator) validateDefaultValueItemsAgainstSchema(path, in string, root interface{}, items *spec.Items) *Result { - res := new(Result) + res := pools.poolOfResults.BorrowResult() s := d.SpecValidator if items != nil { if items.Default != nil { - res.Merge(newItemsValidator(path, in, items, root, s.KnownFormats).Validate(0, items.Default)) + res.Merge( + newItemsValidator(path, in, items, root, s.KnownFormats, d.schemaOptions).Validate(0, items.Default), + ) } if items.Items != nil { res.Merge(d.validateDefaultValueItemsAgainstSchema(path+"[0].default", in, root, items.Items)) diff --git a/vendor/github.com/go-openapi/validate/doc.go b/vendor/github.com/go-openapi/validate/doc.go index f5ca9a5d5..d2b901eab 100644 --- a/vendor/github.com/go-openapi/validate/doc.go +++ b/vendor/github.com/go-openapi/validate/doc.go @@ -19,7 +19,7 @@ as well as tools to validate data against their schema. This package follows Swagger 2.0. specification (aka OpenAPI 2.0). Reference can be found here: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md. -Validating a specification +# Validating a specification Validates a spec document (from JSON or YAML) against the JSON schema for swagger, then checks a number of extra rules that can't be expressed in JSON schema. 
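These hunks also replace calls such as fmt.Sprintf("definitions.%s", nm) and fmt.Sprintf("%s.additionalItems", path) with plain string concatenation. A rough, illustrative benchmark sketch of why that matters on hot validation paths: Sprintf boxes its arguments into interfaces and walks the format string on every call, while concatenation does a single allocation at most.

```go
// A rough allocation comparison; run with: go test -bench Key -benchmem
package bench

import (
	"fmt"
	"testing"
)

var sink string // package-level sink so the compiler keeps the results

func BenchmarkSprintfKey(b *testing.B) {
	name := "Pet"
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		sink = fmt.Sprintf("definitions.%s", name)
	}
}

func BenchmarkConcatKey(b *testing.B) {
	name := "Pet"
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		sink = "definitions." + name
	}
}
```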
@@ -30,34 +30,36 @@ Entry points: - SpecValidator.Validate() Reported as errors: - [x] definition can't declare a property that's already defined by one of its ancestors - [x] definition's ancestor can't be a descendant of the same model - [x] path uniqueness: each api path should be non-verbatim (account for path param names) unique per method - [x] each security reference should contain only unique scopes - [x] each security scope in a security definition should be unique - [x] parameters in path must be unique - [x] each path parameter must correspond to a parameter placeholder and vice versa - [x] each referenceable definition must have references - [x] each definition property listed in the required array must be defined in the properties of the model - [x] each parameter should have a unique `name` and `type` combination - [x] each operation should have only 1 parameter of type body - [x] each reference must point to a valid object - [x] every default value that is specified must validate against the schema for that property - [x] items property is required for all schemas/definitions of type `array` - [x] path parameters must be declared a required - [x] headers must not contain $ref - [x] schema and property examples provided must validate against their respective object's schema - [x] examples provided must validate their schema + + [x] definition can't declare a property that's already defined by one of its ancestors + [x] definition's ancestor can't be a descendant of the same model + [x] path uniqueness: each api path should be non-verbatim (account for path param names) unique per method. Validation can be laxed by disabling StrictPathParamUniqueness. + [x] each security reference should contain only unique scopes + [x] each security scope in a security definition should be unique + [x] parameters in path must be unique + [x] each path parameter must correspond to a parameter placeholder and vice versa + [x] each referenceable definition must have references + [x] each definition property listed in the required array must be defined in the properties of the model + [x] each parameter should have a unique `name` and `type` combination + [x] each operation should have only 1 parameter of type body + [x] each reference must point to a valid object + [x] every default value that is specified must validate against the schema for that property + [x] items property is required for all schemas/definitions of type `array` + [x] path parameters must be declared a required + [x] headers must not contain $ref + [x] schema and property examples provided must validate against their respective object's schema + [x] examples provided must validate their schema Reported as warnings: - [x] path parameters should not contain any of [{,},\w] - [x] empty path - [x] unused definitions - [x] unsupported validation of examples on non-JSON media types - [x] examples in response without schema - [x] readOnly properties should not be required -Validating a schema + [x] path parameters should not contain any of [{,},\w] + [x] empty path + [x] unused definitions + [x] unsupported validation of examples on non-JSON media types + [x] examples in response without schema + [x] readOnly properties should not be required + +# Validating a schema The schema validation toolkit validates data against JSON-schema-draft 04 schema. @@ -70,16 +72,16 @@ Entry points: - AgainstSchema() - ... 
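The doc.go hunk above lists AgainstSchema() among the entry points for validating data against a JSON-schema-draft-4 schema. A hedged usage sketch, assuming the historical signature AgainstSchema(schema, data, formats) still applies; the schema and data below are made up for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/validate"
)

func main() {
	raw := []byte(`{
		"type": "object",
		"required": ["name"],
		"properties": {"name": {"type": "string", "minLength": 1}}
	}`)

	var schema spec.Schema
	if err := json.Unmarshal(raw, &schema); err != nil {
		panic(err)
	}

	// Validate arbitrary data against the schema.
	err := validate.AgainstSchema(&schema, map[string]interface{}{"name": ""}, strfmt.Default)
	fmt.Println(err) // expect a non-nil error: "name" is shorter than minLength 1
}
```

The defaultValidator and exampleValidator changed in this diff apply essentially this same check to schema.Default and schema.Example values.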
-Known limitations +# Known limitations With the current version of this package, the following aspects of swagger are not yet supported: - [ ] errors and warnings are not reported with key/line number in spec - [ ] default values and examples on responses only support application/json producer type - [ ] invalid numeric constraints (such as Minimum, etc..) are not checked except for default and example values - [ ] rules for collectionFormat are not implemented - [ ] no validation rule for polymorphism support (discriminator) [not done here] - [ ] valid js ECMA regexp not supported by Go regexp engine are considered invalid - [ ] arbitrary large numbers are not supported: max is math.MaxFloat64 + [ ] errors and warnings are not reported with key/line number in spec + [ ] default values and examples on responses only support application/json producer type + [ ] invalid numeric constraints (such as Minimum, etc..) are not checked except for default and example values + [ ] rules for collectionFormat are not implemented + [ ] no validation rule for polymorphism support (discriminator) [not done here] + [ ] valid js ECMA regexp not supported by Go regexp engine are considered invalid + [ ] arbitrary large numbers are not supported: max is math.MaxFloat64 */ package validate diff --git a/vendor/github.com/go-openapi/validate/example_validator.go b/vendor/github.com/go-openapi/validate/example_validator.go index c8bffd78e..d08956973 100644 --- a/vendor/github.com/go-openapi/validate/example_validator.go +++ b/vendor/github.com/go-openapi/validate/example_validator.go @@ -23,17 +23,27 @@ import ( // ExampleValidator validates example values defined in a spec type exampleValidator struct { SpecValidator *SpecValidator - visitedSchemas map[string]bool + visitedSchemas map[string]struct{} + schemaOptions *SchemaValidatorOptions } // resetVisited resets the internal state of visited schemas func (ex *exampleValidator) resetVisited() { - ex.visitedSchemas = map[string]bool{} + if ex.visitedSchemas == nil { + ex.visitedSchemas = make(map[string]struct{}) + + return + } + + // TODO(go1.21): clear(ex.visitedSchemas) + for k := range ex.visitedSchemas { + delete(ex.visitedSchemas, k) + } } // beingVisited asserts a schema is being visited func (ex *exampleValidator) beingVisited(path string) { - ex.visitedSchemas[path] = true + ex.visitedSchemas[path] = struct{}{} } // isVisited tells if a path has already been visited @@ -48,9 +58,9 @@ func (ex *exampleValidator) isVisited(path string) bool { // - schemas // - individual property // - responses -// -func (ex *exampleValidator) Validate() (errs *Result) { - errs = new(Result) +func (ex *exampleValidator) Validate() *Result { + errs := pools.poolOfResults.BorrowResult() + if ex == nil || ex.SpecValidator == nil { return errs } @@ -65,7 +75,7 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result { // in: schemas, properties, object, items // not in: headers, parameters without schema - res := new(Result) + res := pools.poolOfResults.BorrowResult() s := ex.SpecValidator for method, pathItem := range s.expandedAnalyzer().Operations() { @@ -83,10 +93,12 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result { // default values provided must validate against their inline definition (no explicit schema) if param.Example != nil && param.Schema == nil { // check param default value is valid - red := NewParamValidator(¶m, s.KnownFormats).Validate(param.Example) //#nosec + red := newParamValidator(¶m, 
s.KnownFormats, ex.schemaOptions).Validate(param.Example) //#nosec if red.HasErrorsOrWarnings() { res.AddWarnings(exampleValueDoesNotValidateMsg(param.Name, param.In)) res.MergeAsWarnings(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } @@ -96,6 +108,8 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result { if red.HasErrorsOrWarnings() { res.AddWarnings(exampleValueItemsDoesNotValidateMsg(param.Name, param.In)) res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } @@ -105,6 +119,8 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result { if red.HasErrorsOrWarnings() { res.AddWarnings(exampleValueDoesNotValidateMsg(param.Name, param.In)) res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } } @@ -130,7 +146,7 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result { // reset explored schemas to get depth-first recursive-proof exploration ex.resetVisited() for nm, sch := range s.spec.Spec().Definitions { - res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("definitions.%s", nm), "body", &sch)) //#nosec + res.Merge(ex.validateExampleValueSchemaAgainstSchema("definitions."+nm, "body", &sch)) //#nosec } } return res @@ -146,17 +162,18 @@ func (ex *exampleValidator) validateExampleInResponse(resp *spec.Response, respo responseName, responseCodeAsStr := responseHelp.responseMsgVariants(responseType, responseCode) - // nolint: dupl if response.Headers != nil { // Safeguard for nm, h := range response.Headers { // reset explored schemas to get depth-first recursive-proof exploration ex.resetVisited() if h.Example != nil { - red := NewHeaderValidator(nm, &h, s.KnownFormats).Validate(h.Example) //#nosec + red := newHeaderValidator(nm, &h, s.KnownFormats, ex.schemaOptions).Validate(h.Example) //#nosec if red.HasErrorsOrWarnings() { res.AddWarnings(exampleValueHeaderDoesNotValidateMsg(operationID, nm, responseName)) res.MergeAsWarnings(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } @@ -166,6 +183,8 @@ func (ex *exampleValidator) validateExampleInResponse(resp *spec.Response, respo if red.HasErrorsOrWarnings() { res.AddWarnings(exampleValueHeaderItemsDoesNotValidateMsg(operationID, nm, responseName)) res.MergeAsWarnings(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } @@ -185,13 +204,17 @@ func (ex *exampleValidator) validateExampleInResponse(resp *spec.Response, respo // Additional message to make sure the context of the error is not lost res.AddWarnings(exampleValueInDoesNotValidateMsg(operationID, responseName)) res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } if response.Examples != nil { if response.Schema != nil { if example, ok := response.Examples["application/json"]; ok { - res.MergeAsWarnings(NewSchemaValidator(response.Schema, s.spec.Spec(), path+".examples", s.KnownFormats, SwaggerSchema(true)).Validate(example)) + res.MergeAsWarnings( + newSchemaValidator(response.Schema, s.spec.Spec(), path+".examples", s.KnownFormats, s.schemaOptions).Validate(example), + ) } else { // TODO: validate other media types too res.AddWarnings(examplesMimeNotSupportedMsg(operationID, responseName)) @@ -210,10 +233,12 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str } ex.beingVisited(path) s := ex.SpecValidator - res := new(Result) + res := 
pools.poolOfResults.BorrowResult() if schema.Example != nil { - res.MergeAsWarnings(NewSchemaValidator(schema, s.spec.Spec(), path+".example", s.KnownFormats, SwaggerSchema(true)).Validate(schema.Example)) + res.MergeAsWarnings( + newSchemaValidator(schema, s.spec.Spec(), path+".example", s.KnownFormats, ex.schemaOptions).Validate(schema.Example), + ) } if schema.Items != nil { if schema.Items.Schema != nil { @@ -231,7 +256,7 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str } if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { // NOTE: we keep validating values, even though additionalItems is unsupported in Swagger 2.0 (and 3.0 as well) - res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalItems", path), in, schema.AdditionalItems.Schema)) + res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+".additionalItems", in, schema.AdditionalItems.Schema)) } for propName, prop := range schema.Properties { res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec @@ -240,7 +265,7 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec } if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { - res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalProperties", path), in, schema.AdditionalProperties.Schema)) + res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+".additionalProperties", in, schema.AdditionalProperties.Schema)) } if schema.AllOf != nil { for i, aoSch := range schema.AllOf { @@ -251,13 +276,16 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str } // TODO: Temporary duplicated code. 
Need to refactor with examples -// nolint: dupl +// + func (ex *exampleValidator) validateExampleValueItemsAgainstSchema(path, in string, root interface{}, items *spec.Items) *Result { - res := new(Result) + res := pools.poolOfResults.BorrowResult() s := ex.SpecValidator if items != nil { if items.Example != nil { - res.MergeAsWarnings(newItemsValidator(path, in, items, root, s.KnownFormats).Validate(0, items.Example)) + res.MergeAsWarnings( + newItemsValidator(path, in, items, root, s.KnownFormats, ex.schemaOptions).Validate(0, items.Example), + ) } if items.Items != nil { res.Merge(ex.validateExampleValueItemsAgainstSchema(path+"[0].example", in, root, items.Items)) @@ -266,5 +294,6 @@ func (ex *exampleValidator) validateExampleValueItemsAgainstSchema(path, in stri res.AddErrors(invalidPatternInMsg(path, in, items.Pattern)) } } + return res } diff --git a/vendor/github.com/go-openapi/validate/formats.go b/vendor/github.com/go-openapi/validate/formats.go index 0ad996cbb..f4e355213 100644 --- a/vendor/github.com/go-openapi/validate/formats.go +++ b/vendor/github.com/go-openapi/validate/formats.go @@ -22,10 +22,32 @@ import ( ) type formatValidator struct { - Format string Path string In string + Format string KnownFormats strfmt.Registry + Options *SchemaValidatorOptions +} + +func newFormatValidator(path, in, format string, formats strfmt.Registry, opts *SchemaValidatorOptions) *formatValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var f *formatValidator + if opts.recycleValidators { + f = pools.poolOfFormatValidators.BorrowValidator() + } else { + f = new(formatValidator) + } + + f.Path = path + f.In = in + f.Format = format + f.KnownFormats = formats + f.Options = opts + + return f } func (f *formatValidator) SetPath(path string) { @@ -33,37 +55,45 @@ func (f *formatValidator) SetPath(path string) { } func (f *formatValidator) Applies(source interface{}, kind reflect.Kind) bool { - doit := func() bool { - if source == nil { - return false - } - switch source := source.(type) { - case *spec.Items: - return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) - case *spec.Parameter: - return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) - case *spec.Schema: - return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) - case *spec.Header: - return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) - } + if source == nil || f.KnownFormats == nil { + return false + } + + switch source := source.(type) { + case *spec.Items: + return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) + case *spec.Parameter: + return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) + case *spec.Schema: + return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) + case *spec.Header: + return kind == reflect.String && f.KnownFormats.ContainsName(source.Format) + default: return false } - r := doit() - debugLog("format validator for %q applies %t for %T (kind: %v)\n", f.Path, r, source, kind) - return r } func (f *formatValidator) Validate(val interface{}) *Result { - result := new(Result) - debugLog("validating \"%v\" against format: %s", val, f.Format) + if f.Options.recycleValidators { + defer func() { + f.redeem() + }() + } + + var result *Result + if f.Options.recycleResult { + result = pools.poolOfResults.BorrowResult() + } else { + result = new(Result) + } if err := FormatOf(f.Path, f.In, f.Format, val.(string), f.KnownFormats); err != nil { 
result.AddErrors(err) } - if result.HasErrors() { - return result - } - return nil + return result +} + +func (f *formatValidator) redeem() { + pools.poolOfFormatValidators.RedeemValidator(f) } diff --git a/vendor/github.com/go-openapi/validate/helpers.go b/vendor/github.com/go-openapi/validate/helpers.go index 48ebfab58..757e403d9 100644 --- a/vendor/github.com/go-openapi/validate/helpers.go +++ b/vendor/github.com/go-openapi/validate/helpers.go @@ -101,9 +101,17 @@ type errorHelper struct { // A collection of unexported helpers for error construction } -func (h *errorHelper) sErr(err errors.Error) *Result { +func (h *errorHelper) sErr(err errors.Error, recycle bool) *Result { // Builds a Result from standard errors.Error - return &Result{Errors: []error{err}} + var result *Result + if recycle { + result = pools.poolOfResults.BorrowResult() + } else { + result = new(Result) + } + result.Errors = []error{err} + + return result } func (h *errorHelper) addPointerError(res *Result, err error, ref string, fromPath string) *Result { @@ -157,7 +165,7 @@ func (h *valueHelper) asInt64(val interface{}) int64 { // Number conversion function for int64, without error checking // (implements an implicit type upgrade). v := reflect.ValueOf(val) - switch v.Kind() { + switch v.Kind() { //nolint:exhaustive case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return v.Int() case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: @@ -174,7 +182,7 @@ func (h *valueHelper) asUint64(val interface{}) uint64 { // Number conversion function for uint64, without error checking // (implements an implicit type upgrade). v := reflect.ValueOf(val) - switch v.Kind() { + switch v.Kind() { //nolint:exhaustive case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return uint64(v.Int()) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: @@ -192,7 +200,7 @@ func (h *valueHelper) asFloat64(val interface{}) float64 { // Number conversion function for float64, without error checking // (implements an implicit type upgrade). 
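The reworked format validator above still delegates to FormatOf with the (path, in, format, value, registry) arguments visible in the hunk. A usage sketch, assuming FormatOf is the exported package-level helper with that signature and that the "email" format is registered in strfmt.Default:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/validate"
)

func main() {
	// Invalid value: expect a non-nil validation error.
	if err := validate.FormatOf("user.email", "body", "email", "not-an-email", strfmt.Default); err != nil {
		fmt.Println("invalid:", err)
	}

	// Valid value: expect nil.
	if err := validate.FormatOf("user.email", "body", "email", "jane@example.com", strfmt.Default); err == nil {
		fmt.Println("valid")
	}
}
```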
v := reflect.ValueOf(val) - switch v.Kind() { + switch v.Kind() { //nolint:exhaustive case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return float64(v.Int()) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: @@ -225,7 +233,7 @@ func (h *paramHelper) safeExpandedParamsFor(path, method, operationID string, re operation.Parameters = resolvedParams for _, ppr := range s.expandedAnalyzer().SafeParamsFor(method, path, - func(p spec.Parameter, err error) bool { + func(_ spec.Parameter, err error) bool { // since params have already been expanded, there are few causes for error res.AddErrors(someParametersBrokenMsg(path, method, operationID)) // original error from analyzer @@ -250,7 +258,7 @@ func (h *paramHelper) resolveParam(path, method, operationID string, param *spec } if err != nil { // Safeguard - // NOTE: we may enter enter here when the whole parameter is an unresolved $ref + // NOTE: we may enter here when the whole parameter is an unresolved $ref refPath := strings.Join([]string{"\"" + path + "\"", method}, ".") errorHelp.addPointerError(res, err, param.Ref.String(), refPath) return nil, res @@ -306,6 +314,7 @@ func (r *responseHelper) expandResponseRef( errorHelp.addPointerError(res, err, response.Ref.String(), path) return nil, res } + return response, res } diff --git a/vendor/github.com/go-openapi/validate/object_validator.go b/vendor/github.com/go-openapi/validate/object_validator.go index 7bb12615d..dff73fa98 100644 --- a/vendor/github.com/go-openapi/validate/object_validator.go +++ b/vendor/github.com/go-openapi/validate/object_validator.go @@ -15,8 +15,8 @@ package validate import ( + "fmt" "reflect" - "regexp" "strings" "github.com/go-openapi/errors" @@ -35,62 +35,116 @@ type objectValidator struct { PatternProperties map[string]spec.Schema Root interface{} KnownFormats strfmt.Registry - Options SchemaValidatorOptions + Options *SchemaValidatorOptions + splitPath []string +} + +func newObjectValidator(path, in string, + maxProperties, minProperties *int64, required []string, properties spec.SchemaProperties, + additionalProperties *spec.SchemaOrBool, patternProperties spec.SchemaProperties, + root interface{}, formats strfmt.Registry, opts *SchemaValidatorOptions) *objectValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var v *objectValidator + if opts.recycleValidators { + v = pools.poolOfObjectValidators.BorrowValidator() + } else { + v = new(objectValidator) + } + + v.Path = path + v.In = in + v.MaxProperties = maxProperties + v.MinProperties = minProperties + v.Required = required + v.Properties = properties + v.AdditionalProperties = additionalProperties + v.PatternProperties = patternProperties + v.Root = root + v.KnownFormats = formats + v.Options = opts + v.splitPath = strings.Split(v.Path, ".") + + return v } func (o *objectValidator) SetPath(path string) { o.Path = path + o.splitPath = strings.Split(path, ".") } func (o *objectValidator) Applies(source interface{}, kind reflect.Kind) bool { // TODO: this should also work for structs // there is a problem in the type validator where it will be unhappy about null values // so that requires more testing - r := reflect.TypeOf(source) == specSchemaType && (kind == reflect.Map || kind == reflect.Struct) - debugLog("object validator for %q applies %t for %T (kind: %v)\n", o.Path, r, source, kind) - return r + _, isSchema := source.(*spec.Schema) + return isSchema && (kind == reflect.Map || kind == reflect.Struct) } func (o 
*objectValidator) isProperties() bool { - p := strings.Split(o.Path, ".") + p := o.splitPath return len(p) > 1 && p[len(p)-1] == jsonProperties && p[len(p)-2] != jsonProperties } func (o *objectValidator) isDefault() bool { - p := strings.Split(o.Path, ".") + p := o.splitPath return len(p) > 1 && p[len(p)-1] == jsonDefault && p[len(p)-2] != jsonDefault } func (o *objectValidator) isExample() bool { - p := strings.Split(o.Path, ".") + p := o.splitPath return len(p) > 1 && (p[len(p)-1] == swaggerExample || p[len(p)-1] == swaggerExamples) && p[len(p)-2] != swaggerExample } func (o *objectValidator) checkArrayMustHaveItems(res *Result, val map[string]interface{}) { // for swagger 2.0 schemas, there is an additional constraint to have array items defined explicitly. // with pure jsonschema draft 4, one may have arrays with undefined items (i.e. any type). - if t, typeFound := val[jsonType]; typeFound { - if tpe, ok := t.(string); ok && tpe == arrayType { - if item, itemsKeyFound := val[jsonItems]; !itemsKeyFound { - res.AddErrors(errors.Required(jsonItems, o.Path, item)) - } - } + if val == nil { + return } + + t, typeFound := val[jsonType] + if !typeFound { + return + } + + tpe, isString := t.(string) + if !isString || tpe != arrayType { + return + } + + item, itemsKeyFound := val[jsonItems] + if itemsKeyFound { + return + } + + res.AddErrors(errors.Required(jsonItems, o.Path, item)) } func (o *objectValidator) checkItemsMustBeTypeArray(res *Result, val map[string]interface{}) { - if !o.isProperties() && !o.isDefault() && !o.isExample() { - if _, itemsKeyFound := val[jsonItems]; itemsKeyFound { - t, typeFound := val[jsonType] - if typeFound { - if tpe, ok := t.(string); !ok || tpe != arrayType { - res.AddErrors(errors.InvalidType(o.Path, o.In, arrayType, nil)) - } - } else { - // there is no type - res.AddErrors(errors.Required(jsonType, o.Path, t)) - } - } + if val == nil { + return + } + + if o.isProperties() || o.isDefault() || o.isExample() { + return + } + + _, itemsKeyFound := val[jsonItems] + if !itemsKeyFound { + return + } + + t, typeFound := val[jsonType] + if !typeFound { + // there is no type + res.AddErrors(errors.Required(jsonType, o.Path, t)) + } + + if tpe, isString := t.(string); !isString || tpe != arrayType { + res.AddErrors(errors.InvalidType(o.Path, o.In, arrayType, nil)) } } @@ -104,176 +158,274 @@ func (o *objectValidator) precheck(res *Result, val map[string]interface{}) { } func (o *objectValidator) Validate(data interface{}) *Result { - val := data.(map[string]interface{}) - // TODO: guard against nil data + if o.Options.recycleValidators { + defer func() { + o.redeem() + }() + } + + var val map[string]interface{} + if data != nil { + var ok bool + val, ok = data.(map[string]interface{}) + if !ok { + return errorHelp.sErr(invalidObjectMsg(o.Path, o.In), o.Options.recycleResult) + } + } numKeys := int64(len(val)) if o.MinProperties != nil && numKeys < *o.MinProperties { - return errorHelp.sErr(errors.TooFewProperties(o.Path, o.In, *o.MinProperties)) + return errorHelp.sErr(errors.TooFewProperties(o.Path, o.In, *o.MinProperties), o.Options.recycleResult) } if o.MaxProperties != nil && numKeys > *o.MaxProperties { - return errorHelp.sErr(errors.TooManyProperties(o.Path, o.In, *o.MaxProperties)) + return errorHelp.sErr(errors.TooManyProperties(o.Path, o.In, *o.MaxProperties), o.Options.recycleResult) } - res := new(Result) + var res *Result + if o.Options.recycleResult { + res = pools.poolOfResults.BorrowResult() + } else { + res = new(Result) + } o.precheck(res, val) // 
check validity of field names if o.AdditionalProperties != nil && !o.AdditionalProperties.Allows { // Case: additionalProperties: false - for k := range val { - _, regularProperty := o.Properties[k] - matched := false - - for pk := range o.PatternProperties { - if matches, _ := regexp.MatchString(pk, k); matches { - matched = true - break - } - } - - if !regularProperty && k != "$schema" && k != "id" && !matched { - // Special properties "$schema" and "id" are ignored - res.AddErrors(errors.PropertyNotAllowed(o.Path, o.In, k)) - - // BUG(fredbi): This section should move to a part dedicated to spec validation as - // it will conflict with regular schemas where a property "headers" is defined. - - // - // Croaks a more explicit message on top of the standard one - // on some recognized cases. - // - // NOTE: edge cases with invalid type assertion are simply ignored here. - // NOTE: prefix your messages here by "IMPORTANT!" so there are not filtered - // by higher level callers (the IMPORTANT! tag will be eventually - // removed). - if k == "headers" && val[k] != nil { - // $ref is forbidden in header - if headers, mapOk := val[k].(map[string]interface{}); mapOk { - for headerKey, headerBody := range headers { - if headerBody != nil { - if headerSchema, mapOfMapOk := headerBody.(map[string]interface{}); mapOfMapOk { - if _, found := headerSchema["$ref"]; found { - var msg string - if refString, stringOk := headerSchema["$ref"].(string); stringOk { - msg = strings.Join([]string{", one may not use $ref=\":", refString, "\""}, "") - } - res.AddErrors(refNotAllowedInHeaderMsg(o.Path, headerKey, msg)) - } - } - } - } - } - /* - case "$ref": - if val[k] != nil { - // TODO: check context of that ref: warn about siblings, check against invalid context - } - */ - } - } - } + o.validateNoAdditionalProperties(val, res) } else { - // Cases: no additionalProperties (implying: true), or additionalProperties: true, or additionalProperties: { <> } - for key, value := range val { - _, regularProperty := o.Properties[key] - - // Validates property against "patternProperties" if applicable - // BUG(fredbi): succeededOnce is always false - - // NOTE: how about regular properties which do not match patternProperties? - matched, succeededOnce, _ := o.validatePatternProperty(key, value, res) - - if !(regularProperty || matched || succeededOnce) { - - // Cases: properties which are not regular properties and have not been matched by the PatternProperties validator - if o.AdditionalProperties != nil && o.AdditionalProperties.Schema != nil { - // AdditionalProperties as Schema - r := NewSchemaValidator(o.AdditionalProperties.Schema, o.Root, o.Path+"."+key, o.KnownFormats, o.Options.Options()...).Validate(value) - res.mergeForField(data.(map[string]interface{}), key, r) - } else if regularProperty && !(matched || succeededOnce) { - // TODO: this is dead code since regularProperty=false here - res.AddErrors(errors.FailedAllPatternProperties(o.Path, o.In, key)) - } - } - } - // Valid cases: additionalProperties: true or undefined + // Cases: empty additionalProperties (implying: true), or additionalProperties: true, or additionalProperties: { <> } + o.validateAdditionalProperties(val, res) } - createdFromDefaults := map[string]bool{} - - // Property types: - // - regular Property - for pName := range o.Properties { - pSchema := o.Properties[pName] // one instance per iteration - rName := pName - if o.Path != "" { - rName = o.Path + "." 
+ pName - } - - // Recursively validates each property against its schema - if v, ok := val[pName]; ok { - r := NewSchemaValidator(&pSchema, o.Root, rName, o.KnownFormats, o.Options.Options()...).Validate(v) - res.mergeForField(data.(map[string]interface{}), pName, r) - } else if pSchema.Default != nil { - // If a default value is defined, creates the property from defaults - // NOTE: JSON schema does not enforce default values to be valid against schema. Swagger does. - createdFromDefaults[pName] = true - res.addPropertySchemata(data.(map[string]interface{}), pName, &pSchema) - } - } - - // Check required properties - if len(o.Required) > 0 { - for _, k := range o.Required { - if v, ok := val[k]; !ok && !createdFromDefaults[k] { - res.AddErrors(errors.Required(o.Path+"."+k, o.In, v)) - continue - } - } - } + o.validatePropertiesSchema(val, res) // Check patternProperties // TODO: it looks like we have done that twice in many cases for key, value := range val { _, regularProperty := o.Properties[key] - matched, _ /*succeededOnce*/, patterns := o.validatePatternProperty(key, value, res) - if !regularProperty && (matched /*|| succeededOnce*/) { - for _, pName := range patterns { - if v, ok := o.PatternProperties[pName]; ok { - r := NewSchemaValidator(&v, o.Root, o.Path+"."+key, o.KnownFormats, o.Options.Options()...).Validate(value) - res.mergeForField(data.(map[string]interface{}), key, r) - } + matched, _, patterns := o.validatePatternProperty(key, value, res) // applies to regular properties as well + if regularProperty || !matched { + continue + } + + for _, pName := range patterns { + if v, ok := o.PatternProperties[pName]; ok { + r := newSchemaValidator(&v, o.Root, o.Path+"."+key, o.KnownFormats, o.Options).Validate(value) + res.mergeForField(data.(map[string]interface{}), key, r) } } } + return res } +func (o *objectValidator) validateNoAdditionalProperties(val map[string]interface{}, res *Result) { + for k := range val { + if k == "$schema" || k == "id" { + // special properties "$schema" and "id" are ignored + continue + } + + _, regularProperty := o.Properties[k] + if regularProperty { + continue + } + + matched := false + for pk := range o.PatternProperties { + re, err := compileRegexp(pk) + if err != nil { + continue + } + if matches := re.MatchString(k); matches { + matched = true + break + } + } + if matched { + continue + } + + res.AddErrors(errors.PropertyNotAllowed(o.Path, o.In, k)) + + // BUG(fredbi): This section should move to a part dedicated to spec validation as + // it will conflict with regular schemas where a property "headers" is defined. + + // + // Croaks a more explicit message on top of the standard one + // on some recognized cases. + // + // NOTE: edge cases with invalid type assertion are simply ignored here. + // NOTE: prefix your messages here by "IMPORTANT!" so there are not filtered + // by higher level callers (the IMPORTANT! tag will be eventually + // removed). 
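The validateNoAdditionalProperties helper introduced above now calls compileRegexp(pk) and reuses the compiled expression instead of invoking regexp.MatchString on every key; compileRegexp itself lives elsewhere in the package and is not shown in this diff. A minimal sketch of that style of pattern cache, with illustrative names:

```go
package main

import (
	"fmt"
	"regexp"
	"sync"
)

var (
	mu    sync.Mutex
	cache = map[string]*regexp.Regexp{}
)

// compiled compiles each patternProperties expression once and reuses it on
// subsequent calls, instead of recompiling for every property key checked.
func compiled(pattern string) (*regexp.Regexp, error) {
	mu.Lock()
	defer mu.Unlock()
	if re, ok := cache[pattern]; ok {
		return re, nil
	}
	re, err := regexp.Compile(pattern)
	if err != nil {
		return nil, err
	}
	cache[pattern] = re
	return re, nil
}

func main() {
	re, _ := compiled(`^x-`)
	fmt.Println(re.MatchString("x-vendor"), re.MatchString("name")) // true false
}
```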
+ if k != "headers" || val[k] == nil { + continue + } + + // $ref is forbidden in header + headers, mapOk := val[k].(map[string]interface{}) + if !mapOk { + continue + } + + for headerKey, headerBody := range headers { + if headerBody == nil { + continue + } + + headerSchema, mapOfMapOk := headerBody.(map[string]interface{}) + if !mapOfMapOk { + continue + } + + _, found := headerSchema["$ref"] + if !found { + continue + } + + refString, stringOk := headerSchema["$ref"].(string) + if !stringOk { + continue + } + + msg := strings.Join([]string{", one may not use $ref=\":", refString, "\""}, "") + res.AddErrors(refNotAllowedInHeaderMsg(o.Path, headerKey, msg)) + /* + case "$ref": + if val[k] != nil { + // TODO: check context of that ref: warn about siblings, check against invalid context + } + */ + } + } +} + +func (o *objectValidator) validateAdditionalProperties(val map[string]interface{}, res *Result) { + for key, value := range val { + _, regularProperty := o.Properties[key] + if regularProperty { + continue + } + + // Validates property against "patternProperties" if applicable + // BUG(fredbi): succeededOnce is always false + + // NOTE: how about regular properties which do not match patternProperties? + matched, succeededOnce, _ := o.validatePatternProperty(key, value, res) + if matched || succeededOnce { + continue + } + + if o.AdditionalProperties == nil || o.AdditionalProperties.Schema == nil { + continue + } + + // Cases: properties which are not regular properties and have not been matched by the PatternProperties validator + // AdditionalProperties as Schema + r := newSchemaValidator(o.AdditionalProperties.Schema, o.Root, o.Path+"."+key, o.KnownFormats, o.Options).Validate(value) + res.mergeForField(val, key, r) + } + // Valid cases: additionalProperties: true or undefined +} + +func (o *objectValidator) validatePropertiesSchema(val map[string]interface{}, res *Result) { + createdFromDefaults := map[string]struct{}{} + + // Property types: + // - regular Property + pSchema := pools.poolOfSchemas.BorrowSchema() // recycle a spec.Schema object which lifespan extends only to the validation of properties + defer func() { + pools.poolOfSchemas.RedeemSchema(pSchema) + }() + + for pName := range o.Properties { + *pSchema = o.Properties[pName] + var rName string + if o.Path == "" { + rName = pName + } else { + rName = o.Path + "." + pName + } + + // Recursively validates each property against its schema + v, ok := val[pName] + if ok { + r := newSchemaValidator(pSchema, o.Root, rName, o.KnownFormats, o.Options).Validate(v) + res.mergeForField(val, pName, r) + + continue + } + + if pSchema.Default != nil { + // if a default value is defined, creates the property from defaults + // NOTE: JSON schema does not enforce default values to be valid against schema. Swagger does. 
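The rewritten validatePropertiesSchema records properties created from schema defaults in a map[string]struct{} and then exempts them from the required-properties check. A compact sketch of that rule in isolation; the function and variable names are illustrative:

```go
package main

import "fmt"

// missingRequired mirrors the required check above: a key is acceptable if it
// is present in the data or if it was created from a schema default.
func missingRequired(required []string, data map[string]interface{}, fromDefaults map[string]struct{}) []string {
	var missing []string
	for _, k := range required {
		if _, ok := data[k]; ok {
			continue
		}
		if _, ok := fromDefaults[k]; ok {
			continue
		}
		missing = append(missing, k)
	}
	return missing
}

func main() {
	data := map[string]interface{}{"name": "wolf"}
	defaults := map[string]struct{}{"status": {}}
	fmt.Println(missingRequired([]string{"name", "status", "id"}, data, defaults)) // [id]
}
```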
+ createdFromDefaults[pName] = struct{}{} + if !o.Options.skipSchemataResult { + res.addPropertySchemata(val, pName, pSchema) // this shallow-clones the content of the pSchema pointer + } + } + } + + if len(o.Required) == 0 { + return + } + + // Check required properties + for _, k := range o.Required { + v, ok := val[k] + if ok { + continue + } + _, isCreatedFromDefaults := createdFromDefaults[k] + if isCreatedFromDefaults { + continue + } + + res.AddErrors(errors.Required(fmt.Sprintf("%s.%s", o.Path, k), o.In, v)) + } +} + // TODO: succeededOnce is not used anywhere func (o *objectValidator) validatePatternProperty(key string, value interface{}, result *Result) (bool, bool, []string) { - matched := false - succeededOnce := false - var patterns []string - - for k, schema := range o.PatternProperties { - sch := schema - if match, _ := regexp.MatchString(k, key); match { - patterns = append(patterns, k) - matched = true - validator := NewSchemaValidator(&sch, o.Root, o.Path+"."+key, o.KnownFormats, o.Options.Options()...) - - res := validator.Validate(value) - result.Merge(res) - } + if len(o.PatternProperties) == 0 { + return false, false, nil } - // BUG(fredbi): can't get to here. Should remove dead code (commented out). + matched := false + succeededOnce := false + patterns := make([]string, 0, len(o.PatternProperties)) - // if succeededOnce { - // result.Inc() - // } + schema := pools.poolOfSchemas.BorrowSchema() + defer func() { + pools.poolOfSchemas.RedeemSchema(schema) + }() + + for k := range o.PatternProperties { + re, err := compileRegexp(k) + if err != nil { + continue + } + + match := re.MatchString(key) + if !match { + continue + } + + *schema = o.PatternProperties[k] + patterns = append(patterns, k) + matched = true + validator := newSchemaValidator(schema, o.Root, fmt.Sprintf("%s.%s", o.Path, key), o.KnownFormats, o.Options) + + res := validator.Validate(value) + result.Merge(res) + } return matched, succeededOnce, patterns } + +func (o *objectValidator) redeem() { + pools.poolOfObjectValidators.RedeemValidator(o) +} diff --git a/vendor/github.com/go-openapi/validate/options.go b/vendor/github.com/go-openapi/validate/options.go index deeec2f2e..cfe9b0660 100644 --- a/vendor/github.com/go-openapi/validate/options.go +++ b/vendor/github.com/go-openapi/validate/options.go @@ -21,10 +21,29 @@ import "sync" // NOTE: other options might be needed, for example a go-swagger specific mode. type Opts struct { ContinueOnErrors bool // true: continue reporting errors, even if spec is invalid + + // StrictPathParamUniqueness enables a strict validation of paths that include + // path parameters. When true, it will enforce that for each method, the path + // is unique, regardless of path parameters such that GET:/petstore/{id} and + // GET:/petstore/{pet} anre considered duplicate paths. + // + // Consider disabling if path parameters can include slashes such as + // GET:/v1/{shelve} and GET:/v1/{book}, where the IDs are "shelve/*" and + // /"shelve/*/book/*" respectively. + StrictPathParamUniqueness bool + SkipSchemataResult bool } var ( - defaultOpts = Opts{ContinueOnErrors: false} // default is to stop validation on errors + defaultOpts = Opts{ + // default is to stop validation on errors + ContinueOnErrors: false, + + // StrictPathParamUniqueness is defaulted to true. This maintains existing + // behavior. 
+ StrictPathParamUniqueness: true, + } + defaultOptsMutex = &sync.Mutex{} ) diff --git a/vendor/github.com/go-openapi/validate/pools.go b/vendor/github.com/go-openapi/validate/pools.go new file mode 100644 index 000000000..3ddce4dcc --- /dev/null +++ b/vendor/github.com/go-openapi/validate/pools.go @@ -0,0 +1,366 @@ +//go:build !validatedebug + +package validate + +import ( + "sync" + + "github.com/go-openapi/spec" +) + +var pools allPools + +func init() { + resetPools() +} + +func resetPools() { + // NOTE: for testing purpose, we might want to reset pools after calling Validate twice. + // The pool is corrupted in that case: calling Put twice inserts a duplicate in the pool + // and further calls to Get are mishandled. + + pools = allPools{ + poolOfSchemaValidators: schemaValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &SchemaValidator{} + + return s + }, + }, + }, + poolOfObjectValidators: objectValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &objectValidator{} + + return s + }, + }, + }, + poolOfSliceValidators: sliceValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &schemaSliceValidator{} + + return s + }, + }, + }, + poolOfItemsValidators: itemsValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &itemsValidator{} + + return s + }, + }, + }, + poolOfBasicCommonValidators: basicCommonValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &basicCommonValidator{} + + return s + }, + }, + }, + poolOfHeaderValidators: headerValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &HeaderValidator{} + + return s + }, + }, + }, + poolOfParamValidators: paramValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &ParamValidator{} + + return s + }, + }, + }, + poolOfBasicSliceValidators: basicSliceValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &basicSliceValidator{} + + return s + }, + }, + }, + poolOfNumberValidators: numberValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &numberValidator{} + + return s + }, + }, + }, + poolOfStringValidators: stringValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &stringValidator{} + + return s + }, + }, + }, + poolOfSchemaPropsValidators: schemaPropsValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &schemaPropsValidator{} + + return s + }, + }, + }, + poolOfFormatValidators: formatValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &formatValidator{} + + return s + }, + }, + }, + poolOfTypeValidators: typeValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &typeValidator{} + + return s + }, + }, + }, + poolOfSchemas: schemasPool{ + Pool: &sync.Pool{ + New: func() any { + s := &spec.Schema{} + + return s + }, + }, + }, + poolOfResults: resultsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &Result{} + + return s + }, + }, + }, + } +} + +type ( + allPools struct { + // memory pools for all validator objects. + // + // Each pool can be borrowed from and redeemed to. 
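Each field of the new allPools struct wraps a sync.Pool behind a typed Borrow/Redeem pair so callers never juggle raw interface{} values. A reduced sketch of the same discipline for a single pooled type; the result type and pool below are stand-ins, not the package's own:

```go
package main

import (
	"fmt"
	"sync"
)

// result stands in for a pooled value such as validate.Result.
type result struct {
	Errors []error
}

// cleared resets the value so a recycled instance starts empty.
func (r *result) cleared() *result {
	r.Errors = r.Errors[:0]
	return r
}

// resultsPool hides sync.Pool behind typed Borrow/Redeem methods.
type resultsPool struct{ p *sync.Pool }

func newResultsPool() resultsPool {
	return resultsPool{p: &sync.Pool{New: func() any { return &result{} }}}
}

func (rp resultsPool) Borrow() *result  { return rp.p.Get().(*result).cleared() }
func (rp resultsPool) Redeem(r *result) { rp.p.Put(r) }

func main() {
	pool := newResultsPool()
	r := pool.Borrow()
	r.Errors = append(r.Errors, fmt.Errorf("boom"))
	fmt.Println(len(r.Errors)) // 1
	pool.Redeem(r)             // r must not be used after this point
}
```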
+ poolOfSchemaValidators schemaValidatorsPool + poolOfObjectValidators objectValidatorsPool + poolOfSliceValidators sliceValidatorsPool + poolOfItemsValidators itemsValidatorsPool + poolOfBasicCommonValidators basicCommonValidatorsPool + poolOfHeaderValidators headerValidatorsPool + poolOfParamValidators paramValidatorsPool + poolOfBasicSliceValidators basicSliceValidatorsPool + poolOfNumberValidators numberValidatorsPool + poolOfStringValidators stringValidatorsPool + poolOfSchemaPropsValidators schemaPropsValidatorsPool + poolOfFormatValidators formatValidatorsPool + poolOfTypeValidators typeValidatorsPool + poolOfSchemas schemasPool + poolOfResults resultsPool + } + + schemaValidatorsPool struct { + *sync.Pool + } + + objectValidatorsPool struct { + *sync.Pool + } + + sliceValidatorsPool struct { + *sync.Pool + } + + itemsValidatorsPool struct { + *sync.Pool + } + + basicCommonValidatorsPool struct { + *sync.Pool + } + + headerValidatorsPool struct { + *sync.Pool + } + + paramValidatorsPool struct { + *sync.Pool + } + + basicSliceValidatorsPool struct { + *sync.Pool + } + + numberValidatorsPool struct { + *sync.Pool + } + + stringValidatorsPool struct { + *sync.Pool + } + + schemaPropsValidatorsPool struct { + *sync.Pool + } + + formatValidatorsPool struct { + *sync.Pool + } + + typeValidatorsPool struct { + *sync.Pool + } + + schemasPool struct { + *sync.Pool + } + + resultsPool struct { + *sync.Pool + } +) + +func (p schemaValidatorsPool) BorrowValidator() *SchemaValidator { + return p.Get().(*SchemaValidator) +} + +func (p schemaValidatorsPool) RedeemValidator(s *SchemaValidator) { + // NOTE: s might be nil. In that case, Put is a noop. + p.Put(s) +} + +func (p objectValidatorsPool) BorrowValidator() *objectValidator { + return p.Get().(*objectValidator) +} + +func (p objectValidatorsPool) RedeemValidator(s *objectValidator) { + p.Put(s) +} + +func (p sliceValidatorsPool) BorrowValidator() *schemaSliceValidator { + return p.Get().(*schemaSliceValidator) +} + +func (p sliceValidatorsPool) RedeemValidator(s *schemaSliceValidator) { + p.Put(s) +} + +func (p itemsValidatorsPool) BorrowValidator() *itemsValidator { + return p.Get().(*itemsValidator) +} + +func (p itemsValidatorsPool) RedeemValidator(s *itemsValidator) { + p.Put(s) +} + +func (p basicCommonValidatorsPool) BorrowValidator() *basicCommonValidator { + return p.Get().(*basicCommonValidator) +} + +func (p basicCommonValidatorsPool) RedeemValidator(s *basicCommonValidator) { + p.Put(s) +} + +func (p headerValidatorsPool) BorrowValidator() *HeaderValidator { + return p.Get().(*HeaderValidator) +} + +func (p headerValidatorsPool) RedeemValidator(s *HeaderValidator) { + p.Put(s) +} + +func (p paramValidatorsPool) BorrowValidator() *ParamValidator { + return p.Get().(*ParamValidator) +} + +func (p paramValidatorsPool) RedeemValidator(s *ParamValidator) { + p.Put(s) +} + +func (p basicSliceValidatorsPool) BorrowValidator() *basicSliceValidator { + return p.Get().(*basicSliceValidator) +} + +func (p basicSliceValidatorsPool) RedeemValidator(s *basicSliceValidator) { + p.Put(s) +} + +func (p numberValidatorsPool) BorrowValidator() *numberValidator { + return p.Get().(*numberValidator) +} + +func (p numberValidatorsPool) RedeemValidator(s *numberValidator) { + p.Put(s) +} + +func (p stringValidatorsPool) BorrowValidator() *stringValidator { + return p.Get().(*stringValidator) +} + +func (p stringValidatorsPool) RedeemValidator(s *stringValidator) { + p.Put(s) +} + +func (p schemaPropsValidatorsPool) BorrowValidator() 
*schemaPropsValidator { + return p.Get().(*schemaPropsValidator) +} + +func (p schemaPropsValidatorsPool) RedeemValidator(s *schemaPropsValidator) { + p.Put(s) +} + +func (p formatValidatorsPool) BorrowValidator() *formatValidator { + return p.Get().(*formatValidator) +} + +func (p formatValidatorsPool) RedeemValidator(s *formatValidator) { + p.Put(s) +} + +func (p typeValidatorsPool) BorrowValidator() *typeValidator { + return p.Get().(*typeValidator) +} + +func (p typeValidatorsPool) RedeemValidator(s *typeValidator) { + p.Put(s) +} + +func (p schemasPool) BorrowSchema() *spec.Schema { + return p.Get().(*spec.Schema) +} + +func (p schemasPool) RedeemSchema(s *spec.Schema) { + p.Put(s) +} + +func (p resultsPool) BorrowResult() *Result { + return p.Get().(*Result).cleared() +} + +func (p resultsPool) RedeemResult(s *Result) { + if s == emptyResult { + return + } + p.Put(s) +} diff --git a/vendor/github.com/go-openapi/validate/pools_debug.go b/vendor/github.com/go-openapi/validate/pools_debug.go new file mode 100644 index 000000000..12949f02a --- /dev/null +++ b/vendor/github.com/go-openapi/validate/pools_debug.go @@ -0,0 +1,1012 @@ +//go:build validatedebug + +package validate + +import ( + "fmt" + "runtime" + "sync" + "testing" + + "github.com/go-openapi/spec" +) + +// This version of the pools is to be used for debugging and testing, with build tag "validatedebug". +// +// In this mode, the pools are tracked for allocation and redemption of borrowed objects, so we can +// verify a few behaviors of the validators. The debug pools panic when an invalid usage pattern is detected. + +var pools allPools + +func init() { + resetPools() +} + +func resetPools() { + // NOTE: for testing purpose, we might want to reset pools after calling Validate twice. + // The pool is corrupted in that case: calling Put twice inserts a duplicate in the pool + // and further calls to Get are mishandled. 
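The validatedebug build that follows keeps per-pointer status maps precisely because sync.Pool itself will happily accept the same pointer twice, which corrupts later Gets. A small tracked-pool sketch that enforces the same borrow-once/redeem-once invariant; all names here are illustrative:

```go
package main

import (
	"fmt"
	"sync"
)

// trackedPool panics on a double redeem, the invariant the debug pools
// enforce with their status maps.
type trackedPool struct {
	p   *sync.Pool
	mx  sync.Mutex
	out map[*[]byte]bool // true while the buffer is borrowed
}

func newTrackedPool() *trackedPool {
	return &trackedPool{
		p:   &sync.Pool{New: func() any { b := make([]byte, 0, 64); return &b }},
		out: map[*[]byte]bool{},
	}
}

func (t *trackedPool) Borrow() *[]byte {
	b := t.p.Get().(*[]byte)
	t.mx.Lock()
	t.out[b] = true
	t.mx.Unlock()
	return b
}

func (t *trackedPool) Redeem(b *[]byte) {
	t.mx.Lock()
	defer t.mx.Unlock()
	if !t.out[b] {
		panic("redeemed a buffer that was not borrowed (or was redeemed twice)")
	}
	t.out[b] = false
	t.p.Put(b)
}

func main() {
	pool := newTrackedPool()
	b := pool.Borrow()
	pool.Redeem(b)
	fmt.Println("ok")
	// pool.Redeem(b) // would panic: double redeem
}
```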
+ + pools = allPools{ + poolOfSchemaValidators: schemaValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &SchemaValidator{} + + return s + }, + }, + debugMap: make(map[*SchemaValidator]status), + allocMap: make(map[*SchemaValidator]string), + redeemMap: make(map[*SchemaValidator]string), + }, + poolOfObjectValidators: objectValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &objectValidator{} + + return s + }, + }, + debugMap: make(map[*objectValidator]status), + allocMap: make(map[*objectValidator]string), + redeemMap: make(map[*objectValidator]string), + }, + poolOfSliceValidators: sliceValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &schemaSliceValidator{} + + return s + }, + }, + debugMap: make(map[*schemaSliceValidator]status), + allocMap: make(map[*schemaSliceValidator]string), + redeemMap: make(map[*schemaSliceValidator]string), + }, + poolOfItemsValidators: itemsValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &itemsValidator{} + + return s + }, + }, + debugMap: make(map[*itemsValidator]status), + allocMap: make(map[*itemsValidator]string), + redeemMap: make(map[*itemsValidator]string), + }, + poolOfBasicCommonValidators: basicCommonValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &basicCommonValidator{} + + return s + }, + }, + debugMap: make(map[*basicCommonValidator]status), + allocMap: make(map[*basicCommonValidator]string), + redeemMap: make(map[*basicCommonValidator]string), + }, + poolOfHeaderValidators: headerValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &HeaderValidator{} + + return s + }, + }, + debugMap: make(map[*HeaderValidator]status), + allocMap: make(map[*HeaderValidator]string), + redeemMap: make(map[*HeaderValidator]string), + }, + poolOfParamValidators: paramValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &ParamValidator{} + + return s + }, + }, + debugMap: make(map[*ParamValidator]status), + allocMap: make(map[*ParamValidator]string), + redeemMap: make(map[*ParamValidator]string), + }, + poolOfBasicSliceValidators: basicSliceValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &basicSliceValidator{} + + return s + }, + }, + debugMap: make(map[*basicSliceValidator]status), + allocMap: make(map[*basicSliceValidator]string), + redeemMap: make(map[*basicSliceValidator]string), + }, + poolOfNumberValidators: numberValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &numberValidator{} + + return s + }, + }, + debugMap: make(map[*numberValidator]status), + allocMap: make(map[*numberValidator]string), + redeemMap: make(map[*numberValidator]string), + }, + poolOfStringValidators: stringValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &stringValidator{} + + return s + }, + }, + debugMap: make(map[*stringValidator]status), + allocMap: make(map[*stringValidator]string), + redeemMap: make(map[*stringValidator]string), + }, + poolOfSchemaPropsValidators: schemaPropsValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &schemaPropsValidator{} + + return s + }, + }, + debugMap: make(map[*schemaPropsValidator]status), + allocMap: make(map[*schemaPropsValidator]string), + redeemMap: make(map[*schemaPropsValidator]string), + }, + poolOfFormatValidators: formatValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &formatValidator{} + + return s + }, + }, + debugMap: make(map[*formatValidator]status), + allocMap: make(map[*formatValidator]string), + redeemMap: make(map[*formatValidator]string), + }, + poolOfTypeValidators: 
typeValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &typeValidator{} + + return s + }, + }, + debugMap: make(map[*typeValidator]status), + allocMap: make(map[*typeValidator]string), + redeemMap: make(map[*typeValidator]string), + }, + poolOfSchemas: schemasPool{ + Pool: &sync.Pool{ + New: func() any { + s := &spec.Schema{} + + return s + }, + }, + debugMap: make(map[*spec.Schema]status), + allocMap: make(map[*spec.Schema]string), + redeemMap: make(map[*spec.Schema]string), + }, + poolOfResults: resultsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &Result{} + + return s + }, + }, + debugMap: make(map[*Result]status), + allocMap: make(map[*Result]string), + redeemMap: make(map[*Result]string), + }, + } +} + +const ( + statusFresh status = iota + 1 + statusRecycled + statusRedeemed +) + +func (s status) String() string { + switch s { + case statusFresh: + return "fresh" + case statusRecycled: + return "recycled" + case statusRedeemed: + return "redeemed" + default: + panic(fmt.Errorf("invalid status: %d", s)) + } +} + +type ( + // Debug + status uint8 + + allPools struct { + // memory pools for all validator objects. + // + // Each pool can be borrowed from and redeemed to. + poolOfSchemaValidators schemaValidatorsPool + poolOfObjectValidators objectValidatorsPool + poolOfSliceValidators sliceValidatorsPool + poolOfItemsValidators itemsValidatorsPool + poolOfBasicCommonValidators basicCommonValidatorsPool + poolOfHeaderValidators headerValidatorsPool + poolOfParamValidators paramValidatorsPool + poolOfBasicSliceValidators basicSliceValidatorsPool + poolOfNumberValidators numberValidatorsPool + poolOfStringValidators stringValidatorsPool + poolOfSchemaPropsValidators schemaPropsValidatorsPool + poolOfFormatValidators formatValidatorsPool + poolOfTypeValidators typeValidatorsPool + poolOfSchemas schemasPool + poolOfResults resultsPool + } + + schemaValidatorsPool struct { + *sync.Pool + debugMap map[*SchemaValidator]status + allocMap map[*SchemaValidator]string + redeemMap map[*SchemaValidator]string + mx sync.Mutex + } + + objectValidatorsPool struct { + *sync.Pool + debugMap map[*objectValidator]status + allocMap map[*objectValidator]string + redeemMap map[*objectValidator]string + mx sync.Mutex + } + + sliceValidatorsPool struct { + *sync.Pool + debugMap map[*schemaSliceValidator]status + allocMap map[*schemaSliceValidator]string + redeemMap map[*schemaSliceValidator]string + mx sync.Mutex + } + + itemsValidatorsPool struct { + *sync.Pool + debugMap map[*itemsValidator]status + allocMap map[*itemsValidator]string + redeemMap map[*itemsValidator]string + mx sync.Mutex + } + + basicCommonValidatorsPool struct { + *sync.Pool + debugMap map[*basicCommonValidator]status + allocMap map[*basicCommonValidator]string + redeemMap map[*basicCommonValidator]string + mx sync.Mutex + } + + headerValidatorsPool struct { + *sync.Pool + debugMap map[*HeaderValidator]status + allocMap map[*HeaderValidator]string + redeemMap map[*HeaderValidator]string + mx sync.Mutex + } + + paramValidatorsPool struct { + *sync.Pool + debugMap map[*ParamValidator]status + allocMap map[*ParamValidator]string + redeemMap map[*ParamValidator]string + mx sync.Mutex + } + + basicSliceValidatorsPool struct { + *sync.Pool + debugMap map[*basicSliceValidator]status + allocMap map[*basicSliceValidator]string + redeemMap map[*basicSliceValidator]string + mx sync.Mutex + } + + numberValidatorsPool struct { + *sync.Pool + debugMap map[*numberValidator]status + allocMap map[*numberValidator]string + redeemMap 
map[*numberValidator]string + mx sync.Mutex + } + + stringValidatorsPool struct { + *sync.Pool + debugMap map[*stringValidator]status + allocMap map[*stringValidator]string + redeemMap map[*stringValidator]string + mx sync.Mutex + } + + schemaPropsValidatorsPool struct { + *sync.Pool + debugMap map[*schemaPropsValidator]status + allocMap map[*schemaPropsValidator]string + redeemMap map[*schemaPropsValidator]string + mx sync.Mutex + } + + formatValidatorsPool struct { + *sync.Pool + debugMap map[*formatValidator]status + allocMap map[*formatValidator]string + redeemMap map[*formatValidator]string + mx sync.Mutex + } + + typeValidatorsPool struct { + *sync.Pool + debugMap map[*typeValidator]status + allocMap map[*typeValidator]string + redeemMap map[*typeValidator]string + mx sync.Mutex + } + + schemasPool struct { + *sync.Pool + debugMap map[*spec.Schema]status + allocMap map[*spec.Schema]string + redeemMap map[*spec.Schema]string + mx sync.Mutex + } + + resultsPool struct { + *sync.Pool + debugMap map[*Result]status + allocMap map[*Result]string + redeemMap map[*Result]string + mx sync.Mutex + } +) + +func (p *schemaValidatorsPool) BorrowValidator() *SchemaValidator { + s := p.Get().(*SchemaValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled schema should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *schemaValidatorsPool) RedeemValidator(s *SchemaValidator) { + // NOTE: s might be nil. In that case, Put is a noop. + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed schema should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed schema should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *objectValidatorsPool) BorrowValidator() *objectValidator { + s := p.Get().(*objectValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled object should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *objectValidatorsPool) RedeemValidator(s *objectValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed object should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed object should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *sliceValidatorsPool) BorrowValidator() *schemaSliceValidator { + s := p.Get().(*schemaSliceValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled schemaSliceValidator should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *sliceValidatorsPool) RedeemValidator(s *schemaSliceValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed schemaSliceValidator should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed schemaSliceValidator should have been allocated from a fresh or recycled pointer") + } + 
p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *itemsValidatorsPool) BorrowValidator() *itemsValidator { + s := p.Get().(*itemsValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled itemsValidator should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *itemsValidatorsPool) RedeemValidator(s *itemsValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed itemsValidator should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed itemsValidator should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *basicCommonValidatorsPool) BorrowValidator() *basicCommonValidator { + s := p.Get().(*basicCommonValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled basicCommonValidator should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *basicCommonValidatorsPool) RedeemValidator(s *basicCommonValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed basicCommonValidator should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed basicCommonValidator should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *headerValidatorsPool) BorrowValidator() *HeaderValidator { + s := p.Get().(*HeaderValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled HeaderValidator should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *headerValidatorsPool) RedeemValidator(s *HeaderValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed header should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed header should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *paramValidatorsPool) BorrowValidator() *ParamValidator { + s := p.Get().(*ParamValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled param should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *paramValidatorsPool) RedeemValidator(s *ParamValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed param should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed param should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *basicSliceValidatorsPool) BorrowValidator() *basicSliceValidator { + s := p.Get().(*basicSliceValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + 
p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled basicSliceValidator should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *basicSliceValidatorsPool) RedeemValidator(s *basicSliceValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed basicSliceValidator should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed basicSliceValidator should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *numberValidatorsPool) BorrowValidator() *numberValidator { + s := p.Get().(*numberValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled number should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *numberValidatorsPool) RedeemValidator(s *numberValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed number should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed number should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *stringValidatorsPool) BorrowValidator() *stringValidator { + s := p.Get().(*stringValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled string should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *stringValidatorsPool) RedeemValidator(s *stringValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed string should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed string should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *schemaPropsValidatorsPool) BorrowValidator() *schemaPropsValidator { + s := p.Get().(*schemaPropsValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled param should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *schemaPropsValidatorsPool) RedeemValidator(s *schemaPropsValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed schemaProps should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed schemaProps should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *formatValidatorsPool) BorrowValidator() *formatValidator { + s := p.Get().(*formatValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled format should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *formatValidatorsPool) RedeemValidator(s *formatValidator) { + 
p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed format should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed format should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *typeValidatorsPool) BorrowValidator() *typeValidator { + s := p.Get().(*typeValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled type should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *typeValidatorsPool) RedeemValidator(s *typeValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed type should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic(fmt.Errorf("redeemed type should have been allocated from a fresh or recycled pointer. Got status %s, already redeamed at: %s", x, p.redeemMap[s])) + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *schemasPool) BorrowSchema() *spec.Schema { + s := p.Get().(*spec.Schema) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled spec.Schema should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *schemasPool) RedeemSchema(s *spec.Schema) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed spec.Schema should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed spec.Schema should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *resultsPool) BorrowResult() *Result { + s := p.Get().(*Result).cleared() + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled result should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *resultsPool) RedeemResult(s *Result) { + if s == emptyResult { + if len(s.Errors) > 0 || len(s.Warnings) > 0 { + panic("empty result should not mutate") + } + return + } + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed Result should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed Result should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *allPools) allIsRedeemed(t testing.TB) bool { + outcome := true + for k, v := range p.poolOfSchemaValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("schemaValidator should be redeemed. Allocated by: %s", p.poolOfSchemaValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfObjectValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("objectValidator should be redeemed. Allocated by: %s", p.poolOfObjectValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfSliceValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("sliceValidator should be redeemed. 
Allocated by: %s", p.poolOfSliceValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfItemsValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("itemsValidator should be redeemed. Allocated by: %s", p.poolOfItemsValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfBasicCommonValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("basicCommonValidator should be redeemed. Allocated by: %s", p.poolOfBasicCommonValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfHeaderValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("headerValidator should be redeemed. Allocated by: %s", p.poolOfHeaderValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfParamValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("paramValidator should be redeemed. Allocated by: %s", p.poolOfParamValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfBasicSliceValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("basicSliceValidator should be redeemed. Allocated by: %s", p.poolOfBasicSliceValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfNumberValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("numberValidator should be redeemed. Allocated by: %s", p.poolOfNumberValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfStringValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("stringValidator should be redeemed. Allocated by: %s", p.poolOfStringValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfSchemaPropsValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("schemaPropsValidator should be redeemed. Allocated by: %s", p.poolOfSchemaPropsValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfFormatValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("formatValidator should be redeemed. Allocated by: %s", p.poolOfFormatValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfTypeValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("typeValidator should be redeemed. Allocated by: %s", p.poolOfTypeValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfSchemas.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("schemas should be redeemed. Allocated by: %s", p.poolOfSchemas.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfResults.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("result should be redeemed. Allocated by: %s", p.poolOfResults.allocMap[k]) + outcome = false + } + + return outcome +} + +func caller() string { + pc, _, _, _ := runtime.Caller(3) //nolint:dogsled + from, line := runtime.FuncForPC(pc).FileLine(pc) + + return fmt.Sprintf("%s:%d", from, line) +} diff --git a/vendor/github.com/go-openapi/validate/result.go b/vendor/github.com/go-openapi/validate/result.go index 8f5f935e5..c80804a93 100644 --- a/vendor/github.com/go-openapi/validate/result.go +++ b/vendor/github.com/go-openapi/validate/result.go @@ -15,7 +15,7 @@ package validate import ( - "fmt" + stderrors "errors" "reflect" "strings" @@ -23,6 +23,8 @@ import ( "github.com/go-openapi/spec" ) +var emptyResult = &Result{MatchCount: 1} + // Result represents a validation result set, composed of // errors and warnings. 
// @@ -50,8 +52,10 @@ type Result struct { // Schemata for slice items itemSchemata []itemSchemata - cachedFieldSchemta map[FieldKey][]*spec.Schema - cachedItemSchemata map[ItemKey][]*spec.Schema + cachedFieldSchemata map[FieldKey][]*spec.Schema + cachedItemSchemata map[ItemKey][]*spec.Schema + + wantsRedeemOnMerge bool } // FieldKey is a pair of an object and a field, usable as a key for a map. @@ -116,6 +120,9 @@ func (r *Result) Merge(others ...*Result) *Result { } r.mergeWithoutRootSchemata(other) r.rootObjectSchemata.Append(other.rootObjectSchemata) + if other.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(other) + } } return r } @@ -132,10 +139,9 @@ func (r *Result) RootObjectSchemata() []*spec.Schema { } // FieldSchemata returns the schemata which apply to fields in objects. -// nolint: dupl func (r *Result) FieldSchemata() map[FieldKey][]*spec.Schema { - if r.cachedFieldSchemta != nil { - return r.cachedFieldSchemta + if r.cachedFieldSchemata != nil { + return r.cachedFieldSchemata } ret := make(map[FieldKey][]*spec.Schema, len(r.fieldSchemata)) @@ -147,12 +153,12 @@ func (r *Result) FieldSchemata() map[FieldKey][]*spec.Schema { ret[key] = append(ret[key], fs.schemata.multiple...) } } - r.cachedFieldSchemta = ret + r.cachedFieldSchemata = ret + return ret } // ItemSchemata returns the schemata which apply to items in slices. -// nolint: dupl func (r *Result) ItemSchemata() map[ItemKey][]*spec.Schema { if r.cachedItemSchemata != nil { return r.cachedItemSchemata @@ -172,12 +178,13 @@ func (r *Result) ItemSchemata() map[ItemKey][]*spec.Schema { } func (r *Result) resetCaches() { - r.cachedFieldSchemta = nil + r.cachedFieldSchemata = nil r.cachedItemSchemata = nil } // mergeForField merges other into r, assigning other's root schemata to the given Object and field name. -// nolint: unparam +// +//nolint:unparam func (r *Result) mergeForField(obj map[string]interface{}, field string, other *Result) *Result { if other == nil { return r @@ -188,18 +195,23 @@ func (r *Result) mergeForField(obj map[string]interface{}, field string, other * if r.fieldSchemata == nil { r.fieldSchemata = make([]fieldSchemata, len(obj)) } + // clone other schemata, as other is about to be redeemed to the pool r.fieldSchemata = append(r.fieldSchemata, fieldSchemata{ obj: obj, field: field, - schemata: other.rootObjectSchemata, + schemata: other.rootObjectSchemata.Clone(), }) } + if other.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(other) + } return r } // mergeForSlice merges other into r, assigning other's root schemata to the given slice and index. -// nolint: unparam +// +//nolint:unparam func (r *Result) mergeForSlice(slice reflect.Value, i int, other *Result) *Result { if other == nil { return r @@ -210,29 +222,38 @@ func (r *Result) mergeForSlice(slice reflect.Value, i int, other *Result) *Resul if r.itemSchemata == nil { r.itemSchemata = make([]itemSchemata, slice.Len()) } + // clone other schemata, as other is about to be redeemed to the pool r.itemSchemata = append(r.itemSchemata, itemSchemata{ slice: slice, index: i, - schemata: other.rootObjectSchemata, + schemata: other.rootObjectSchemata.Clone(), }) } + if other.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(other) + } + return r } // addRootObjectSchemata adds the given schemata for the root object of the result. -// The slice schemata might be reused. I.e. do not modify it after being added to a result. +// +// Since the slice schemata might be reused, it is shallow-cloned before saving it into the result. 
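
The Clone calls introduced in these hunks matter because a pooled Result may be recycled while schemata pointers taken from it are still referenced elsewhere. A hypothetical, standalone illustration of that aliasing hazard and the shallow-copy fix; none of the types below are the library's own.

package main

import "fmt"

type schema struct{ Title string }

type pooledResult struct{ root *schema }

type report struct{ schemas []*schema }

// keepShallowCopy stores an independent copy, so later reuse of the pooled
// result cannot mutate what the report has already recorded.
func (rep *report) keepShallowCopy(r *pooledResult) {
	clone := *r.root // shallow copy suffices here: the struct holds no shared slices
	rep.schemas = append(rep.schemas, &clone)
}

func main() {
	r := &pooledResult{root: &schema{Title: "person"}}
	rep := &report{}
	rep.keepShallowCopy(r)

	// Simulate the pool recycling r for a different validation.
	r.root.Title = "address"

	fmt.Println(rep.schemas[0].Title) // still "person": the stored copy is unaffected
}
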
func (r *Result) addRootObjectSchemata(s *spec.Schema) { - r.rootObjectSchemata.Append(schemata{one: s}) + clone := *s + r.rootObjectSchemata.Append(schemata{one: &clone}) } // addPropertySchemata adds the given schemata for the object and field. -// The slice schemata might be reused. I.e. do not modify it after being added to a result. +// +// Since the slice schemata might be reused, it is shallow-cloned before saving it into the result. func (r *Result) addPropertySchemata(obj map[string]interface{}, fld string, schema *spec.Schema) { if r.fieldSchemata == nil { r.fieldSchemata = make([]fieldSchemata, 0, len(obj)) } - r.fieldSchemata = append(r.fieldSchemata, fieldSchemata{obj: obj, field: fld, schemata: schemata{one: schema}}) + clone := *schema + r.fieldSchemata = append(r.fieldSchemata, fieldSchemata{obj: obj, field: fld, schemata: schemata{one: &clone}}) } /* @@ -255,17 +276,21 @@ func (r *Result) mergeWithoutRootSchemata(other *Result) { if other.fieldSchemata != nil { if r.fieldSchemata == nil { - r.fieldSchemata = other.fieldSchemata - } else { - r.fieldSchemata = append(r.fieldSchemata, other.fieldSchemata...) + r.fieldSchemata = make([]fieldSchemata, 0, len(other.fieldSchemata)) + } + for _, field := range other.fieldSchemata { + field.schemata = field.schemata.Clone() + r.fieldSchemata = append(r.fieldSchemata, field) } } if other.itemSchemata != nil { if r.itemSchemata == nil { - r.itemSchemata = other.itemSchemata - } else { - r.itemSchemata = append(r.itemSchemata, other.itemSchemata...) + r.itemSchemata = make([]itemSchemata, 0, len(other.itemSchemata)) + } + for _, field := range other.itemSchemata { + field.schemata = field.schemata.Clone() + r.itemSchemata = append(r.itemSchemata, field) } } } @@ -280,6 +305,9 @@ func (r *Result) MergeAsErrors(others ...*Result) *Result { r.AddErrors(other.Errors...) r.AddErrors(other.Warnings...) r.MatchCount += other.MatchCount + if other.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(other) + } } } return r @@ -295,6 +323,9 @@ func (r *Result) MergeAsWarnings(others ...*Result) *Result { r.AddWarnings(other.Errors...) r.AddWarnings(other.Warnings...) r.MatchCount += other.MatchCount + if other.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(other) + } } } return r @@ -356,16 +387,21 @@ func (r *Result) keepRelevantErrors() *Result { strippedErrors := []error{} for _, e := range r.Errors { if strings.HasPrefix(e.Error(), "IMPORTANT!") { - strippedErrors = append(strippedErrors, fmt.Errorf(strings.TrimPrefix(e.Error(), "IMPORTANT!"))) + strippedErrors = append(strippedErrors, stderrors.New(strings.TrimPrefix(e.Error(), "IMPORTANT!"))) } } strippedWarnings := []error{} for _, e := range r.Warnings { if strings.HasPrefix(e.Error(), "IMPORTANT!") { - strippedWarnings = append(strippedWarnings, fmt.Errorf(strings.TrimPrefix(e.Error(), "IMPORTANT!"))) + strippedWarnings = append(strippedWarnings, stderrors.New(strings.TrimPrefix(e.Error(), "IMPORTANT!"))) } } - strippedResult := new(Result) + var strippedResult *Result + if r.wantsRedeemOnMerge { + strippedResult = pools.poolOfResults.BorrowResult() + } else { + strippedResult = new(Result) + } strippedResult.Errors = strippedErrors strippedResult.Warnings = strippedWarnings return strippedResult @@ -427,6 +463,27 @@ func (r *Result) AsError() error { return errors.CompositeValidationError(r.Errors...) } +func (r *Result) cleared() *Result { + // clear the Result to be reusable. Keep allocated capacity. 
+ r.Errors = r.Errors[:0] + r.Warnings = r.Warnings[:0] + r.MatchCount = 0 + r.data = nil + r.rootObjectSchemata.one = nil + r.rootObjectSchemata.multiple = r.rootObjectSchemata.multiple[:0] + r.fieldSchemata = r.fieldSchemata[:0] + r.itemSchemata = r.itemSchemata[:0] + for k := range r.cachedFieldSchemata { + delete(r.cachedFieldSchemata, k) + } + for k := range r.cachedItemSchemata { + delete(r.cachedItemSchemata, k) + } + r.wantsRedeemOnMerge = true // mark this result as eligible for redeem when merged into another + + return r +} + // schemata is an arbitrary number of schemata. It does a distinction between zero, // one and many schemata to avoid slice allocations. type schemata struct { @@ -453,7 +510,7 @@ func (s *schemata) Slice() []*spec.Schema { return s.multiple } -// appendSchemata appends the schemata in other to s. It mutated s in-place. +// appendSchemata appends the schemata in other to s. It mutates s in-place. func (s *schemata) Append(other schemata) { if other.one == nil && len(other.multiple) == 0 { return @@ -484,3 +541,23 @@ func (s *schemata) Append(other schemata) { } } } + +func (s schemata) Clone() schemata { + var clone schemata + + if s.one != nil { + clone.one = new(spec.Schema) + *clone.one = *s.one + } + + if len(s.multiple) > 0 { + clone.multiple = make([]*spec.Schema, len(s.multiple)) + for idx := 0; idx < len(s.multiple); idx++ { + sp := new(spec.Schema) + *sp = *s.multiple[idx] + clone.multiple[idx] = sp + } + } + + return clone +} diff --git a/vendor/github.com/go-openapi/validate/schema.go b/vendor/github.com/go-openapi/validate/schema.go index b817eb0ef..db65264fd 100644 --- a/vendor/github.com/go-openapi/validate/schema.go +++ b/vendor/github.com/go-openapi/validate/schema.go @@ -24,32 +24,32 @@ import ( "github.com/go-openapi/swag" ) -var ( - specSchemaType = reflect.TypeOf(&spec.Schema{}) - specParameterType = reflect.TypeOf(&spec.Parameter{}) - specHeaderType = reflect.TypeOf(&spec.Header{}) - // specItemsType = reflect.TypeOf(&spec.Items{}) -) - // SchemaValidator validates data against a JSON schema type SchemaValidator struct { Path string in string Schema *spec.Schema - validators []valueValidator + validators [8]valueValidator Root interface{} KnownFormats strfmt.Registry - Options SchemaValidatorOptions + Options *SchemaValidatorOptions } // AgainstSchema validates the specified data against the provided schema, using a registry of supported formats. // // When no pre-parsed *spec.Schema structure is provided, it uses a JSON schema as default. See example. func AgainstSchema(schema *spec.Schema, data interface{}, formats strfmt.Registry, options ...Option) error { - res := NewSchemaValidator(schema, nil, "", formats, options...).Validate(data) + res := NewSchemaValidator(schema, nil, "", formats, + append(options, WithRecycleValidators(true), withRecycleResults(true))..., + ).Validate(data) + defer func() { + pools.poolOfResults.RedeemResult(res) + }() + if res.HasErrors() { return errors.CompositeValidationError(res.Errors...) } + return nil } @@ -57,6 +57,15 @@ func AgainstSchema(schema *spec.Schema, data interface{}, formats strfmt.Registr // // Panics if the provided schema is invalid. 
func NewSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string, formats strfmt.Registry, options ...Option) *SchemaValidator { + opts := new(SchemaValidatorOptions) + for _, o := range options { + o(opts) + } + + return newSchemaValidator(schema, rootSchema, root, formats, opts) +} + +func newSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string, formats strfmt.Registry, opts *SchemaValidatorOptions) *SchemaValidator { if schema == nil { return nil } @@ -72,17 +81,26 @@ func NewSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string panic(msg) } } - s := SchemaValidator{ - Path: root, - in: "body", - Schema: schema, - Root: rootSchema, - KnownFormats: formats, - Options: SchemaValidatorOptions{}} - for _, o := range options { - o(&s.Options) + + if opts == nil { + opts = new(SchemaValidatorOptions) } - s.validators = []valueValidator{ + + var s *SchemaValidator + if opts.recycleValidators { + s = pools.poolOfSchemaValidators.BorrowValidator() + } else { + s = new(SchemaValidator) + } + + s.Path = root + s.in = "body" + s.Schema = schema + s.Root = rootSchema + s.Options = opts + s.KnownFormats = formats + + s.validators = [8]valueValidator{ s.typeValidator(), s.schemaPropsValidator(), s.stringValidator(), @@ -92,7 +110,8 @@ func NewSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string s.commonValidator(), s.objectValidator(), } - return &s + + return s } // SetPath sets the path for this schema valdiator @@ -101,24 +120,46 @@ func (s *SchemaValidator) SetPath(path string) { } // Applies returns true when this schema validator applies -func (s *SchemaValidator) Applies(source interface{}, kind reflect.Kind) bool { +func (s *SchemaValidator) Applies(source interface{}, _ reflect.Kind) bool { _, ok := source.(*spec.Schema) return ok } // Validate validates the data against the schema func (s *SchemaValidator) Validate(data interface{}) *Result { - result := &Result{data: data} if s == nil { - return result + return emptyResult } - if s.Schema != nil { + + if s.Options.recycleValidators { + defer func() { + s.redeemChildren() + s.redeem() // one-time use validator + }() + } + + var result *Result + if s.Options.recycleResult { + result = pools.poolOfResults.BorrowResult() + result.data = data + } else { + result = &Result{data: data} + } + + if s.Schema != nil && !s.Options.skipSchemataResult { result.addRootObjectSchemata(s.Schema) } if data == nil { + // early exit with minimal validation result.Merge(s.validators[0].Validate(data)) // type validator result.Merge(s.validators[6].Validate(data)) // common validator + + if s.Options.recycleValidators { + s.validators[0] = nil + s.validators[6] = nil + } + return result } @@ -147,6 +188,7 @@ func (s *SchemaValidator) Validate(data interface{}) *Result { if erri != nil { result.AddErrors(invalidTypeConversionMsg(s.Path, erri)) result.Inc() + return result } d = in @@ -155,6 +197,7 @@ func (s *SchemaValidator) Validate(data interface{}) *Result { if errf != nil { result.AddErrors(invalidTypeConversionMsg(s.Path, errf)) result.Inc() + return result } d = nf @@ -164,14 +207,26 @@ func (s *SchemaValidator) Validate(data interface{}) *Result { kind = tpe.Kind() } - for _, v := range s.validators { + for idx, v := range s.validators { if !v.Applies(s.Schema, kind) { - debugLog("%T does not apply for %v", v, kind) + if s.Options.recycleValidators { + // Validate won't be called, so relinquish this validator + if redeemableChildren, ok := v.(interface{ redeemChildren() }); ok { + 
redeemableChildren.redeemChildren() + } + if redeemable, ok := v.(interface{ redeem() }); ok { + redeemable.redeem() + } + s.validators[idx] = nil // prevents further (unsafe) usage + } + continue } - err := v.Validate(d) - result.Merge(err) + result.Merge(v.Validate(d)) + if s.Options.recycleValidators { + s.validators[idx] = nil // prevents further (unsafe) usage + } result.Inc() } result.Inc() @@ -180,81 +235,120 @@ func (s *SchemaValidator) Validate(data interface{}) *Result { } func (s *SchemaValidator) typeValidator() valueValidator { - return &typeValidator{Type: s.Schema.Type, Nullable: s.Schema.Nullable, Format: s.Schema.Format, In: s.in, Path: s.Path} + return newTypeValidator( + s.Path, + s.in, + s.Schema.Type, + s.Schema.Nullable, + s.Schema.Format, + s.Options, + ) } func (s *SchemaValidator) commonValidator() valueValidator { - return &basicCommonValidator{ - Path: s.Path, - In: s.in, - Enum: s.Schema.Enum, - } + return newBasicCommonValidator( + s.Path, + s.in, + s.Schema.Default, + s.Schema.Enum, + s.Options, + ) } func (s *SchemaValidator) sliceValidator() valueValidator { - return &schemaSliceValidator{ - Path: s.Path, - In: s.in, - MaxItems: s.Schema.MaxItems, - MinItems: s.Schema.MinItems, - UniqueItems: s.Schema.UniqueItems, - AdditionalItems: s.Schema.AdditionalItems, - Items: s.Schema.Items, - Root: s.Root, - KnownFormats: s.KnownFormats, - Options: s.Options, - } + return newSliceValidator( + s.Path, + s.in, + s.Schema.MaxItems, + s.Schema.MinItems, + s.Schema.UniqueItems, + s.Schema.AdditionalItems, + s.Schema.Items, + s.Root, + s.KnownFormats, + s.Options, + ) } func (s *SchemaValidator) numberValidator() valueValidator { - return &numberValidator{ - Path: s.Path, - In: s.in, - Default: s.Schema.Default, - MultipleOf: s.Schema.MultipleOf, - Maximum: s.Schema.Maximum, - ExclusiveMaximum: s.Schema.ExclusiveMaximum, - Minimum: s.Schema.Minimum, - ExclusiveMinimum: s.Schema.ExclusiveMinimum, - } + return newNumberValidator( + s.Path, + s.in, + s.Schema.Default, + s.Schema.MultipleOf, + s.Schema.Maximum, + s.Schema.ExclusiveMaximum, + s.Schema.Minimum, + s.Schema.ExclusiveMinimum, + "", + "", + s.Options, + ) } func (s *SchemaValidator) stringValidator() valueValidator { - return &stringValidator{ - Path: s.Path, - In: s.in, - MaxLength: s.Schema.MaxLength, - MinLength: s.Schema.MinLength, - Pattern: s.Schema.Pattern, - } + return newStringValidator( + s.Path, + s.in, + nil, + false, + false, + s.Schema.MaxLength, + s.Schema.MinLength, + s.Schema.Pattern, + s.Options, + ) } func (s *SchemaValidator) formatValidator() valueValidator { - return &formatValidator{ - Path: s.Path, - In: s.in, - Format: s.Schema.Format, - KnownFormats: s.KnownFormats, - } + return newFormatValidator( + s.Path, + s.in, + s.Schema.Format, + s.KnownFormats, + s.Options, + ) } func (s *SchemaValidator) schemaPropsValidator() valueValidator { sch := s.Schema - return newSchemaPropsValidator(s.Path, s.in, sch.AllOf, sch.OneOf, sch.AnyOf, sch.Not, sch.Dependencies, s.Root, s.KnownFormats, s.Options.Options()...) 
+ return newSchemaPropsValidator( + s.Path, s.in, sch.AllOf, sch.OneOf, sch.AnyOf, sch.Not, sch.Dependencies, s.Root, s.KnownFormats, + s.Options, + ) } func (s *SchemaValidator) objectValidator() valueValidator { - return &objectValidator{ - Path: s.Path, - In: s.in, - MaxProperties: s.Schema.MaxProperties, - MinProperties: s.Schema.MinProperties, - Required: s.Schema.Required, - Properties: s.Schema.Properties, - AdditionalProperties: s.Schema.AdditionalProperties, - PatternProperties: s.Schema.PatternProperties, - Root: s.Root, - KnownFormats: s.KnownFormats, - Options: s.Options, + return newObjectValidator( + s.Path, + s.in, + s.Schema.MaxProperties, + s.Schema.MinProperties, + s.Schema.Required, + s.Schema.Properties, + s.Schema.AdditionalProperties, + s.Schema.PatternProperties, + s.Root, + s.KnownFormats, + s.Options, + ) +} + +func (s *SchemaValidator) redeem() { + pools.poolOfSchemaValidators.RedeemValidator(s) +} + +func (s *SchemaValidator) redeemChildren() { + for i, validator := range s.validators { + if validator == nil { + continue + } + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + s.validators[i] = nil // free up allocated children if not in pool } } diff --git a/vendor/github.com/go-openapi/validate/schema_option.go b/vendor/github.com/go-openapi/validate/schema_option.go index 4b4879de8..65eeebeaa 100644 --- a/vendor/github.com/go-openapi/validate/schema_option.go +++ b/vendor/github.com/go-openapi/validate/schema_option.go @@ -18,6 +18,9 @@ package validate type SchemaValidatorOptions struct { EnableObjectArrayTypeCheck bool EnableArrayMustHaveItemsCheck bool + recycleValidators bool + recycleResult bool + skipSchemataResult bool } // Option sets optional rules for schema validation @@ -45,10 +48,36 @@ func SwaggerSchema(enable bool) Option { } } -// Options returns current options +// WithRecycleValidators saves memory allocations and makes validators +// available for a single use of Validate() only. +// +// When a validator is recycled, called MUST not call the Validate() method twice. 
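
For reference, a hedged usage sketch of the WithRecycleValidators option declared just below, relying only on exported calls visible in this diff (NewSchemaValidator, WithRecycleValidators, Result.IsValid); the spec.StringProperty schema and strfmt.Default registry are stand-ins chosen for illustration. With recycling enabled a validator is single use, i.e. the caller must not invoke Validate a second time.

package main

import (
	"fmt"

	"github.com/go-openapi/spec"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/validate"
)

func main() {
	schema := spec.StringProperty() // placeholder schema: {"type": "string"}

	v := validate.NewSchemaValidator(
		schema, nil, "", strfmt.Default,
		validate.WithRecycleValidators(true), // validators come from and return to the pools
	)

	// The validator is single-use once recycling is on: call Validate exactly once.
	res := v.Validate("hello")
	fmt.Println("valid:", res.IsValid())
}
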
+func WithRecycleValidators(enable bool) Option { + return func(svo *SchemaValidatorOptions) { + svo.recycleValidators = enable + } +} + +func withRecycleResults(enable bool) Option { + return func(svo *SchemaValidatorOptions) { + svo.recycleResult = enable + } +} + +// WithSkipSchemataResult skips the deep audit payload stored in validation Result +func WithSkipSchemataResult(enable bool) Option { + return func(svo *SchemaValidatorOptions) { + svo.skipSchemataResult = enable + } +} + +// Options returns the current set of options func (svo SchemaValidatorOptions) Options() []Option { return []Option{ EnableObjectArrayTypeCheck(svo.EnableObjectArrayTypeCheck), EnableArrayMustHaveItemsCheck(svo.EnableArrayMustHaveItemsCheck), + WithRecycleValidators(svo.recycleValidators), + withRecycleResults(svo.recycleResult), + WithSkipSchemataResult(svo.skipSchemataResult), } } diff --git a/vendor/github.com/go-openapi/validate/schema_props.go b/vendor/github.com/go-openapi/validate/schema_props.go index 9bac3d29f..1ca379244 100644 --- a/vendor/github.com/go-openapi/validate/schema_props.go +++ b/vendor/github.com/go-openapi/validate/schema_props.go @@ -30,211 +30,327 @@ type schemaPropsValidator struct { AnyOf []spec.Schema Not *spec.Schema Dependencies spec.Dependencies - anyOfValidators []SchemaValidator - allOfValidators []SchemaValidator - oneOfValidators []SchemaValidator + anyOfValidators []*SchemaValidator + allOfValidators []*SchemaValidator + oneOfValidators []*SchemaValidator notValidator *SchemaValidator Root interface{} KnownFormats strfmt.Registry - Options SchemaValidatorOptions + Options *SchemaValidatorOptions } func (s *schemaPropsValidator) SetPath(path string) { s.Path = path } -func newSchemaPropsValidator(path string, in string, allOf, oneOf, anyOf []spec.Schema, not *spec.Schema, deps spec.Dependencies, root interface{}, formats strfmt.Registry, options ...Option) *schemaPropsValidator { - anyValidators := make([]SchemaValidator, 0, len(anyOf)) - for _, v := range anyOf { - v := v - anyValidators = append(anyValidators, *NewSchemaValidator(&v, root, path, formats, options...)) +func newSchemaPropsValidator( + path string, in string, allOf, oneOf, anyOf []spec.Schema, not *spec.Schema, deps spec.Dependencies, root interface{}, formats strfmt.Registry, + opts *SchemaValidatorOptions) *schemaPropsValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) } - allValidators := make([]SchemaValidator, 0, len(allOf)) - for _, v := range allOf { - v := v - allValidators = append(allValidators, *NewSchemaValidator(&v, root, path, formats, options...)) + + anyValidators := make([]*SchemaValidator, 0, len(anyOf)) + for i := range anyOf { + anyValidators = append(anyValidators, newSchemaValidator(&anyOf[i], root, path, formats, opts)) } - oneValidators := make([]SchemaValidator, 0, len(oneOf)) - for _, v := range oneOf { - v := v - oneValidators = append(oneValidators, *NewSchemaValidator(&v, root, path, formats, options...)) + allValidators := make([]*SchemaValidator, 0, len(allOf)) + for i := range allOf { + allValidators = append(allValidators, newSchemaValidator(&allOf[i], root, path, formats, opts)) + } + oneValidators := make([]*SchemaValidator, 0, len(oneOf)) + for i := range oneOf { + oneValidators = append(oneValidators, newSchemaValidator(&oneOf[i], root, path, formats, opts)) } var notValidator *SchemaValidator if not != nil { - notValidator = NewSchemaValidator(not, root, path, formats, options...) 
+ notValidator = newSchemaValidator(not, root, path, formats, opts) } - schOptions := &SchemaValidatorOptions{} - for _, o := range options { - o(schOptions) - } - return &schemaPropsValidator{ - Path: path, - In: in, - AllOf: allOf, - OneOf: oneOf, - AnyOf: anyOf, - Not: not, - Dependencies: deps, - anyOfValidators: anyValidators, - allOfValidators: allValidators, - oneOfValidators: oneValidators, - notValidator: notValidator, - Root: root, - KnownFormats: formats, - Options: *schOptions, + var s *schemaPropsValidator + if opts.recycleValidators { + s = pools.poolOfSchemaPropsValidators.BorrowValidator() + } else { + s = new(schemaPropsValidator) } + + s.Path = path + s.In = in + s.AllOf = allOf + s.OneOf = oneOf + s.AnyOf = anyOf + s.Not = not + s.Dependencies = deps + s.anyOfValidators = anyValidators + s.allOfValidators = allValidators + s.oneOfValidators = oneValidators + s.notValidator = notValidator + s.Root = root + s.KnownFormats = formats + s.Options = opts + + return s } -func (s *schemaPropsValidator) Applies(source interface{}, kind reflect.Kind) bool { - r := reflect.TypeOf(source) == specSchemaType - debugLog("schema props validator for %q applies %t for %T (kind: %v)\n", s.Path, r, source, kind) - return r +func (s *schemaPropsValidator) Applies(source interface{}, _ reflect.Kind) bool { + _, isSchema := source.(*spec.Schema) + return isSchema } func (s *schemaPropsValidator) Validate(data interface{}) *Result { - mainResult := new(Result) + var mainResult *Result + if s.Options.recycleResult { + mainResult = pools.poolOfResults.BorrowResult() + } else { + mainResult = new(Result) + } // Intermediary error results // IMPORTANT! messages from underlying validators - keepResultAnyOf := new(Result) - keepResultOneOf := new(Result) - keepResultAllOf := new(Result) + var keepResultAnyOf, keepResultOneOf, keepResultAllOf *Result + + if s.Options.recycleValidators { + defer func() { + s.redeemChildren() + s.redeem() + + // results are redeemed when merged + }() + } - // Validates at least one in anyOf schemas - var firstSuccess *Result if len(s.anyOfValidators) > 0 { - var bestFailures *Result - succeededOnce := false - for _, anyOfSchema := range s.anyOfValidators { - result := anyOfSchema.Validate(data) - // We keep inner IMPORTANT! errors no matter what MatchCount tells us - keepResultAnyOf.Merge(result.keepRelevantErrors()) - if result.IsValid() { - bestFailures = nil - succeededOnce = true - if firstSuccess == nil { - firstSuccess = result - } - keepResultAnyOf = new(Result) - break - } - // MatchCount is used to select errors from the schema with most positive checks - if bestFailures == nil || result.MatchCount > bestFailures.MatchCount { - bestFailures = result - } - } - - if !succeededOnce { - mainResult.AddErrors(mustValidateAtLeastOneSchemaMsg(s.Path)) - } - if bestFailures != nil { - mainResult.Merge(bestFailures) - } else if firstSuccess != nil { - mainResult.Merge(firstSuccess) - } + keepResultAnyOf = pools.poolOfResults.BorrowResult() + s.validateAnyOf(data, mainResult, keepResultAnyOf) } - // Validates exactly one in oneOf schemas if len(s.oneOfValidators) > 0 { - var bestFailures *Result - var firstSuccess *Result - validated := 0 - - for _, oneOfSchema := range s.oneOfValidators { - result := oneOfSchema.Validate(data) - // We keep inner IMPORTANT! 
errors no matter what MatchCount tells us - keepResultOneOf.Merge(result.keepRelevantErrors()) - if result.IsValid() { - validated++ - bestFailures = nil - if firstSuccess == nil { - firstSuccess = result - } - keepResultOneOf = new(Result) - continue - } - // MatchCount is used to select errors from the schema with most positive checks - if validated == 0 && (bestFailures == nil || result.MatchCount > bestFailures.MatchCount) { - bestFailures = result - } - } - - if validated != 1 { - var additionalMsg string - if validated == 0 { - additionalMsg = "Found none valid" - } else { - additionalMsg = fmt.Sprintf("Found %d valid alternatives", validated) - } - - mainResult.AddErrors(mustValidateOnlyOneSchemaMsg(s.Path, additionalMsg)) - if bestFailures != nil { - mainResult.Merge(bestFailures) - } - } else if firstSuccess != nil { - mainResult.Merge(firstSuccess) - } + keepResultOneOf = pools.poolOfResults.BorrowResult() + s.validateOneOf(data, mainResult, keepResultOneOf) } - // Validates all of allOf schemas if len(s.allOfValidators) > 0 { - validated := 0 - - for _, allOfSchema := range s.allOfValidators { - result := allOfSchema.Validate(data) - // We keep inner IMPORTANT! errors no matter what MatchCount tells us - keepResultAllOf.Merge(result.keepRelevantErrors()) - // keepResultAllOf.Merge(result) - if result.IsValid() { - validated++ - } - mainResult.Merge(result) - } - - if validated != len(s.allOfValidators) { - additionalMsg := "" - if validated == 0 { - additionalMsg = ". None validated" - } - - mainResult.AddErrors(mustValidateAllSchemasMsg(s.Path, additionalMsg)) - } + keepResultAllOf = pools.poolOfResults.BorrowResult() + s.validateAllOf(data, mainResult, keepResultAllOf) } if s.notValidator != nil { - result := s.notValidator.Validate(data) - // We keep inner IMPORTANT! errors no matter what MatchCount tells us - if result.IsValid() { - mainResult.AddErrors(mustNotValidatechemaMsg(s.Path)) - } + s.validateNot(data, mainResult) } if s.Dependencies != nil && len(s.Dependencies) > 0 && reflect.TypeOf(data).Kind() == reflect.Map { - val := data.(map[string]interface{}) - for key := range val { - if dep, ok := s.Dependencies[key]; ok { - - if dep.Schema != nil { - mainResult.Merge(NewSchemaValidator(dep.Schema, s.Root, s.Path+"."+key, s.KnownFormats, s.Options.Options()...).Validate(data)) - continue - } - - if len(dep.Property) > 0 { - for _, depKey := range dep.Property { - if _, ok := val[depKey]; !ok { - mainResult.AddErrors(hasADependencyMsg(s.Path, depKey)) - } - } - } - } - } + s.validateDependencies(data, mainResult) } mainResult.Inc() + // In the end we retain best failures for schema validation // plus, if any, composite errors which may explain special cases (tagged as IMPORTANT!). return mainResult.Merge(keepResultAllOf, keepResultOneOf, keepResultAnyOf) } + +func (s *schemaPropsValidator) validateAnyOf(data interface{}, mainResult, keepResultAnyOf *Result) { + // Validates at least one in anyOf schemas + var bestFailures *Result + + for i, anyOfSchema := range s.anyOfValidators { + result := anyOfSchema.Validate(data) + if s.Options.recycleValidators { + s.anyOfValidators[i] = nil + } + // We keep inner IMPORTANT! 
errors no matter what MatchCount tells us + keepResultAnyOf.Merge(result.keepRelevantErrors()) // merges (and redeems) a new instance of Result + + if result.IsValid() { + if bestFailures != nil && bestFailures.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(bestFailures) + } + + _ = keepResultAnyOf.cleared() + mainResult.Merge(result) + + return + } + + // MatchCount is used to select errors from the schema with most positive checks + if bestFailures == nil || result.MatchCount > bestFailures.MatchCount { + if bestFailures != nil && bestFailures.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(bestFailures) + } + bestFailures = result + + continue + } + + if result.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(result) // this result is ditched + } + } + + mainResult.AddErrors(mustValidateAtLeastOneSchemaMsg(s.Path)) + mainResult.Merge(bestFailures) +} + +func (s *schemaPropsValidator) validateOneOf(data interface{}, mainResult, keepResultOneOf *Result) { + // Validates exactly one in oneOf schemas + var ( + firstSuccess, bestFailures *Result + validated int + ) + + for i, oneOfSchema := range s.oneOfValidators { + result := oneOfSchema.Validate(data) + if s.Options.recycleValidators { + s.oneOfValidators[i] = nil + } + + // We keep inner IMPORTANT! errors no matter what MatchCount tells us + keepResultOneOf.Merge(result.keepRelevantErrors()) // merges (and redeems) a new instance of Result + + if result.IsValid() { + validated++ + _ = keepResultOneOf.cleared() + + if firstSuccess == nil { + firstSuccess = result + } else if result.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(result) // this result is ditched + } + + continue + } + + // MatchCount is used to select errors from the schema with most positive checks + if validated == 0 && (bestFailures == nil || result.MatchCount > bestFailures.MatchCount) { + if bestFailures != nil && bestFailures.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(bestFailures) + } + bestFailures = result + } else if result.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(result) // this result is ditched + } + } + + switch validated { + case 0: + mainResult.AddErrors(mustValidateOnlyOneSchemaMsg(s.Path, "Found none valid")) + mainResult.Merge(bestFailures) + // firstSucess necessarily nil + case 1: + mainResult.Merge(firstSuccess) + if bestFailures != nil && bestFailures.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(bestFailures) + } + default: + mainResult.AddErrors(mustValidateOnlyOneSchemaMsg(s.Path, fmt.Sprintf("Found %d valid alternatives", validated))) + mainResult.Merge(bestFailures) + if firstSuccess != nil && firstSuccess.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(firstSuccess) + } + } +} + +func (s *schemaPropsValidator) validateAllOf(data interface{}, mainResult, keepResultAllOf *Result) { + // Validates all of allOf schemas + var validated int + + for i, allOfSchema := range s.allOfValidators { + result := allOfSchema.Validate(data) + if s.Options.recycleValidators { + s.allOfValidators[i] = nil + } + // We keep inner IMPORTANT! errors no matter what MatchCount tells us + keepResultAllOf.Merge(result.keepRelevantErrors()) + if result.IsValid() { + validated++ + } + mainResult.Merge(result) + } + + switch validated { + case 0: + mainResult.AddErrors(mustValidateAllSchemasMsg(s.Path, ". 
None validated")) + case len(s.allOfValidators): + default: + mainResult.AddErrors(mustValidateAllSchemasMsg(s.Path, "")) + } +} + +func (s *schemaPropsValidator) validateNot(data interface{}, mainResult *Result) { + result := s.notValidator.Validate(data) + if s.Options.recycleValidators { + s.notValidator = nil + } + // We keep inner IMPORTANT! errors no matter what MatchCount tells us + if result.IsValid() { + mainResult.AddErrors(mustNotValidatechemaMsg(s.Path)) + } + if result.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(result) // this result is ditched + } +} + +func (s *schemaPropsValidator) validateDependencies(data interface{}, mainResult *Result) { + val := data.(map[string]interface{}) + for key := range val { + dep, ok := s.Dependencies[key] + if !ok { + continue + } + + if dep.Schema != nil { + mainResult.Merge( + newSchemaValidator(dep.Schema, s.Root, s.Path+"."+key, s.KnownFormats, s.Options).Validate(data), + ) + continue + } + + if len(dep.Property) > 0 { + for _, depKey := range dep.Property { + if _, ok := val[depKey]; !ok { + mainResult.AddErrors(hasADependencyMsg(s.Path, depKey)) + } + } + } + } +} + +func (s *schemaPropsValidator) redeem() { + pools.poolOfSchemaPropsValidators.RedeemValidator(s) +} + +func (s *schemaPropsValidator) redeemChildren() { + for _, v := range s.anyOfValidators { + if v == nil { + continue + } + v.redeemChildren() + v.redeem() + } + s.anyOfValidators = nil + + for _, v := range s.allOfValidators { + if v == nil { + continue + } + v.redeemChildren() + v.redeem() + } + s.allOfValidators = nil + + for _, v := range s.oneOfValidators { + if v == nil { + continue + } + v.redeemChildren() + v.redeem() + } + s.oneOfValidators = nil + + if s.notValidator != nil { + s.notValidator.redeemChildren() + s.notValidator.redeem() + s.notValidator = nil + } +} diff --git a/vendor/github.com/go-openapi/validate/slice_validator.go b/vendor/github.com/go-openapi/validate/slice_validator.go index aa429f518..13bb02087 100644 --- a/vendor/github.com/go-openapi/validate/slice_validator.go +++ b/vendor/github.com/go-openapi/validate/slice_validator.go @@ -32,7 +32,36 @@ type schemaSliceValidator struct { Items *spec.SchemaOrArray Root interface{} KnownFormats strfmt.Registry - Options SchemaValidatorOptions + Options *SchemaValidatorOptions +} + +func newSliceValidator(path, in string, + maxItems, minItems *int64, uniqueItems bool, + additionalItems *spec.SchemaOrBool, items *spec.SchemaOrArray, + root interface{}, formats strfmt.Registry, opts *SchemaValidatorOptions) *schemaSliceValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var v *schemaSliceValidator + if opts.recycleValidators { + v = pools.poolOfSliceValidators.BorrowValidator() + } else { + v = new(schemaSliceValidator) + } + + v.Path = path + v.In = in + v.MaxItems = maxItems + v.MinItems = minItems + v.UniqueItems = uniqueItems + v.AdditionalItems = additionalItems + v.Items = items + v.Root = root + v.KnownFormats = formats + v.Options = opts + + return v } func (s *schemaSliceValidator) SetPath(path string) { @@ -46,7 +75,18 @@ func (s *schemaSliceValidator) Applies(source interface{}, kind reflect.Kind) bo } func (s *schemaSliceValidator) Validate(data interface{}) *Result { - result := new(Result) + if s.Options.recycleValidators { + defer func() { + s.redeem() + }() + } + + var result *Result + if s.Options.recycleResult { + result = pools.poolOfResults.BorrowResult() + } else { + result = new(Result) + } if data == nil { return result } @@ -54,8 +94,8 @@ func 
(s *schemaSliceValidator) Validate(data interface{}) *Result { size := val.Len() if s.Items != nil && s.Items.Schema != nil { - validator := NewSchemaValidator(s.Items.Schema, s.Root, s.Path, s.KnownFormats, s.Options.Options()...) for i := 0; i < size; i++ { + validator := newSchemaValidator(s.Items.Schema, s.Root, s.Path, s.KnownFormats, s.Options) validator.SetPath(fmt.Sprintf("%s.%d", s.Path, i)) value := val.Index(i) result.mergeForSlice(val, i, validator.Validate(value.Interface())) @@ -66,10 +106,11 @@ func (s *schemaSliceValidator) Validate(data interface{}) *Result { if s.Items != nil && len(s.Items.Schemas) > 0 { itemsSize = len(s.Items.Schemas) for i := 0; i < itemsSize; i++ { - validator := NewSchemaValidator(&s.Items.Schemas[i], s.Root, fmt.Sprintf("%s.%d", s.Path, i), s.KnownFormats, s.Options.Options()...) - if val.Len() <= i { + if size <= i { break } + + validator := newSchemaValidator(&s.Items.Schemas[i], s.Root, fmt.Sprintf("%s.%d", s.Path, i), s.KnownFormats, s.Options) result.mergeForSlice(val, i, validator.Validate(val.Index(i).Interface())) } } @@ -79,7 +120,7 @@ func (s *schemaSliceValidator) Validate(data interface{}) *Result { } if s.AdditionalItems.Schema != nil { for i := itemsSize; i < size-itemsSize+1; i++ { - validator := NewSchemaValidator(s.AdditionalItems.Schema, s.Root, fmt.Sprintf("%s.%d", s.Path, i), s.KnownFormats, s.Options.Options()...) + validator := newSchemaValidator(s.AdditionalItems.Schema, s.Root, fmt.Sprintf("%s.%d", s.Path, i), s.KnownFormats, s.Options) result.mergeForSlice(val, i, validator.Validate(val.Index(i).Interface())) } } @@ -103,3 +144,7 @@ func (s *schemaSliceValidator) Validate(data interface{}) *Result { result.Inc() return result } + +func (s *schemaSliceValidator) redeem() { + pools.poolOfSliceValidators.RedeemValidator(s) +} diff --git a/vendor/github.com/go-openapi/validate/spec.go b/vendor/github.com/go-openapi/validate/spec.go index dff01f00b..965452566 100644 --- a/vendor/github.com/go-openapi/validate/spec.go +++ b/vendor/github.com/go-openapi/validate/spec.go @@ -15,6 +15,8 @@ package validate import ( + "bytes" + "encoding/gob" "encoding/json" "fmt" "sort" @@ -26,23 +28,23 @@ import ( "github.com/go-openapi/loads" "github.com/go-openapi/spec" "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" ) // Spec validates an OpenAPI 2.0 specification document. // // Returns an error flattening in a single standard error, all validation messages. // -// - TODO: $ref should not have siblings -// - TODO: make sure documentation reflects all checks and warnings -// - TODO: check on discriminators -// - TODO: explicit message on unsupported keywords (better than "forbidden property"...) -// - TODO: full list of unresolved refs -// - TODO: validate numeric constraints (issue#581): this should be handled like defaults and examples -// - TODO: option to determine if we validate for go-swagger or in a more general context -// - TODO: check on required properties to support anyOf, allOf, oneOf +// - TODO: $ref should not have siblings +// - TODO: make sure documentation reflects all checks and warnings +// - TODO: check on discriminators +// - TODO: explicit message on unsupported keywords (better than "forbidden property"...) 
+// - TODO: full list of unresolved refs +// - TODO: validate numeric constraints (issue#581): this should be handled like defaults and examples +// - TODO: option to determine if we validate for go-swagger or in a more general context +// - TODO: check on required properties to support anyOf, allOf, oneOf // // NOTE: SecurityScopes are maps: no need to check uniqueness -// func Spec(doc *loads.Document, formats strfmt.Registry) error { errs, _ /*warns*/ := NewSpecValidator(doc.Schema(), formats).Validate(doc) if errs.HasErrors() { @@ -53,25 +55,38 @@ func Spec(doc *loads.Document, formats strfmt.Registry) error { // SpecValidator validates a swagger 2.0 spec type SpecValidator struct { - schema *spec.Schema // swagger 2.0 schema - spec *loads.Document - analyzer *analysis.Spec - expanded *loads.Document - KnownFormats strfmt.Registry - Options Opts // validation options + schema *spec.Schema // swagger 2.0 schema + spec *loads.Document + analyzer *analysis.Spec + expanded *loads.Document + KnownFormats strfmt.Registry + Options Opts // validation options + schemaOptions *SchemaValidatorOptions } // NewSpecValidator creates a new swagger spec validator instance func NewSpecValidator(schema *spec.Schema, formats strfmt.Registry) *SpecValidator { + // schema options that apply to all called validators + schemaOptions := new(SchemaValidatorOptions) + for _, o := range []Option{ + SwaggerSchema(true), + WithRecycleValidators(true), + // withRecycleResults(true), + } { + o(schemaOptions) + } + return &SpecValidator{ - schema: schema, - KnownFormats: formats, - Options: defaultOpts, + schema: schema, + KnownFormats: formats, + Options: defaultOpts, + schemaOptions: schemaOptions, } } // Validate validates the swagger spec func (s *SpecValidator) Validate(data interface{}) (*Result, *Result) { + s.schemaOptions.skipSchemataResult = s.Options.SkipSchemataResult var sd *loads.Document errs, warnings := new(Result), new(Result) @@ -85,11 +100,8 @@ func (s *SpecValidator) Validate(data interface{}) (*Result, *Result) { s.spec = sd s.analyzer = analysis.New(sd.Spec()) - // Swagger schema validator - schv := NewSchemaValidator(s.schema, nil, "", s.KnownFormats, SwaggerSchema(true)) - var obj interface{} - // Raw spec unmarshalling errors + var obj interface{} if err := json.Unmarshal(sd.Raw(), &obj); err != nil { // NOTE: under normal conditions, the *load.Document has been already unmarshalled // So this one is just a paranoid check on the behavior of the spec package @@ -103,6 +115,8 @@ func (s *SpecValidator) Validate(data interface{}) (*Result, *Result) { warnings.AddErrors(errs.Warnings...) 
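None of this recycling changes the public entry points; callers still validate a document the same way. A hedged usage sketch (the swagger.yaml path is a placeholder for this example):

```go
package main

import (
	"log"

	"github.com/go-openapi/loads"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/validate"
)

func main() {
	// Load a Swagger 2.0 document; the path is illustrative only.
	doc, err := loads.Spec("swagger.yaml")
	if err != nil {
		log.Fatalf("loading spec: %v", err)
	}

	// Spec flattens all validation messages into a single error.
	if err := validate.Spec(doc, strfmt.Default); err != nil {
		log.Fatalf("spec is invalid: %v", err)
	}
	log.Println("spec is valid")
}
```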
}() + // Swagger schema validator + schv := newSchemaValidator(s.schema, nil, "", s.KnownFormats, s.schemaOptions) errs.Merge(schv.Validate(obj)) // error - // There may be a point in continuing to try and determine more accurate errors if !s.Options.ContinueOnErrors && errs.HasErrors() { @@ -130,13 +144,13 @@ func (s *SpecValidator) Validate(data interface{}) (*Result, *Result) { } // Values provided as default MUST validate their schema - df := &defaultValidator{SpecValidator: s} + df := &defaultValidator{SpecValidator: s, schemaOptions: s.schemaOptions} errs.Merge(df.Validate()) // Values provided as examples MUST validate their schema // Value provided as examples in a response without schema generate a warning // Known limitations: examples in responses for mime type not application/json are ignored (warning) - ex := &exampleValidator{SpecValidator: s} + ex := &exampleValidator{SpecValidator: s, schemaOptions: s.schemaOptions} errs.Merge(ex.Validate()) errs.Merge(s.validateNonEmptyPathParamNames()) @@ -148,22 +162,27 @@ func (s *SpecValidator) Validate(data interface{}) (*Result, *Result) { } func (s *SpecValidator) validateNonEmptyPathParamNames() *Result { - res := new(Result) + res := pools.poolOfResults.BorrowResult() if s.spec.Spec().Paths == nil { // There is no Paths object: error res.AddErrors(noValidPathMsg()) - } else { - if s.spec.Spec().Paths.Paths == nil { - // Paths may be empty: warning - res.AddWarnings(noValidPathMsg()) - } else { - for k := range s.spec.Spec().Paths.Paths { - if strings.Contains(k, "{}") { - res.AddErrors(emptyPathParameterMsg(k)) - } - } + + return res + } + + if s.spec.Spec().Paths.Paths == nil { + // Paths may be empty: warning + res.AddWarnings(noValidPathMsg()) + + return res + } + + for k := range s.spec.Spec().Paths.Paths { + if strings.Contains(k, "{}") { + res.AddErrors(emptyPathParameterMsg(k)) } } + return res } @@ -177,7 +196,7 @@ func (s *SpecValidator) validateDuplicateOperationIDs() *Result { // fallback on possible incomplete picture because of previous errors analyzer = s.analyzer } - res := new(Result) + res := pools.poolOfResults.BorrowResult() known := make(map[string]int) for _, v := range analyzer.OperationIDs() { if v != "" { @@ -199,7 +218,7 @@ type dupProp struct { func (s *SpecValidator) validateDuplicatePropertyNames() *Result { // definition can't declare a property that's already defined by one of its ancestors - res := new(Result) + res := pools.poolOfResults.BorrowResult() for k, sch := range s.spec.Spec().Definitions { if len(sch.AllOf) == 0 { continue @@ -248,7 +267,7 @@ func (s *SpecValidator) validateSchemaPropertyNames(nm string, sch spec.Schema, schn := nm schc := &sch - res := new(Result) + res := pools.poolOfResults.BorrowResult() for schc.Ref.String() != "" { // gather property names @@ -285,7 +304,7 @@ func (s *SpecValidator) validateSchemaPropertyNames(nm string, sch spec.Schema, } func (s *SpecValidator) validateCircularAncestry(nm string, sch spec.Schema, knowns map[string]struct{}) ([]string, *Result) { - res := new(Result) + res := pools.poolOfResults.BorrowResult() if sch.Ref.String() == "" && len(sch.AllOf) == 0 { // Safeguard. 
We should not be able to actually get there return nil, res @@ -335,7 +354,7 @@ func (s *SpecValidator) validateCircularAncestry(nm string, sch spec.Schema, kno func (s *SpecValidator) validateItems() *Result { // validate parameter, items, schema and response objects for presence of item if type is array - res := new(Result) + res := pools.poolOfResults.BorrowResult() for method, pi := range s.analyzer.Operations() { for path, op := range pi { @@ -394,7 +413,7 @@ func (s *SpecValidator) validateItems() *Result { // Verifies constraints on array type func (s *SpecValidator) validateSchemaItems(schema spec.Schema, prefix, opID string) *Result { - res := new(Result) + res := pools.poolOfResults.BorrowResult() if !schema.Type.Contains(arrayType) { return res } @@ -418,7 +437,7 @@ func (s *SpecValidator) validateSchemaItems(schema spec.Schema, prefix, opID str func (s *SpecValidator) validatePathParamPresence(path string, fromPath, fromOperation []string) *Result { // Each defined operation path parameters must correspond to a named element in the API's path pattern. // (For example, you cannot have a path parameter named id for the following path /pets/{petId} but you must have a path parameter named petId.) - res := new(Result) + res := pools.poolOfResults.BorrowResult() for _, l := range fromPath { var matched bool for _, r := range fromOperation { @@ -456,7 +475,6 @@ func (s *SpecValidator) validateReferenced() *Result { return &res } -// nolint: dupl func (s *SpecValidator) validateReferencedParameters() *Result { // Each referenceable definition should have references. params := s.spec.Spec().Parameters @@ -475,14 +493,13 @@ func (s *SpecValidator) validateReferencedParameters() *Result { if len(expected) == 0 { return nil } - result := new(Result) + result := pools.poolOfResults.BorrowResult() for k := range expected { result.AddWarnings(unusedParamMsg(k)) } return result } -// nolint: dupl func (s *SpecValidator) validateReferencedResponses() *Result { // Each referenceable definition should have references. responses := s.spec.Spec().Responses @@ -501,14 +518,13 @@ func (s *SpecValidator) validateReferencedResponses() *Result { if len(expected) == 0 { return nil } - result := new(Result) + result := pools.poolOfResults.BorrowResult() for k := range expected { result.AddWarnings(unusedResponseMsg(k)) } return result } -// nolint: dupl func (s *SpecValidator) validateReferencedDefinitions() *Result { // Each referenceable definition must have references. defs := s.spec.Spec().Definitions @@ -537,7 +553,7 @@ func (s *SpecValidator) validateReferencedDefinitions() *Result { func (s *SpecValidator) validateRequiredDefinitions() *Result { // Each property listed in the required array must be defined in the properties of the model - res := new(Result) + res := pools.poolOfResults.BorrowResult() DEFINITIONS: for d, schema := range s.spec.Spec().Definitions { @@ -556,7 +572,7 @@ DEFINITIONS: func (s *SpecValidator) validateRequiredProperties(path, in string, v *spec.Schema) *Result { // Takes care of recursive property definitions, which may be nested in additionalProperties schemas - res := new(Result) + res := pools.poolOfResults.BorrowResult() propertyMatch := false patternMatch := false additionalPropertiesMatch := false @@ -615,40 +631,42 @@ func (s *SpecValidator) validateRequiredProperties(path, in string, v *spec.Sche func (s *SpecValidator) validateParameters() *Result { // - for each method, path is unique, regardless of path parameters // e.g. 
GET:/petstore/{id}, GET:/petstore/{pet}, GET:/petstore are - // considered duplicate paths + // considered duplicate paths, if StrictPathParamUniqueness is enabled. // - each parameter should have a unique `name` and `type` combination // - each operation should have only 1 parameter of type body // - there must be at most 1 parameter in body // - parameters with pattern property must specify valid patterns // - $ref in parameters must resolve // - path param must be required - res := new(Result) + res := pools.poolOfResults.BorrowResult() rexGarbledPathSegment := mustCompileRegexp(`.*[{}\s]+.*`) for method, pi := range s.expandedAnalyzer().Operations() { methodPaths := make(map[string]map[string]string) for path, op := range pi { - pathToAdd := pathHelp.stripParametersInPath(path) + if s.Options.StrictPathParamUniqueness { + pathToAdd := pathHelp.stripParametersInPath(path) - // Warn on garbled path afer param stripping - if rexGarbledPathSegment.MatchString(pathToAdd) { - res.AddWarnings(pathStrippedParamGarbledMsg(pathToAdd)) - } + // Warn on garbled path afer param stripping + if rexGarbledPathSegment.MatchString(pathToAdd) { + res.AddWarnings(pathStrippedParamGarbledMsg(pathToAdd)) + } - // Check uniqueness of stripped paths - if _, found := methodPaths[method][pathToAdd]; found { + // Check uniqueness of stripped paths + if _, found := methodPaths[method][pathToAdd]; found { - // Sort names for stable, testable output - if strings.Compare(path, methodPaths[method][pathToAdd]) < 0 { - res.AddErrors(pathOverlapMsg(path, methodPaths[method][pathToAdd])) + // Sort names for stable, testable output + if strings.Compare(path, methodPaths[method][pathToAdd]) < 0 { + res.AddErrors(pathOverlapMsg(path, methodPaths[method][pathToAdd])) + } else { + res.AddErrors(pathOverlapMsg(methodPaths[method][pathToAdd], path)) + } } else { - res.AddErrors(pathOverlapMsg(methodPaths[method][pathToAdd], path)) - } - } else { - if _, found := methodPaths[method]; !found { - methodPaths[method] = map[string]string{} - } - methodPaths[method][pathToAdd] = path // Original non stripped path + if _, found := methodPaths[method]; !found { + methodPaths[method] = map[string]string{} + } + methodPaths[method][pathToAdd] = path // Original non stripped path + } } var bodyParams []string @@ -659,7 +677,23 @@ func (s *SpecValidator) validateParameters() *Result { // TODO: should be done after param expansion res.Merge(s.checkUniqueParams(path, method, op)) + // pick the root schema from the swagger specification which describes a parameter + origSchema, ok := s.schema.Definitions["parameter"] + if !ok { + panic("unexpected swagger schema: missing #/definitions/parameter") + } + // clone it once to avoid expanding a global schema (e.g. 
swagger spec) + paramSchema, err := deepCloneSchema(origSchema) + if err != nil { + panic(fmt.Errorf("can't clone schema: %v", err)) + } + for _, pr := range paramHelp.safeExpandedParamsFor(path, method, op.ID, res, s) { + // An expanded parameter must validate the Parameter schema (an unexpanded $ref always passes high-level schema validation) + schv := newSchemaValidator(¶mSchema, s.schema, fmt.Sprintf("%s.%s.parameters.%s", path, method, pr.Name), s.KnownFormats, s.schemaOptions) + obj := swag.ToDynamicJSON(pr) + res.Merge(schv.Validate(obj)) + // Validate pattern regexp for parameters with a Pattern property if _, err := compileRegexp(pr.Pattern); err != nil { res.AddErrors(invalidPatternInParamMsg(op.ID, pr.Name, pr.Pattern)) @@ -741,7 +775,7 @@ func (s *SpecValidator) validateParameters() *Result { func (s *SpecValidator) validateReferencesValid() *Result { // each reference must point to a valid object - res := new(Result) + res := pools.poolOfResults.BorrowResult() for _, r := range s.analyzer.AllRefs() { if !r.IsValidURI(s.spec.SpecFilePath()) { // Safeguard - spec should always yield a valid URI res.AddErrors(invalidRefMsg(r.String())) @@ -767,7 +801,7 @@ func (s *SpecValidator) checkUniqueParams(path, method string, op *spec.Operatio // However, there are some issues with such a factorization: // - analysis does not seem to fully expand params // - param keys may be altered by x-go-name - res := new(Result) + res := pools.poolOfResults.BorrowResult() pnames := make(map[string]struct{}) if op.Parameters != nil { // Safeguard @@ -802,3 +836,17 @@ func (s *SpecValidator) expandedAnalyzer() *analysis.Spec { } return s.analyzer } + +func deepCloneSchema(src spec.Schema) (spec.Schema, error) { + var b bytes.Buffer + if err := gob.NewEncoder(&b).Encode(src); err != nil { + return spec.Schema{}, err + } + + var dst spec.Schema + if err := gob.NewDecoder(&b).Decode(&dst); err != nil { + return spec.Schema{}, err + } + + return dst, nil +} diff --git a/vendor/github.com/go-openapi/validate/spec_messages.go b/vendor/github.com/go-openapi/validate/spec_messages.go index b3757addd..6d1f0f819 100644 --- a/vendor/github.com/go-openapi/validate/spec_messages.go +++ b/vendor/github.com/go-openapi/validate/spec_messages.go @@ -187,6 +187,8 @@ const ( // UnusedResponseWarning ... 
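The deepCloneSchema helper added above round-trips a spec.Schema through encoding/gob so that per-parameter validation can work on a private copy instead of mutating the shared swagger meta-schema. The same encode/decode trick works for any gob-encodable value; a self-contained sketch with a toy stand-in type:

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

// schema is a toy stand-in for spec.Schema: gob copies exported fields only.
type schema struct {
	Title      string
	Required   []string
	Properties map[string]string
}

// deepClone copies src by encoding it to a buffer and decoding into a new value.
func deepClone(src schema) (schema, error) {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(src); err != nil {
		return schema{}, err
	}
	var dst schema
	if err := gob.NewDecoder(&buf).Decode(&dst); err != nil {
		return schema{}, err
	}
	return dst, nil
}

func main() {
	orig := schema{
		Title:      "parameter",
		Required:   []string{"name", "in"},
		Properties: map[string]string{"name": "string"},
	}
	copyOf, err := deepClone(orig)
	if err != nil {
		panic(err)
	}
	copyOf.Required = append(copyOf.Required, "type") // does not touch orig
	fmt.Println(len(orig.Required), len(copyOf.Required)) // 2 3
}
```

A gob round-trip costs an encode and a decode per clone and ignores unexported fields, but it avoids hand-maintaining a field-by-field copy as the schema type evolves.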
UnusedResponseWarning = "response %q is not used anywhere" + + InvalidObject = "expected an object in %q.%s" ) // Additional error codes @@ -347,11 +349,15 @@ func invalidParameterDefinitionAsSchemaMsg(path, method, operationID string) err func parameterValidationTypeMismatchMsg(param, path, typ string) errors.Error { return errors.New(errors.CompositeErrorCode, ParamValidationTypeMismatch, param, path, typ) } +func invalidObjectMsg(path, in string) errors.Error { + return errors.New(errors.CompositeErrorCode, InvalidObject, path, in) +} // disabled -// func invalidResponseDefinitionAsSchemaMsg(path, method string) errors.Error { -// return errors.New(errors.CompositeErrorCode, InvalidResponseDefinitionAsSchemaError, path, method) -// } +// +// func invalidResponseDefinitionAsSchemaMsg(path, method string) errors.Error { +// return errors.New(errors.CompositeErrorCode, InvalidResponseDefinitionAsSchemaError, path, method) +// } func someParametersBrokenMsg(path, method, operationID string) errors.Error { return errors.New(errors.CompositeErrorCode, SomeParametersBrokenError, path, method, operationID) } diff --git a/vendor/github.com/go-openapi/validate/type.go b/vendor/github.com/go-openapi/validate/type.go index 876467588..f87abb3d5 100644 --- a/vendor/github.com/go-openapi/validate/type.go +++ b/vendor/github.com/go-openapi/validate/type.go @@ -25,11 +25,34 @@ import ( ) type typeValidator struct { + Path string + In string Type spec.StringOrArray Nullable bool Format string - In string - Path string + Options *SchemaValidatorOptions +} + +func newTypeValidator(path, in string, typ spec.StringOrArray, nullable bool, format string, opts *SchemaValidatorOptions) *typeValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var t *typeValidator + if opts.recycleValidators { + t = pools.poolOfTypeValidators.BorrowValidator() + } else { + t = new(typeValidator) + } + + t.Path = path + t.In = in + t.Type = typ + t.Nullable = nullable + t.Format = format + t.Options = opts + + return t } func (t *typeValidator) schemaInfoForType(data interface{}) (string, string) { @@ -90,7 +113,7 @@ func (t *typeValidator) schemaInfoForType(data interface{}) (string, string) { default: val := reflect.ValueOf(data) tpe := val.Type() - switch tpe.Kind() { + switch tpe.Kind() { //nolint:exhaustive case reflect.Bool: return booleanType, "" case reflect.String: @@ -125,23 +148,33 @@ func (t *typeValidator) SetPath(path string) { t.Path = path } -func (t *typeValidator) Applies(source interface{}, kind reflect.Kind) bool { +func (t *typeValidator) Applies(source interface{}, _ reflect.Kind) bool { // typeValidator applies to Schema, Parameter and Header objects - stpe := reflect.TypeOf(source) - r := (len(t.Type) > 0 || t.Format != "") && (stpe == specSchemaType || stpe == specParameterType || stpe == specHeaderType) - debugLog("type validator for %q applies %t for %T (kind: %v)\n", t.Path, r, source, kind) - return r + switch source.(type) { + case *spec.Schema: + case *spec.Parameter: + case *spec.Header: + default: + return false + } + + return (len(t.Type) > 0 || t.Format != "") } func (t *typeValidator) Validate(data interface{}) *Result { - result := new(Result) - result.Inc() + if t.Options.recycleValidators { + defer func() { + t.redeem() + }() + } + if data == nil { // nil or zero value for the passed structure require Type: null if len(t.Type) > 0 && !t.Type.Contains(nullType) && !t.Nullable { // TODO: if a property is not required it also passes this - return 
errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), nullType)) + return errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), nullType), t.Options.recycleResult) } - return result + + return emptyResult } // check if the type matches, should be used in every validator chain as first item @@ -151,8 +184,6 @@ func (t *typeValidator) Validate(data interface{}) *Result { // infer schema type (JSON) and format from passed data type schType, format := t.schemaInfoForType(data) - debugLog("path: %s, schType: %s, format: %s, expType: %s, expFmt: %s, kind: %s", t.Path, schType, format, t.Type, t.Format, val.Kind().String()) - // check numerical types // TODO: check unsigned ints // TODO: check json.Number (see schema.go) @@ -163,15 +194,20 @@ func (t *typeValidator) Validate(data interface{}) *Result { if kind != reflect.String && kind != reflect.Slice && t.Format != "" && !(t.Type.Contains(schType) || format == t.Format || isFloatInt || isIntFloat || isLowerInt || isLowerFloat) { // TODO: test case - return errorHelp.sErr(errors.InvalidType(t.Path, t.In, t.Format, format)) + return errorHelp.sErr(errors.InvalidType(t.Path, t.In, t.Format, format), t.Options.recycleResult) } if !(t.Type.Contains(numberType) || t.Type.Contains(integerType)) && t.Format != "" && (kind == reflect.String || kind == reflect.Slice) { - return result + return emptyResult } if !(t.Type.Contains(schType) || isFloatInt || isIntFloat) { - return errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), schType)) + return errorHelp.sErr(errors.InvalidType(t.Path, t.In, strings.Join(t.Type, ","), schType), t.Options.recycleResult) } - return result + + return emptyResult +} + +func (t *typeValidator) redeem() { + pools.poolOfTypeValidators.RedeemValidator(t) } diff --git a/vendor/github.com/go-openapi/validate/validator.go b/vendor/github.com/go-openapi/validate/validator.go index 38cdb9bb6..c083aecc9 100644 --- a/vendor/github.com/go-openapi/validate/validator.go +++ b/vendor/github.com/go-openapi/validate/validator.go @@ -39,20 +39,31 @@ type itemsValidator struct { root interface{} path string in string - validators []valueValidator + validators [6]valueValidator KnownFormats strfmt.Registry + Options *SchemaValidatorOptions } -func newItemsValidator(path, in string, items *spec.Items, root interface{}, formats strfmt.Registry) *itemsValidator { - iv := &itemsValidator{path: path, in: in, items: items, root: root, KnownFormats: formats} - iv.validators = []valueValidator{ - &typeValidator{ - Type: spec.StringOrArray([]string{items.Type}), - Nullable: items.Nullable, - Format: items.Format, - In: in, - Path: path, - }, +func newItemsValidator(path, in string, items *spec.Items, root interface{}, formats strfmt.Registry, opts *SchemaValidatorOptions) *itemsValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var iv *itemsValidator + if opts.recycleValidators { + iv = pools.poolOfItemsValidators.BorrowValidator() + } else { + iv = new(itemsValidator) + } + + iv.path = path + iv.in = in + iv.items = items + iv.root = root + iv.KnownFormats = formats + iv.Options = opts + iv.validators = [6]valueValidator{ + iv.typeValidator(), iv.stringValidator(), iv.formatValidator(), iv.numberValidator(), @@ -63,77 +74,152 @@ func newItemsValidator(path, in string, items *spec.Items, root interface{}, for } func (i *itemsValidator) Validate(index int, data interface{}) *Result { + if i.Options.recycleValidators { + defer func() { + i.redeemChildren() + 
i.redeem() + }() + } + tpe := reflect.TypeOf(data) kind := tpe.Kind() - mainResult := new(Result) + var result *Result + if i.Options.recycleResult { + result = pools.poolOfResults.BorrowResult() + } else { + result = new(Result) + } + path := fmt.Sprintf("%s.%d", i.path, index) - for _, validator := range i.validators { - validator.SetPath(path) - if validator.Applies(i.root, kind) { - result := validator.Validate(data) - mainResult.Merge(result) - mainResult.Inc() - if result != nil && result.HasErrors() { - return mainResult + for idx, validator := range i.validators { + if !validator.Applies(i.root, kind) { + if i.Options.recycleValidators { + // Validate won't be called, so relinquish this validator + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + i.validators[idx] = nil // prevents further (unsafe) usage } + + continue + } + + validator.SetPath(path) + err := validator.Validate(data) + if i.Options.recycleValidators { + i.validators[idx] = nil // prevents further (unsafe) usage + } + if err != nil { + result.Inc() + if err.HasErrors() { + result.Merge(err) + + break + } + + result.Merge(err) } } - return mainResult + + return result +} + +func (i *itemsValidator) typeValidator() valueValidator { + return newTypeValidator( + i.path, + i.in, + spec.StringOrArray([]string{i.items.Type}), + i.items.Nullable, + i.items.Format, + i.Options, + ) } func (i *itemsValidator) commonValidator() valueValidator { - return &basicCommonValidator{ - In: i.in, - Default: i.items.Default, - Enum: i.items.Enum, - } + return newBasicCommonValidator( + "", + i.in, + i.items.Default, + i.items.Enum, + i.Options, + ) } func (i *itemsValidator) sliceValidator() valueValidator { - return &basicSliceValidator{ - In: i.in, - Default: i.items.Default, - MaxItems: i.items.MaxItems, - MinItems: i.items.MinItems, - UniqueItems: i.items.UniqueItems, - Source: i.root, - Items: i.items.Items, - KnownFormats: i.KnownFormats, - } + return newBasicSliceValidator( + "", + i.in, + i.items.Default, + i.items.MaxItems, + i.items.MinItems, + i.items.UniqueItems, + i.items.Items, + i.root, + i.KnownFormats, + i.Options, + ) } func (i *itemsValidator) numberValidator() valueValidator { - return &numberValidator{ - In: i.in, - Default: i.items.Default, - MultipleOf: i.items.MultipleOf, - Maximum: i.items.Maximum, - ExclusiveMaximum: i.items.ExclusiveMaximum, - Minimum: i.items.Minimum, - ExclusiveMinimum: i.items.ExclusiveMinimum, - Type: i.items.Type, - Format: i.items.Format, - } + return newNumberValidator( + "", + i.in, + i.items.Default, + i.items.MultipleOf, + i.items.Maximum, + i.items.ExclusiveMaximum, + i.items.Minimum, + i.items.ExclusiveMinimum, + i.items.Type, + i.items.Format, + i.Options, + ) } func (i *itemsValidator) stringValidator() valueValidator { - return &stringValidator{ - In: i.in, - Default: i.items.Default, - MaxLength: i.items.MaxLength, - MinLength: i.items.MinLength, - Pattern: i.items.Pattern, - AllowEmptyValue: false, - } + return newStringValidator( + "", + i.in, + i.items.Default, + false, // Required + false, // AllowEmpty + i.items.MaxLength, + i.items.MinLength, + i.items.Pattern, + i.Options, + ) } func (i *itemsValidator) formatValidator() valueValidator { - return &formatValidator{ - In: i.in, - //Default: i.items.Default, - Format: i.items.Format, - KnownFormats: i.KnownFormats, + return newFormatValidator( + "", + i.in, + 
i.items.Format, + i.KnownFormats, + i.Options, + ) +} + +func (i *itemsValidator) redeem() { + pools.poolOfItemsValidators.RedeemValidator(i) +} + +func (i *itemsValidator) redeemChildren() { + for idx, validator := range i.validators { + if validator == nil { + continue + } + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + i.validators[idx] = nil // free up allocated children if not in pool } } @@ -142,265 +228,501 @@ type basicCommonValidator struct { In string Default interface{} Enum []interface{} + Options *SchemaValidatorOptions +} + +func newBasicCommonValidator(path, in string, def interface{}, enum []interface{}, opts *SchemaValidatorOptions) *basicCommonValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var b *basicCommonValidator + if opts.recycleValidators { + b = pools.poolOfBasicCommonValidators.BorrowValidator() + } else { + b = new(basicCommonValidator) + } + + b.Path = path + b.In = in + b.Default = def + b.Enum = enum + b.Options = opts + + return b } func (b *basicCommonValidator) SetPath(path string) { b.Path = path } -func (b *basicCommonValidator) Applies(source interface{}, kind reflect.Kind) bool { +func (b *basicCommonValidator) Applies(source interface{}, _ reflect.Kind) bool { switch source.(type) { case *spec.Parameter, *spec.Schema, *spec.Header: return true + default: + return false } - return false } func (b *basicCommonValidator) Validate(data interface{}) (res *Result) { - if len(b.Enum) > 0 { - for _, enumValue := range b.Enum { - actualType := reflect.TypeOf(enumValue) - if actualType != nil { // Safeguard - expectedValue := reflect.ValueOf(data) - if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { - if reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), enumValue) { - return nil - } - } - } - } - return errorHelp.sErr(errors.EnumFail(b.Path, b.In, data, b.Enum)) + if b.Options.recycleValidators { + defer func() { + b.redeem() + }() } - return nil + + if len(b.Enum) == 0 { + return nil + } + + for _, enumValue := range b.Enum { + actualType := reflect.TypeOf(enumValue) + if actualType == nil { // Safeguard + continue + } + + expectedValue := reflect.ValueOf(data) + if expectedValue.IsValid() && + expectedValue.Type().ConvertibleTo(actualType) && + reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), enumValue) { + return nil + } + } + + return errorHelp.sErr(errors.EnumFail(b.Path, b.In, data, b.Enum), b.Options.recycleResult) +} + +func (b *basicCommonValidator) redeem() { + pools.poolOfBasicCommonValidators.RedeemValidator(b) } // A HeaderValidator has very limited subset of validations to apply type HeaderValidator struct { name string header *spec.Header - validators []valueValidator + validators [6]valueValidator KnownFormats strfmt.Registry + Options *SchemaValidatorOptions } // NewHeaderValidator creates a new header validator object -func NewHeaderValidator(name string, header *spec.Header, formats strfmt.Registry) *HeaderValidator { - p := &HeaderValidator{name: name, header: header, KnownFormats: formats} - p.validators = []valueValidator{ - &typeValidator{ - Type: spec.StringOrArray([]string{header.Type}), - Nullable: header.Nullable, - Format: header.Format, - In: "header", - Path: name, - }, +func NewHeaderValidator(name string, header *spec.Header, formats strfmt.Registry, options ...Option) 
*HeaderValidator { + opts := new(SchemaValidatorOptions) + for _, o := range options { + o(opts) + } + + return newHeaderValidator(name, header, formats, opts) +} + +func newHeaderValidator(name string, header *spec.Header, formats strfmt.Registry, opts *SchemaValidatorOptions) *HeaderValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var p *HeaderValidator + if opts.recycleValidators { + p = pools.poolOfHeaderValidators.BorrowValidator() + } else { + p = new(HeaderValidator) + } + + p.name = name + p.header = header + p.KnownFormats = formats + p.Options = opts + p.validators = [6]valueValidator{ + newTypeValidator( + name, + "header", + spec.StringOrArray([]string{header.Type}), + header.Nullable, + header.Format, + p.Options, + ), p.stringValidator(), p.formatValidator(), p.numberValidator(), p.sliceValidator(), p.commonValidator(), } + return p } // Validate the value of the header against its schema func (p *HeaderValidator) Validate(data interface{}) *Result { - result := new(Result) + if p.Options.recycleValidators { + defer func() { + p.redeemChildren() + p.redeem() + }() + } + + if data == nil { + return nil + } + + var result *Result + if p.Options.recycleResult { + result = pools.poolOfResults.BorrowResult() + } else { + result = new(Result) + } + tpe := reflect.TypeOf(data) kind := tpe.Kind() - for _, validator := range p.validators { - if validator.Applies(p.header, kind) { - if err := validator.Validate(data); err != nil { - result.Merge(err) - if err.HasErrors() { - return result + for idx, validator := range p.validators { + if !validator.Applies(p.header, kind) { + if p.Options.recycleValidators { + // Validate won't be called, so relinquish this validator + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + p.validators[idx] = nil // prevents further (unsafe) usage } + + continue + } + + err := validator.Validate(data) + if p.Options.recycleValidators { + p.validators[idx] = nil // prevents further (unsafe) usage + } + if err != nil { + if err.HasErrors() { + result.Merge(err) + break + } + result.Merge(err) } } - return nil + + return result } func (p *HeaderValidator) commonValidator() valueValidator { - return &basicCommonValidator{ - Path: p.name, - In: "response", - Default: p.header.Default, - Enum: p.header.Enum, - } + return newBasicCommonValidator( + p.name, + "response", + p.header.Default, + p.header.Enum, + p.Options, + ) } func (p *HeaderValidator) sliceValidator() valueValidator { - return &basicSliceValidator{ - Path: p.name, - In: "response", - Default: p.header.Default, - MaxItems: p.header.MaxItems, - MinItems: p.header.MinItems, - UniqueItems: p.header.UniqueItems, - Items: p.header.Items, - Source: p.header, - KnownFormats: p.KnownFormats, - } + return newBasicSliceValidator( + p.name, + "response", + p.header.Default, + p.header.MaxItems, + p.header.MinItems, + p.header.UniqueItems, + p.header.Items, + p.header, + p.KnownFormats, + p.Options, + ) } func (p *HeaderValidator) numberValidator() valueValidator { - return &numberValidator{ - Path: p.name, - In: "response", - Default: p.header.Default, - MultipleOf: p.header.MultipleOf, - Maximum: p.header.Maximum, - ExclusiveMaximum: p.header.ExclusiveMaximum, - Minimum: p.header.Minimum, - ExclusiveMinimum: p.header.ExclusiveMinimum, - Type: p.header.Type, - Format: p.header.Format, - } + return newNumberValidator( + p.name, + 
"response", + p.header.Default, + p.header.MultipleOf, + p.header.Maximum, + p.header.ExclusiveMaximum, + p.header.Minimum, + p.header.ExclusiveMinimum, + p.header.Type, + p.header.Format, + p.Options, + ) } func (p *HeaderValidator) stringValidator() valueValidator { - return &stringValidator{ - Path: p.name, - In: "response", - Default: p.header.Default, - Required: true, - MaxLength: p.header.MaxLength, - MinLength: p.header.MinLength, - Pattern: p.header.Pattern, - AllowEmptyValue: false, - } + return newStringValidator( + p.name, + "response", + p.header.Default, + true, + false, + p.header.MaxLength, + p.header.MinLength, + p.header.Pattern, + p.Options, + ) } func (p *HeaderValidator) formatValidator() valueValidator { - return &formatValidator{ - Path: p.name, - In: "response", - //Default: p.header.Default, - Format: p.header.Format, - KnownFormats: p.KnownFormats, + return newFormatValidator( + p.name, + "response", + p.header.Format, + p.KnownFormats, + p.Options, + ) +} + +func (p *HeaderValidator) redeem() { + pools.poolOfHeaderValidators.RedeemValidator(p) +} + +func (p *HeaderValidator) redeemChildren() { + for idx, validator := range p.validators { + if validator == nil { + continue + } + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + p.validators[idx] = nil // free up allocated children if not in pool } } // A ParamValidator has very limited subset of validations to apply type ParamValidator struct { param *spec.Parameter - validators []valueValidator + validators [6]valueValidator KnownFormats strfmt.Registry + Options *SchemaValidatorOptions } // NewParamValidator creates a new param validator object -func NewParamValidator(param *spec.Parameter, formats strfmt.Registry) *ParamValidator { - p := &ParamValidator{param: param, KnownFormats: formats} - p.validators = []valueValidator{ - &typeValidator{ - Type: spec.StringOrArray([]string{param.Type}), - Nullable: param.Nullable, - Format: param.Format, - In: param.In, - Path: param.Name, - }, +func NewParamValidator(param *spec.Parameter, formats strfmt.Registry, options ...Option) *ParamValidator { + opts := new(SchemaValidatorOptions) + for _, o := range options { + o(opts) + } + + return newParamValidator(param, formats, opts) +} + +func newParamValidator(param *spec.Parameter, formats strfmt.Registry, opts *SchemaValidatorOptions) *ParamValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var p *ParamValidator + if opts.recycleValidators { + p = pools.poolOfParamValidators.BorrowValidator() + } else { + p = new(ParamValidator) + } + + p.param = param + p.KnownFormats = formats + p.Options = opts + p.validators = [6]valueValidator{ + newTypeValidator( + param.Name, + param.In, + spec.StringOrArray([]string{param.Type}), + param.Nullable, + param.Format, + p.Options, + ), p.stringValidator(), p.formatValidator(), p.numberValidator(), p.sliceValidator(), p.commonValidator(), } + return p } // Validate the data against the description of the parameter func (p *ParamValidator) Validate(data interface{}) *Result { - result := new(Result) + if data == nil { + return nil + } + + var result *Result + if p.Options.recycleResult { + result = pools.poolOfResults.BorrowResult() + } else { + result = new(Result) + } + tpe := reflect.TypeOf(data) kind := tpe.Kind() + if p.Options.recycleValidators { + defer func() { + p.redeemChildren() + p.redeem() + 
}() + } + // TODO: validate type - for _, validator := range p.validators { - if validator.Applies(p.param, kind) { - if err := validator.Validate(data); err != nil { - result.Merge(err) - if err.HasErrors() { - return result + for idx, validator := range p.validators { + if !validator.Applies(p.param, kind) { + if p.Options.recycleValidators { + // Validate won't be called, so relinquish this validator + if redeemableChildren, ok := validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + p.validators[idx] = nil // prevents further (unsafe) usage } + + continue + } + + err := validator.Validate(data) + if p.Options.recycleValidators { + p.validators[idx] = nil // prevents further (unsafe) usage + } + if err != nil { + if err.HasErrors() { + result.Merge(err) + break + } + result.Merge(err) } } - return nil + + return result } func (p *ParamValidator) commonValidator() valueValidator { - return &basicCommonValidator{ - Path: p.param.Name, - In: p.param.In, - Default: p.param.Default, - Enum: p.param.Enum, - } + return newBasicCommonValidator( + p.param.Name, + p.param.In, + p.param.Default, + p.param.Enum, + p.Options, + ) } func (p *ParamValidator) sliceValidator() valueValidator { - return &basicSliceValidator{ - Path: p.param.Name, - In: p.param.In, - Default: p.param.Default, - MaxItems: p.param.MaxItems, - MinItems: p.param.MinItems, - UniqueItems: p.param.UniqueItems, - Items: p.param.Items, - Source: p.param, - KnownFormats: p.KnownFormats, - } + return newBasicSliceValidator( + p.param.Name, + p.param.In, + p.param.Default, + p.param.MaxItems, + p.param.MinItems, + p.param.UniqueItems, + p.param.Items, + p.param, + p.KnownFormats, + p.Options, + ) } func (p *ParamValidator) numberValidator() valueValidator { - return &numberValidator{ - Path: p.param.Name, - In: p.param.In, - Default: p.param.Default, - MultipleOf: p.param.MultipleOf, - Maximum: p.param.Maximum, - ExclusiveMaximum: p.param.ExclusiveMaximum, - Minimum: p.param.Minimum, - ExclusiveMinimum: p.param.ExclusiveMinimum, - Type: p.param.Type, - Format: p.param.Format, - } + return newNumberValidator( + p.param.Name, + p.param.In, + p.param.Default, + p.param.MultipleOf, + p.param.Maximum, + p.param.ExclusiveMaximum, + p.param.Minimum, + p.param.ExclusiveMinimum, + p.param.Type, + p.param.Format, + p.Options, + ) } func (p *ParamValidator) stringValidator() valueValidator { - return &stringValidator{ - Path: p.param.Name, - In: p.param.In, - Default: p.param.Default, - AllowEmptyValue: p.param.AllowEmptyValue, - Required: p.param.Required, - MaxLength: p.param.MaxLength, - MinLength: p.param.MinLength, - Pattern: p.param.Pattern, - } + return newStringValidator( + p.param.Name, + p.param.In, + p.param.Default, + p.param.Required, + p.param.AllowEmptyValue, + p.param.MaxLength, + p.param.MinLength, + p.param.Pattern, + p.Options, + ) } func (p *ParamValidator) formatValidator() valueValidator { - return &formatValidator{ - Path: p.param.Name, - In: p.param.In, - //Default: p.param.Default, - Format: p.param.Format, - KnownFormats: p.KnownFormats, + return newFormatValidator( + p.param.Name, + p.param.In, + p.param.Format, + p.KnownFormats, + p.Options, + ) +} + +func (p *ParamValidator) redeem() { + pools.poolOfParamValidators.RedeemValidator(p) +} + +func (p *ParamValidator) redeemChildren() { + for idx, validator := range p.validators { + if validator == nil { + continue + } + if redeemableChildren, ok 
:= validator.(interface{ redeemChildren() }); ok { + redeemableChildren.redeemChildren() + } + if redeemable, ok := validator.(interface{ redeem() }); ok { + redeemable.redeem() + } + p.validators[idx] = nil // free up allocated children if not in pool } } type basicSliceValidator struct { - Path string - In string - Default interface{} - MaxItems *int64 - MinItems *int64 - UniqueItems bool - Items *spec.Items - Source interface{} - itemsValidator *itemsValidator - KnownFormats strfmt.Registry + Path string + In string + Default interface{} + MaxItems *int64 + MinItems *int64 + UniqueItems bool + Items *spec.Items + Source interface{} + KnownFormats strfmt.Registry + Options *SchemaValidatorOptions +} + +func newBasicSliceValidator( + path, in string, + def interface{}, maxItems, minItems *int64, uniqueItems bool, items *spec.Items, + source interface{}, formats strfmt.Registry, + opts *SchemaValidatorOptions) *basicSliceValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var s *basicSliceValidator + if opts.recycleValidators { + s = pools.poolOfBasicSliceValidators.BorrowValidator() + } else { + s = new(basicSliceValidator) + } + + s.Path = path + s.In = in + s.Default = def + s.MaxItems = maxItems + s.MinItems = minItems + s.UniqueItems = uniqueItems + s.Items = items + s.Source = source + s.KnownFormats = formats + s.Options = opts + + return s } func (s *basicSliceValidator) SetPath(path string) { @@ -411,60 +733,61 @@ func (s *basicSliceValidator) Applies(source interface{}, kind reflect.Kind) boo switch source.(type) { case *spec.Parameter, *spec.Items, *spec.Header: return kind == reflect.Slice + default: + return false } - return false } func (s *basicSliceValidator) Validate(data interface{}) *Result { + if s.Options.recycleValidators { + defer func() { + s.redeem() + }() + } val := reflect.ValueOf(data) size := int64(val.Len()) if s.MinItems != nil { if err := MinItems(s.Path, s.In, size, *s.MinItems); err != nil { - return errorHelp.sErr(err) + return errorHelp.sErr(err, s.Options.recycleResult) } } if s.MaxItems != nil { if err := MaxItems(s.Path, s.In, size, *s.MaxItems); err != nil { - return errorHelp.sErr(err) + return errorHelp.sErr(err, s.Options.recycleResult) } } if s.UniqueItems { if err := UniqueItems(s.Path, s.In, data); err != nil { - return errorHelp.sErr(err) + return errorHelp.sErr(err, s.Options.recycleResult) } } - if s.itemsValidator == nil && s.Items != nil { - s.itemsValidator = newItemsValidator(s.Path, s.In, s.Items, s.Source, s.KnownFormats) + if s.Items == nil { + return nil } - if s.itemsValidator != nil { - for i := 0; i < int(size); i++ { - ele := val.Index(i) - if err := s.itemsValidator.Validate(i, ele.Interface()); err != nil && err.HasErrors() { + for i := 0; i < int(size); i++ { + itemsValidator := newItemsValidator(s.Path, s.In, s.Items, s.Source, s.KnownFormats, s.Options) + ele := val.Index(i) + if err := itemsValidator.Validate(i, ele.Interface()); err != nil { + if err.HasErrors() { return err } + if err.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(err) + } } } + return nil } -/* unused -func (s *basicSliceValidator) hasDuplicates(value reflect.Value, size int) bool { - dict := make(map[interface{}]struct{}) - for i := 0; i < size; i++ { - ele := value.Index(i) - if _, ok := dict[ele.Interface()]; ok { - return true - } - dict[ele.Interface()] = struct{}{} - } - return false +func (s *basicSliceValidator) redeem() { + pools.poolOfBasicSliceValidators.RedeemValidator(s) } -*/ type numberValidator struct { 
Path string @@ -476,8 +799,40 @@ type numberValidator struct { Minimum *float64 ExclusiveMinimum bool // Allows for more accurate behavior regarding integers - Type string - Format string + Type string + Format string + Options *SchemaValidatorOptions +} + +func newNumberValidator( + path, in string, def interface{}, + multipleOf, maximum *float64, exclusiveMaximum bool, minimum *float64, exclusiveMinimum bool, + typ, format string, + opts *SchemaValidatorOptions) *numberValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var n *numberValidator + if opts.recycleValidators { + n = pools.poolOfNumberValidators.BorrowValidator() + } else { + n = new(numberValidator) + } + + n.Path = path + n.In = in + n.Default = def + n.MultipleOf = multipleOf + n.Maximum = maximum + n.ExclusiveMaximum = exclusiveMaximum + n.Minimum = minimum + n.ExclusiveMinimum = exclusiveMinimum + n.Type = typ + n.Format = format + n.Options = opts + + return n } func (n *numberValidator) SetPath(path string) { @@ -489,12 +844,10 @@ func (n *numberValidator) Applies(source interface{}, kind reflect.Kind) bool { case *spec.Parameter, *spec.Schema, *spec.Items, *spec.Header: isInt := kind >= reflect.Int && kind <= reflect.Uint64 isFloat := kind == reflect.Float32 || kind == reflect.Float64 - r := isInt || isFloat - debugLog("schema props validator for %q applies %t for %T (kind: %v) isInt=%t, isFloat=%t\n", n.Path, r, source, kind, isInt, isFloat) - return r + return isInt || isFloat + default: + return false } - debugLog("schema props validator for %q applies %t for %T (kind: %v)\n", n.Path, false, source, kind) - return false } // Validate provides a validator for generic JSON numbers, @@ -519,11 +872,18 @@ func (n *numberValidator) Applies(source interface{}, kind reflect.Kind) bool { // // TODO: default boundaries with MAX_SAFE_INTEGER are not checked (specific to json.Number?) func (n *numberValidator) Validate(val interface{}) *Result { - res := new(Result) + if n.Options.recycleValidators { + defer func() { + n.redeem() + }() + } - resMultiple := new(Result) - resMinimum := new(Result) - resMaximum := new(Result) + var res, resMultiple, resMinimum, resMaximum *Result + if n.Options.recycleResult { + res = pools.poolOfResults.BorrowResult() + } else { + res = new(Result) + } // Used only to attempt to validate constraint on value, // even though value or constraint specified do not match type and format @@ -533,68 +893,106 @@ func (n *numberValidator) Validate(val interface{}) *Result { res.AddErrors(IsValueValidAgainstRange(val, n.Type, n.Format, "Checked", n.Path)) if n.MultipleOf != nil { + resMultiple = pools.poolOfResults.BorrowResult() + // Is the constraint specifier within the range of the specific numeric type and format? 
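The MultipleOf/Maximum/Minimum branches below delegate to exported helpers whose signatures appear further down in this diff (values.go). A small, hedged usage sketch of those helpers on their own; the paths and values are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/validate"
)

func main() {
	// Each helper returns nil when the value satisfies the constraint,
	// and a *errors.Validation describing the failure otherwise.
	if err := validate.MaximumNativeType("body.count", "body", int64(12), 10, false); err != nil {
		fmt.Println("maximum violated:", err)
	}
	if err := validate.MinimumNativeType("body.count", "body", int64(12), 1, false); err != nil {
		fmt.Println("unexpected:", err)
	}
	if err := validate.MultipleOfNativeType("body.count", "body", int64(12), 3); err != nil {
		fmt.Println("unexpected:", err)
	}
}
```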
resMultiple.AddErrors(IsValueValidAgainstRange(*n.MultipleOf, n.Type, n.Format, "MultipleOf", n.Path)) if resMultiple.IsValid() { // Constraint validated with compatible types if err := MultipleOfNativeType(n.Path, n.In, val, *n.MultipleOf); err != nil { - resMultiple.Merge(errorHelp.sErr(err)) + resMultiple.Merge(errorHelp.sErr(err, n.Options.recycleResult)) } } else { // Constraint nevertheless validated, converted as general number if err := MultipleOf(n.Path, n.In, data, *n.MultipleOf); err != nil { - resMultiple.Merge(errorHelp.sErr(err)) + resMultiple.Merge(errorHelp.sErr(err, n.Options.recycleResult)) } } } - // nolint: dupl if n.Maximum != nil { + resMaximum = pools.poolOfResults.BorrowResult() + // Is the constraint specifier within the range of the specific numeric type and format? resMaximum.AddErrors(IsValueValidAgainstRange(*n.Maximum, n.Type, n.Format, "Maximum boundary", n.Path)) if resMaximum.IsValid() { // Constraint validated with compatible types if err := MaximumNativeType(n.Path, n.In, val, *n.Maximum, n.ExclusiveMaximum); err != nil { - resMaximum.Merge(errorHelp.sErr(err)) + resMaximum.Merge(errorHelp.sErr(err, n.Options.recycleResult)) } } else { // Constraint nevertheless validated, converted as general number if err := Maximum(n.Path, n.In, data, *n.Maximum, n.ExclusiveMaximum); err != nil { - resMaximum.Merge(errorHelp.sErr(err)) + resMaximum.Merge(errorHelp.sErr(err, n.Options.recycleResult)) } } } - // nolint: dupl if n.Minimum != nil { + resMinimum = pools.poolOfResults.BorrowResult() + // Is the constraint specifier within the range of the specific numeric type and format? resMinimum.AddErrors(IsValueValidAgainstRange(*n.Minimum, n.Type, n.Format, "Minimum boundary", n.Path)) if resMinimum.IsValid() { // Constraint validated with compatible types if err := MinimumNativeType(n.Path, n.In, val, *n.Minimum, n.ExclusiveMinimum); err != nil { - resMinimum.Merge(errorHelp.sErr(err)) + resMinimum.Merge(errorHelp.sErr(err, n.Options.recycleResult)) } } else { // Constraint nevertheless validated, converted as general number if err := Minimum(n.Path, n.In, data, *n.Minimum, n.ExclusiveMinimum); err != nil { - resMinimum.Merge(errorHelp.sErr(err)) + resMinimum.Merge(errorHelp.sErr(err, n.Options.recycleResult)) } } } res.Merge(resMultiple, resMinimum, resMaximum) res.Inc() + return res } +func (n *numberValidator) redeem() { + pools.poolOfNumberValidators.RedeemValidator(n) +} + type stringValidator struct { + Path string + In string Default interface{} Required bool AllowEmptyValue bool MaxLength *int64 MinLength *int64 Pattern string - Path string - In string + Options *SchemaValidatorOptions +} + +func newStringValidator( + path, in string, + def interface{}, required, allowEmpty bool, maxLength, minLength *int64, pattern string, + opts *SchemaValidatorOptions) *stringValidator { + if opts == nil { + opts = new(SchemaValidatorOptions) + } + + var s *stringValidator + if opts.recycleValidators { + s = pools.poolOfStringValidators.BorrowValidator() + } else { + s = new(stringValidator) + } + + s.Path = path + s.In = in + s.Default = def + s.Required = required + s.AllowEmptyValue = allowEmpty + s.MaxLength = maxLength + s.MinLength = minLength + s.Pattern = pattern + s.Options = opts + + return s } func (s *stringValidator) SetPath(path string) { @@ -604,42 +1002,50 @@ func (s *stringValidator) SetPath(path string) { func (s *stringValidator) Applies(source interface{}, kind reflect.Kind) bool { switch source.(type) { case *spec.Parameter, *spec.Schema, *spec.Items, 
*spec.Header: - r := kind == reflect.String - debugLog("string validator for %q applies %t for %T (kind: %v)\n", s.Path, r, source, kind) - return r + return kind == reflect.String + default: + return false } - debugLog("string validator for %q applies %t for %T (kind: %v)\n", s.Path, false, source, kind) - return false } func (s *stringValidator) Validate(val interface{}) *Result { + if s.Options.recycleValidators { + defer func() { + s.redeem() + }() + } + data, ok := val.(string) if !ok { - return errorHelp.sErr(errors.InvalidType(s.Path, s.In, stringType, val)) + return errorHelp.sErr(errors.InvalidType(s.Path, s.In, stringType, val), s.Options.recycleResult) } if s.Required && !s.AllowEmptyValue && (s.Default == nil || s.Default == "") { if err := RequiredString(s.Path, s.In, data); err != nil { - return errorHelp.sErr(err) + return errorHelp.sErr(err, s.Options.recycleResult) } } if s.MaxLength != nil { if err := MaxLength(s.Path, s.In, data, *s.MaxLength); err != nil { - return errorHelp.sErr(err) + return errorHelp.sErr(err, s.Options.recycleResult) } } if s.MinLength != nil { if err := MinLength(s.Path, s.In, data, *s.MinLength); err != nil { - return errorHelp.sErr(err) + return errorHelp.sErr(err, s.Options.recycleResult) } } if s.Pattern != "" { if err := Pattern(s.Path, s.In, data, s.Pattern); err != nil { - return errorHelp.sErr(err) + return errorHelp.sErr(err, s.Options.recycleResult) } } return nil } + +func (s *stringValidator) redeem() { + pools.poolOfStringValidators.RedeemValidator(s) +} diff --git a/vendor/github.com/go-openapi/validate/values.go b/vendor/github.com/go-openapi/validate/values.go index e7ad8c103..5f6f5ee61 100644 --- a/vendor/github.com/go-openapi/validate/values.go +++ b/vendor/github.com/go-openapi/validate/values.go @@ -120,7 +120,7 @@ func UniqueItems(path, in string, data interface{}) *errors.Validation { // MinLength validates a string for minimum length func MinLength(path, in, data string, minLength int64) *errors.Validation { - strLen := int64(utf8.RuneCount([]byte(data))) + strLen := int64(utf8.RuneCountInString(data)) if strLen < minLength { return errors.TooShort(path, in, minLength, data) } @@ -129,7 +129,7 @@ func MinLength(path, in, data string, minLength int64) *errors.Validation { // MaxLength validates a string for maximum length func MaxLength(path, in, data string, maxLength int64) *errors.Validation { - strLen := int64(utf8.RuneCount([]byte(data))) + strLen := int64(utf8.RuneCountInString(data)) if strLen > maxLength { return errors.TooLong(path, in, maxLength, data) } @@ -315,7 +315,7 @@ func FormatOf(path, in, format, data string, registry strfmt.Registry) *errors.V // TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free func MaximumNativeType(path, in string, val interface{}, max float64, exclusive bool) *errors.Validation { kind := reflect.ValueOf(val).Type().Kind() - switch kind { + switch kind { //nolint:exhaustive case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: value := valueHelp.asInt64(val) return MaximumInt(path, in, value, int64(max), exclusive) @@ -345,7 +345,7 @@ func MaximumNativeType(path, in string, val interface{}, max float64, exclusive // TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free func MinimumNativeType(path, in string, val interface{}, min float64, exclusive bool) *errors.Validation { kind := reflect.ValueOf(val).Type().Kind() - switch kind { + switch kind { //nolint:exhaustive case reflect.Int, 
reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: value := valueHelp.asInt64(val) return MinimumInt(path, in, value, int64(min), exclusive) @@ -375,7 +375,7 @@ func MinimumNativeType(path, in string, val interface{}, min float64, exclusive // TODO: Normally, a JSON MAX_SAFE_INTEGER check would ensure conversion remains loss-free func MultipleOfNativeType(path, in string, val interface{}, multipleOf float64) *errors.Validation { kind := reflect.ValueOf(val).Type().Kind() - switch kind { + switch kind { //nolint:exhaustive case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: value := valueHelp.asInt64(val) return MultipleOfInt(path, in, value, int64(multipleOf)) @@ -399,7 +399,7 @@ func IsValueValidAgainstRange(val interface{}, typeName, format, prefix, path st // What is the string representation of val var stringRep string - switch kind { + switch kind { //nolint:exhaustive case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: stringRep = swag.FormatUint64(valueHelp.asUint64(val)) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json index d51736e7e..433693a69 100644 --- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -1,3 +1,3 @@ { - "v2": "2.12.4" + "v2": "2.12.5" } diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md index 7e36eb48f..b64522dfe 100644 --- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -1,5 +1,12 @@ # Changelog +## [2.12.5](https://github.com/googleapis/gax-go/compare/v2.12.4...v2.12.5) (2024-06-18) + + +### Bug Fixes + +* **v2/apierror:** fix (*APIError).Error() for unwrapped Status ([#351](https://github.com/googleapis/gax-go/issues/351)) ([22c16e7](https://github.com/googleapis/gax-go/commit/22c16e7bff5402bdc4c25063771cdd01c650b500)), refs [#350](https://github.com/googleapis/gax-go/issues/350) + ## [2.12.4](https://github.com/googleapis/gax-go/compare/v2.12.3...v2.12.4) (2024-05-03) diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go index d785a065c..7de60773d 100644 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go @@ -206,8 +206,10 @@ func (a *APIError) Error() string { // Truncate the googleapi.Error message because it dumps the Details in // an ugly way. msg = fmt.Sprintf("googleapi: Error %d: %s", a.httpErr.Code, a.httpErr.Message) - } else if a.status != nil { + } else if a.status != nil && a.err != nil { msg = a.err.Error() + } else if a.status != nil { + msg = a.status.Message() } return strings.TrimSpace(fmt.Sprintf("%s\n%s", msg, a.details)) } diff --git a/vendor/github.com/googleapis/gax-go/v2/header.go b/vendor/github.com/googleapis/gax-go/v2/header.go index 3e53729e5..f5273985a 100644 --- a/vendor/github.com/googleapis/gax-go/v2/header.go +++ b/vendor/github.com/googleapis/gax-go/v2/header.go @@ -163,11 +163,38 @@ func insertMetadata(ctx context.Context, keyvals ...string) metadata.MD { out = metadata.MD(make(map[string][]string)) } headers := callctx.HeadersFromContext(ctx) - for k, v := range headers { - out[k] = append(out[k], v...) 
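The added lines in the remainder of this hunk (below) merge every x-goog-api-client value found on the context and in the explicit key/value pairs into a single space-separated header entry. A standalone sketch of that merge idea, with placeholder header values and no gRPC metadata dependency:

```go
package main

import (
	"fmt"
	"strings"
)

// mergeAPIClientHeader joins all x-goog-api-client values into one
// space-separated entry and copies every other header through unchanged.
func mergeAPIClientHeader(headers map[string][]string) map[string][]string {
	const key = "x-goog-api-client"
	out := make(map[string][]string, len(headers))
	var merged []string
	for k, vals := range headers {
		if k == key {
			merged = append(merged, vals...)
			continue
		}
		out[k] = append(out[k], vals...)
	}
	if len(merged) > 0 {
		out[key] = []string{strings.Join(merged, " ")}
	}
	return out
}

func main() {
	in := map[string][]string{
		"x-goog-api-client": {"gl-go/1.22.0", "gax/2.12.5"}, // placeholder tokens
		"authorization":     {"Bearer placeholder-token"},
	}
	fmt.Println(mergeAPIClientHeader(in)["x-goog-api-client"])
	// [gl-go/1.22.0 gax/2.12.5]
}
```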
+ + // x-goog-api-client is a special case that we want to make sure gets merged + // into a single header. + const xGoogHeader = "x-goog-api-client" + var mergedXgoogHeader strings.Builder + + for k, vals := range headers { + if k == xGoogHeader { + // Merge all values for the x-goog-api-client header set on the ctx. + for _, v := range vals { + mergedXgoogHeader.WriteString(v) + mergedXgoogHeader.WriteRune(' ') + } + continue + } + out[k] = append(out[k], vals...) } for i := 0; i < len(keyvals); i = i + 2 { out[keyvals[i]] = append(out[keyvals[i]], keyvals[i+1]) + + if keyvals[i] == xGoogHeader { + // Merge the x-goog-api-client header values set on the ctx with any + // values passed in for it from the client. + mergedXgoogHeader.WriteString(keyvals[i+1]) + mergedXgoogHeader.WriteRune(' ') + } } + + // Add the x goog header back in, replacing the separate values that were set. + if mergedXgoogHeader.Len() > 0 { + out[xGoogHeader] = []string{mergedXgoogHeader.String()[:mergedXgoogHeader.Len()-1]} + } + return out } diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go index 3006ad7bd..4f780f463 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -30,4 +30,4 @@ package internal // Version is the current tagged release of the library. -const Version = "2.12.4" +const Version = "2.12.5" diff --git a/vendor/github.com/hashicorp/go-version/CHANGELOG.md b/vendor/github.com/hashicorp/go-version/CHANGELOG.md index 5f16dd140..6d48174bf 100644 --- a/vendor/github.com/hashicorp/go-version/CHANGELOG.md +++ b/vendor/github.com/hashicorp/go-version/CHANGELOG.md @@ -1,3 +1,22 @@ +# 1.7.0 (May 24, 2024) + +ENHANCEMENTS: + +- Remove `reflect` dependency ([#91](https://github.com/hashicorp/go-version/pull/91)) +- Implement the `database/sql.Scanner` and `database/sql/driver.Value` interfaces for `Version` ([#133](https://github.com/hashicorp/go-version/pull/133)) + +INTERNAL: + +- [COMPLIANCE] Add Copyright and License Headers ([#115](https://github.com/hashicorp/go-version/pull/115)) +- [COMPLIANCE] Update MPL-2.0 LICENSE ([#105](https://github.com/hashicorp/go-version/pull/105)) +- Bump actions/cache from 3.0.11 to 3.2.5 ([#116](https://github.com/hashicorp/go-version/pull/116)) +- Bump actions/checkout from 3.2.0 to 3.3.0 ([#111](https://github.com/hashicorp/go-version/pull/111)) +- Bump actions/upload-artifact from 3.1.1 to 3.1.2 ([#112](https://github.com/hashicorp/go-version/pull/112)) +- GHA Migration ([#103](https://github.com/hashicorp/go-version/pull/103)) +- github: Pin external GitHub Actions to hashes ([#107](https://github.com/hashicorp/go-version/pull/107)) +- SEC-090: Automated trusted workflow pinning (2023-04-05) ([#124](https://github.com/hashicorp/go-version/pull/124)) +- update readme ([#104](https://github.com/hashicorp/go-version/pull/104)) + # 1.6.0 (June 28, 2022) FEATURES: diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE index c33dcc7c9..1409d6ab9 100644 --- a/vendor/github.com/hashicorp/go-version/LICENSE +++ b/vendor/github.com/hashicorp/go-version/LICENSE @@ -1,3 +1,5 @@ +Copyright (c) 2014 HashiCorp, Inc. + Mozilla Public License, version 2.0 1. 
Definitions diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md index 4d2505090..4b7806cd9 100644 --- a/vendor/github.com/hashicorp/go-version/README.md +++ b/vendor/github.com/hashicorp/go-version/README.md @@ -1,5 +1,5 @@ # Versioning Library for Go -[![Build Status](https://circleci.com/gh/hashicorp/go-version/tree/main.svg?style=svg)](https://circleci.com/gh/hashicorp/go-version/tree/main) +![Build Status](https://github.com/hashicorp/go-version/actions/workflows/go-tests.yml/badge.svg) [![GoDoc](https://godoc.org/github.com/hashicorp/go-version?status.svg)](https://godoc.org/github.com/hashicorp/go-version) go-version is a library for parsing versions and version constraints, diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go index da5d1aca1..29bdc4d2b 100644 --- a/vendor/github.com/hashicorp/go-version/constraint.go +++ b/vendor/github.com/hashicorp/go-version/constraint.go @@ -1,8 +1,10 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package version import ( "fmt" - "reflect" "regexp" "sort" "strings" @@ -199,7 +201,7 @@ func prereleaseCheck(v, c *Version) bool { case cPre && vPre: // A constraint with a pre-release can only match a pre-release version // with the same base segments. - return reflect.DeepEqual(c.Segments64(), v.Segments64()) + return v.equalSegments(c) case !cPre && vPre: // A constraint without a pre-release can only match a version without a diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go index e87df6990..7c683c281 100644 --- a/vendor/github.com/hashicorp/go-version/version.go +++ b/vendor/github.com/hashicorp/go-version/version.go @@ -1,9 +1,12 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package version import ( "bytes" + "database/sql/driver" "fmt" - "reflect" "regexp" "strconv" "strings" @@ -117,11 +120,8 @@ func (v *Version) Compare(other *Version) int { return 0 } - segmentsSelf := v.Segments64() - segmentsOther := other.Segments64() - // If the segments are the same, we must compare on prerelease info - if reflect.DeepEqual(segmentsSelf, segmentsOther) { + if v.equalSegments(other) { preSelf := v.Prerelease() preOther := other.Prerelease() if preSelf == "" && preOther == "" { @@ -137,6 +137,8 @@ func (v *Version) Compare(other *Version) int { return comparePrereleases(preSelf, preOther) } + segmentsSelf := v.Segments64() + segmentsOther := other.Segments64() // Get the highest specificity (hS), or if they're equal, just use segmentSelf length lenSelf := len(segmentsSelf) lenOther := len(segmentsOther) @@ -160,7 +162,7 @@ func (v *Version) Compare(other *Version) int { // this means Other had the lower specificity // Check to see if the remaining segments in Self are all zeros - if !allZero(segmentsSelf[i:]) { - //if not, it means that Self has to be greater than Other + // if not, it means that Self has to be greater than Other return 1 } break @@ -180,6 +182,21 @@ func (v *Version) Compare(other *Version) int { return 0 } +func (v *Version) equalSegments(other *Version) bool { + segmentsSelf := v.Segments64() + segmentsOther := other.Segments64() + + if len(segmentsSelf) != len(segmentsOther) { + return false + } + for i, v := range segmentsSelf { + if v != segmentsOther[i] { + return false + } + } + return true +} + func allZero(segs []int64) bool { for _, s := range segs { if s != 0 { @@ -405,3 +422,20 @@ func (v *Version) UnmarshalText(b []byte) error { func (v *Version) MarshalText() ([]byte, error) { return []byte(v.String()), nil } + +// Scan implements the sql.Scanner interface. +func (v *Version) Scan(src interface{}) error { + switch src := src.(type) { + case string: + return v.UnmarshalText([]byte(src)) + case nil: + return nil + default: + return fmt.Errorf("cannot scan %T as Version", src) + } +} + +// Value implements the driver.Valuer interface. +func (v *Version) Value() (driver.Value, error) { + return v.String(), nil +} diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go index cc888d43e..83547fe13 100644 --- a/vendor/github.com/hashicorp/go-version/version_collection.go +++ b/vendor/github.com/hashicorp/go-version/version_collection.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package version // Collection is a type that implements the sort.Interface interface diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 1f72cdde1..05c7359e4 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -55,6 +55,10 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 + +
+ See changes to v1.16.x + * July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7) * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829 @@ -93,6 +97,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748 * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 * s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746 +
See changes to v1.15.x @@ -560,6 +565,8 @@ the stateless compress described below. For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). +To disable all assembly add `-tags=noasm`. This works across all packages. + # Stateless compression This package offers stateless compression as a special option for gzip/deflate. diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go index 2aa6a95a0..2754bac6f 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -51,7 +51,7 @@ func emitCopy(dst []byte, offset, length int) int { i := 0 // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is is a little lower (at 60 = 64 - 4), because + // length emitted down below is a little lower (at 60 = 64 - 4), because // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 9f17ce601..03744fbc7 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -554,6 +554,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { if debugDecoder { printf("Compression modes: 0b%b", compMode) } + if compMode&3 != 0 { + return errors.New("corrupt block: reserved bits not zero") + } for i := uint(0); i < 3; i++ { mode := seqCompMode((compMode >> (6 - i*2)) & 3) if debugDecoder { diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index 2cfe925ad..32a7f401d 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -427,6 +427,16 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error { return nil } +// encodeRLE will encode an RLE block. +func (b *blockEnc) encodeRLE(val byte, length uint32) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(length) + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, val) +} + // fuzzFseEncoder can be used to fuzz the FSE encoder. func fuzzFseEncoder(data []byte) int { if len(data) > maxSequences || len(data) < 2 { @@ -479,6 +489,16 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { if len(b.sequences) == 0 { return b.encodeLits(b.literals, rawAllLits) } + if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 { + // Check common RLE cases. + seq := b.sequences[0] + if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 { + // Offset == 1 and 0 or 1 literals. + b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen) + return nil + } + } + // We want some difference to at least account for the headers. 
saved := b.size - len(b.literals) - (b.size >> 6) if saved < 16 { diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index f04aaa21e..bbca17234 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -82,7 +82,7 @@ var ( // can run multiple concurrent stateless decodes. It is even possible to // use stateless decodes while a stream is being decoded. // -// The Reset function can be used to initiate a new stream, which is will considerably +// The Reset function can be used to initiate a new stream, which will considerably // reduce the allocations normally caused by NewReader. func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { initPredefined() diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index 87f42879a..4613724e9 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -135,8 +135,20 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { break } + // Add block to history s := e.addBlock(src) blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + if len(src) < minNonLiteralBlockSize { blk.extraLits = len(src) blk.literals = blk.literals[:len(src)] diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 20d25b0e0..a4f5bf91f 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -102,9 +102,20 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { e.cur = e.maxMatchOff break } - + // Add block to history s := e.addBlock(src) blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + if len(src) < minNonLiteralBlockSize { blk.extraLits = len(src) blk.literals = blk.literals[:len(src)] diff --git a/vendor/github.com/mattn/go-shellwords/.travis.yml b/vendor/github.com/mattn/go-shellwords/.travis.yml deleted file mode 100644 index ebd5edd89..000000000 --- a/vendor/github.com/mattn/go-shellwords/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -arch: - - amd64 - - ppc64le -language: go -sudo: false -go: - - tip - -before_install: - - go get -t -v ./... 
- -script: - - ./go.test.sh - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/mattn/go-shellwords/LICENSE b/vendor/github.com/mattn/go-shellwords/LICENSE deleted file mode 100644 index 740fa9313..000000000 --- a/vendor/github.com/mattn/go-shellwords/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2017 Yasuhiro Matsumoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/mattn/go-shellwords/README.md b/vendor/github.com/mattn/go-shellwords/README.md deleted file mode 100644 index bdd531918..000000000 --- a/vendor/github.com/mattn/go-shellwords/README.md +++ /dev/null @@ -1,55 +0,0 @@ -# go-shellwords - -[![codecov](https://codecov.io/gh/mattn/go-shellwords/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-shellwords) -[![Build Status](https://travis-ci.org/mattn/go-shellwords.svg?branch=master)](https://travis-ci.org/mattn/go-shellwords) -[![PkgGoDev](https://pkg.go.dev/badge/github.com/mattn/go-shellwords)](https://pkg.go.dev/github.com/mattn/go-shellwords) -[![ci](https://github.com/mattn/go-shellwords/ci/badge.svg)](https://github.com/mattn/go-shellwords/actions) - -Parse line as shell words. - -## Usage - -```go -args, err := shellwords.Parse("./foo --bar=baz") -// args should be ["./foo", "--bar=baz"] -``` - -```go -envs, args, err := shellwords.ParseWithEnvs("FOO=foo BAR=baz ./foo --bar=baz") -// envs should be ["FOO=foo", "BAR=baz"] -// args should be ["./foo", "--bar=baz"] -``` - -```go -os.Setenv("FOO", "bar") -p := shellwords.NewParser() -p.ParseEnv = true -args, err := p.Parse("./foo $FOO") -// args should be ["./foo", "bar"] -``` - -```go -p := shellwords.NewParser() -p.ParseBacktick = true -args, err := p.Parse("./foo `echo $SHELL`") -// args should be ["./foo", "/bin/bash"] -``` - -```go -shellwords.ParseBacktick = true -p := shellwords.NewParser() -args, err := p.Parse("./foo `echo $SHELL`") -// args should be ["./foo", "/bin/bash"] -``` - -# Thanks - -This is based on cpan module [Parse::CommandLine](https://metacpan.org/pod/Parse::CommandLine). 
- -# License - -under the MIT License: http://mattn.mit-license.org/2017 - -# Author - -Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-shellwords/go.test.sh b/vendor/github.com/mattn/go-shellwords/go.test.sh deleted file mode 100644 index a7deaca96..000000000 --- a/vendor/github.com/mattn/go-shellwords/go.test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash - -set -e -echo "" > coverage.txt - -for d in $(go list ./... | grep -v vendor); do - go test -coverprofile=profile.out -covermode=atomic "$d" - if [ -f profile.out ]; then - cat profile.out >> coverage.txt - rm profile.out - fi -done diff --git a/vendor/github.com/mattn/go-shellwords/shellwords.go b/vendor/github.com/mattn/go-shellwords/shellwords.go deleted file mode 100644 index 1b42a0017..000000000 --- a/vendor/github.com/mattn/go-shellwords/shellwords.go +++ /dev/null @@ -1,317 +0,0 @@ -package shellwords - -import ( - "bytes" - "errors" - "os" - "strings" - "unicode" -) - -var ( - ParseEnv bool = false - ParseBacktick bool = false -) - -func isSpace(r rune) bool { - switch r { - case ' ', '\t', '\r', '\n': - return true - } - return false -} - -func replaceEnv(getenv func(string) string, s string) string { - if getenv == nil { - getenv = os.Getenv - } - - var buf bytes.Buffer - rs := []rune(s) - for i := 0; i < len(rs); i++ { - r := rs[i] - if r == '\\' { - i++ - if i == len(rs) { - break - } - buf.WriteRune(rs[i]) - continue - } else if r == '$' { - i++ - if i == len(rs) { - buf.WriteRune(r) - break - } - if rs[i] == 0x7b { - i++ - p := i - for ; i < len(rs); i++ { - r = rs[i] - if r == '\\' { - i++ - if i == len(rs) { - return s - } - continue - } - if r == 0x7d || (!unicode.IsLetter(r) && r != '_' && !unicode.IsDigit(r)) { - break - } - } - if r != 0x7d { - return s - } - if i > p { - buf.WriteString(getenv(s[p:i])) - } - } else { - p := i - for ; i < len(rs); i++ { - r := rs[i] - if r == '\\' { - i++ - if i == len(rs) { - return s - } - continue - } - if !unicode.IsLetter(r) && r != '_' && !unicode.IsDigit(r) { - break - } - } - if i > p { - buf.WriteString(getenv(s[p:i])) - i-- - } else { - buf.WriteString(s[p:]) - } - } - } else { - buf.WriteRune(r) - } - } - return buf.String() -} - -type Parser struct { - ParseEnv bool - ParseBacktick bool - Position int - Dir string - - // If ParseEnv is true, use this for getenv. - // If nil, use os.Getenv. - Getenv func(string) string -} - -func NewParser() *Parser { - return &Parser{ - ParseEnv: ParseEnv, - ParseBacktick: ParseBacktick, - Position: 0, - Dir: "", - } -} - -type argType int - -const ( - argNo argType = iota - argSingle - argQuoted -) - -func (p *Parser) Parse(line string) ([]string, error) { - args := []string{} - buf := "" - var escaped, doubleQuoted, singleQuoted, backQuote, dollarQuote bool - backtick := "" - - pos := -1 - got := argNo - - i := -1 -loop: - for _, r := range line { - i++ - if escaped { - buf += string(r) - escaped = false - got = argSingle - continue - } - - if r == '\\' { - if singleQuoted { - buf += string(r) - } else { - escaped = true - } - continue - } - - if isSpace(r) { - if singleQuoted || doubleQuoted || backQuote || dollarQuote { - buf += string(r) - backtick += string(r) - } else if got != argNo { - if p.ParseEnv { - if got == argSingle { - parser := &Parser{ParseEnv: false, ParseBacktick: false, Position: 0, Dir: p.Dir} - strs, err := parser.Parse(replaceEnv(p.Getenv, buf)) - if err != nil { - return nil, err - } - args = append(args, strs...) 
- } else { - args = append(args, replaceEnv(p.Getenv, buf)) - } - } else { - args = append(args, buf) - } - buf = "" - got = argNo - } - continue - } - - switch r { - case '`': - if !singleQuoted && !doubleQuoted && !dollarQuote { - if p.ParseBacktick { - if backQuote { - out, err := shellRun(backtick, p.Dir) - if err != nil { - return nil, err - } - buf = buf[:len(buf)-len(backtick)] + out - } - backtick = "" - backQuote = !backQuote - continue - } - backtick = "" - backQuote = !backQuote - } - case ')': - if !singleQuoted && !doubleQuoted && !backQuote { - if p.ParseBacktick { - if dollarQuote { - out, err := shellRun(backtick, p.Dir) - if err != nil { - return nil, err - } - buf = buf[:len(buf)-len(backtick)-2] + out - } - backtick = "" - dollarQuote = !dollarQuote - continue - } - backtick = "" - dollarQuote = !dollarQuote - } - case '(': - if !singleQuoted && !doubleQuoted && !backQuote { - if !dollarQuote && strings.HasSuffix(buf, "$") { - dollarQuote = true - buf += "(" - continue - } else { - return nil, errors.New("invalid command line string") - } - } - case '"': - if !singleQuoted && !dollarQuote { - if doubleQuoted { - got = argQuoted - } - doubleQuoted = !doubleQuoted - continue - } - case '\'': - if !doubleQuoted && !dollarQuote { - if singleQuoted { - got = argQuoted - } - singleQuoted = !singleQuoted - continue - } - case ';', '&', '|', '<', '>': - if !(escaped || singleQuoted || doubleQuoted || backQuote || dollarQuote) { - if r == '>' && len(buf) > 0 { - if c := buf[0]; '0' <= c && c <= '9' { - i -= 1 - got = argNo - } - } - pos = i - break loop - } - } - - got = argSingle - buf += string(r) - if backQuote || dollarQuote { - backtick += string(r) - } - } - - if got != argNo { - if p.ParseEnv { - if got == argSingle { - parser := &Parser{ParseEnv: false, ParseBacktick: false, Position: 0, Dir: p.Dir} - strs, err := parser.Parse(replaceEnv(p.Getenv, buf)) - if err != nil { - return nil, err - } - args = append(args, strs...) 
- } else { - args = append(args, replaceEnv(p.Getenv, buf)) - } - } else { - args = append(args, buf) - } - } - - if escaped || singleQuoted || doubleQuoted || backQuote || dollarQuote { - return nil, errors.New("invalid command line string") - } - - p.Position = pos - - return args, nil -} - -func (p *Parser) ParseWithEnvs(line string) (envs []string, args []string, err error) { - _args, err := p.Parse(line) - if err != nil { - return nil, nil, err - } - envs = []string{} - args = []string{} - parsingEnv := true - for _, arg := range _args { - if parsingEnv && isEnv(arg) { - envs = append(envs, arg) - } else { - if parsingEnv { - parsingEnv = false - } - args = append(args, arg) - } - } - return envs, args, nil -} - -func isEnv(arg string) bool { - return len(strings.Split(arg, "=")) == 2 -} - -func Parse(line string) ([]string, error) { - return NewParser().Parse(line) -} - -func ParseWithEnvs(line string) (envs []string, args []string, err error) { - return NewParser().ParseWithEnvs(line) -} diff --git a/vendor/github.com/mattn/go-shellwords/util_posix.go b/vendor/github.com/mattn/go-shellwords/util_posix.go deleted file mode 100644 index b56a90120..000000000 --- a/vendor/github.com/mattn/go-shellwords/util_posix.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build !windows - -package shellwords - -import ( - "fmt" - "os" - "os/exec" - "strings" -) - -func shellRun(line, dir string) (string, error) { - var shell string - if shell = os.Getenv("SHELL"); shell == "" { - shell = "/bin/sh" - } - cmd := exec.Command(shell, "-c", line) - if dir != "" { - cmd.Dir = dir - } - b, err := cmd.Output() - if err != nil { - if eerr, ok := err.(*exec.ExitError); ok { - b = eerr.Stderr - } - return "", fmt.Errorf("%s: %w", string(b), err) - } - return strings.TrimSpace(string(b)), nil -} diff --git a/vendor/github.com/mattn/go-shellwords/util_windows.go b/vendor/github.com/mattn/go-shellwords/util_windows.go deleted file mode 100644 index fd738a721..000000000 --- a/vendor/github.com/mattn/go-shellwords/util_windows.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build windows - -package shellwords - -import ( - "fmt" - "os" - "os/exec" - "strings" -) - -func shellRun(line, dir string) (string, error) { - var shell string - if shell = os.Getenv("COMSPEC"); shell == "" { - shell = "cmd" - } - cmd := exec.Command(shell, "/c", line) - if dir != "" { - cmd.Dir = dir - } - b, err := cmd.Output() - if err != nil { - if eerr, ok := err.(*exec.ExitError); ok { - b = eerr.Stderr - } - return "", fmt.Errorf("%s: %w", string(b), err) - } - return strings.TrimSpace(string(b)), nil -} diff --git a/vendor/github.com/moby/docker-image-spec/LICENSE b/vendor/github.com/moby/docker-image-spec/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/moby/docker-image-spec/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
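Editor's note: the osbuild/images diffs below drop the old `oscap.GetTailoringFile` helper and the per-distro `oscapDataDir` constants in favour of an exported `oscap.DataDir` plus explicit `RemediationConfig`/`TailoringConfig` structs. The sketch that follows is illustrative only and is not part of the vendored change; the profile and rule IDs are made up, and it simply mirrors how the `osCustomizations` code in `distro/fedora/images.go` and `distro/rhel/images.go` in these diffs wires the two structs together:

```go
package main

import (
	"fmt"
	"path/filepath"

	"github.com/osbuild/images/pkg/customizations/oscap"
)

func main() {
	// Hypothetical profile ID, for illustration only.
	profileID := "xccdf_org.ssgproject.content_profile_ospp"

	remediation := oscap.RemediationConfig{
		Datastream:         oscap.DefaultFedoraDatastream(),
		ProfileID:          profileID,
		CompressionEnabled: true,
	}

	// When tailoring is requested, the tailoring file lives under
	// oscap.DataDir and remediation runs against the tailored profile ID,
	// following the ordering used in the diffs below.
	remediation.TailoringPath = filepath.Join(oscap.DataDir, "tailoring.xml")
	tailoring := oscap.TailoringConfig{
		RemediationConfig: remediation,
		TailoredProfileID: fmt.Sprintf("%s_osbuild_tailoring", profileID),
		Selected:          []string{"example_rule_id"}, // hypothetical rule ID
	}
	// Overwrite the profile ID last, after the tailoring config has captured
	// both the base and the tailored profile IDs.
	remediation.ProfileID = tailoring.TailoredProfileID

	fmt.Printf("remediation: %+v\ntailoring: %+v\n", remediation, tailoring)
}
```

Setting the tailored profile ID on the remediation config as the final step mirrors the comment in the diff: the tailoring config needs to see both the base profile ID and the tailored one.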
diff --git a/vendor/github.com/docker/docker/image/spec/specs-go/v1/image.go b/vendor/github.com/moby/docker-image-spec/specs-go/v1/image.go similarity index 100% rename from vendor/github.com/docker/docker/image/spec/specs-go/v1/image.go rename to vendor/github.com/moby/docker-image-spec/specs-go/v1/image.go diff --git a/vendor/github.com/osbuild/images/pkg/customizations/oscap/oscap.go b/vendor/github.com/osbuild/images/pkg/customizations/oscap/oscap.go index 1e70665d9..a4f11b4b2 100644 --- a/vendor/github.com/osbuild/images/pkg/customizations/oscap/oscap.go +++ b/vendor/github.com/osbuild/images/pkg/customizations/oscap/oscap.go @@ -1,11 +1,7 @@ package oscap import ( - "fmt" - "path/filepath" "strings" - - "github.com/osbuild/images/pkg/customizations/fsnode" ) type Profile string @@ -43,10 +39,24 @@ const ( defaultRHEL8Datastream string = "/usr/share/xml/scap/ssg/content/ssg-rhel8-ds.xml" defaultRHEL9Datastream string = "/usr/share/xml/scap/ssg/content/ssg-rhel9-ds.xml" - // tailoring directory path - tailoringDirPath string = "/usr/share/xml/osbuild-openscap-data" + // oscap related directories + DataDir string = "/oscap_data" ) +type RemediationConfig struct { + Datastream string + ProfileID string + TailoringPath string + CompressionEnabled bool +} + +type TailoringConfig struct { + RemediationConfig + TailoredProfileID string + Selected []string + Unselected []string +} + func DefaultFedoraDatastream() string { return defaultFedoraDatastream } @@ -80,15 +90,3 @@ func IsProfileAllowed(profile string, allowlist []Profile) bool { return false } - -func GetTailoringFile(profile string) (string, string, *fsnode.Directory, error) { - newProfile := fmt.Sprintf("%s_osbuild_tailoring", profile) - path := filepath.Join(tailoringDirPath, "tailoring.xml") - - tailoringDir, err := fsnode.NewDirectory(tailoringDirPath, nil, nil, nil, true) - if err != nil { - return "", "", nil, err - } - - return newProfile, path, tailoringDir, nil -} diff --git a/vendor/github.com/osbuild/images/pkg/distro/fedora/distro.go b/vendor/github.com/osbuild/images/pkg/distro/fedora/distro.go index 16bbac5b8..246814a34 100644 --- a/vendor/github.com/osbuild/images/pkg/distro/fedora/distro.go +++ b/vendor/github.com/osbuild/images/pkg/distro/fedora/distro.go @@ -41,9 +41,6 @@ const ( // Added kernel command line options for iot-raw-image and iot-qcow2-image types ostreeDeploymentKernelOptions = "modprobe.blacklist=vc4 rw coreos.no_persist_ip" - - // location for saving openscap remediation data - oscapDataDir = "/oscap_data" ) var ( diff --git a/vendor/github.com/osbuild/images/pkg/distro/fedora/images.go b/vendor/github.com/osbuild/images/pkg/distro/fedora/images.go index 171827e4d..2337a49f4 100644 --- a/vendor/github.com/osbuild/images/pkg/distro/fedora/images.go +++ b/vendor/github.com/osbuild/images/pkg/distro/fedora/images.go @@ -3,6 +3,7 @@ package fedora import ( "fmt" "math/rand" + "path/filepath" "github.com/osbuild/images/internal/common" "github.com/osbuild/images/internal/workload" @@ -183,14 +184,11 @@ func osCustomizations( panic("unexpected oscap options for ostree image type") } - // although the osbuild stage will create this directory, - // it's probably better to ensure that it is created here - dataDirNode, err := fsnode.NewDirectory(oscapDataDir, nil, nil, nil, true) + oscapDataNode, err := fsnode.NewDirectory(oscap.DataDir, nil, nil, nil, true) if err != nil { - panic("unexpected error creating OpenSCAP data directory") + panic(fmt.Sprintf("unexpected error creating required OpenSCAP directory: 
%s", oscap.DataDir)) } - - osc.Directories = append(osc.Directories, dataDirNode) + osc.Directories = append(osc.Directories, oscapDataNode) var datastream = oscapConfig.DataStream if datastream == "" { @@ -200,40 +198,29 @@ func osCustomizations( datastream = *imageConfig.DefaultOSCAPDatastream } - oscapStageOptions := osbuild.OscapConfig{ - Datastream: datastream, - ProfileID: oscapConfig.ProfileID, - Compression: true, + remediationConfig := oscap.RemediationConfig{ + Datastream: datastream, + ProfileID: oscapConfig.ProfileID, + CompressionEnabled: true, } + var tailoringConfig *oscap.TailoringConfig if oscapConfig.Tailoring != nil { - newProfile, tailoringFilepath, tailoringDir, err := oscap.GetTailoringFile(oscapConfig.ProfileID) - if err != nil { - panic(fmt.Sprintf("unexpected error creating tailoring file options: %v", err)) + remediationConfig.TailoringPath = filepath.Join(oscap.DataDir, "tailoring.xml") + tailoringConfig = &oscap.TailoringConfig{ + RemediationConfig: remediationConfig, + TailoredProfileID: fmt.Sprintf("%s_osbuild_tailoring", oscapConfig.ProfileID), + Selected: oscapConfig.Tailoring.Selected, + Unselected: oscapConfig.Tailoring.Unselected, } - - tailoringOptions := osbuild.OscapAutotailorConfig{ - NewProfile: newProfile, - Datastream: datastream, - ProfileID: oscapConfig.ProfileID, - Selected: oscapConfig.Tailoring.Selected, - Unselected: oscapConfig.Tailoring.Unselected, - } - - osc.OpenSCAPTailorConfig = osbuild.NewOscapAutotailorStageOptions( - tailoringFilepath, - tailoringOptions, - ) - - // overwrite the profile id with the new tailoring id - oscapStageOptions.ProfileID = newProfile - oscapStageOptions.Tailoring = tailoringFilepath - - // add the parent directory for the tailoring file - osc.Directories = append(osc.Directories, tailoringDir) + // we need to set this after the tailoring config + // since the tailoring config needs to know about both + // the base profile id and the tailored profile id + remediationConfig.ProfileID = tailoringConfig.TailoredProfileID } - osc.OpenSCAPConfig = osbuild.NewOscapRemediationStageOptions(oscapDataDir, oscapStageOptions) + osc.OpenSCAPTailorConfig = tailoringConfig + osc.OpenSCAPRemediationConfig = &remediationConfig } osc.ShellInit = imageConfig.ShellInit diff --git a/vendor/github.com/osbuild/images/pkg/distro/fedora/package_sets.go b/vendor/github.com/osbuild/images/pkg/distro/fedora/package_sets.go index bc9bd5696..c257e440e 100644 --- a/vendor/github.com/osbuild/images/pkg/distro/fedora/package_sets.go +++ b/vendor/github.com/osbuild/images/pkg/distro/fedora/package_sets.go @@ -428,7 +428,6 @@ func anacondaPackageSet(t *imageType) rpmmd.PackageSet { "nss-tools", "openssh-clients", "openssh-server", - "oscap-anaconda-addon", "ostree", "pciutils", "perl-interpreter", diff --git a/vendor/github.com/osbuild/images/pkg/distro/rhel/defaults.go b/vendor/github.com/osbuild/images/pkg/distro/rhel/defaults.go index bc0f3cb15..5a7977651 100644 --- a/vendor/github.com/osbuild/images/pkg/distro/rhel/defaults.go +++ b/vendor/github.com/osbuild/images/pkg/distro/rhel/defaults.go @@ -1,8 +1,5 @@ package rhel const ( - // location for saving openscap remediation data - oscapDataDir = "/oscap_data" - UEFIVendor = "redhat" ) diff --git a/vendor/github.com/osbuild/images/pkg/distro/rhel/images.go b/vendor/github.com/osbuild/images/pkg/distro/rhel/images.go index b50979ea3..962b7cfaa 100644 --- a/vendor/github.com/osbuild/images/pkg/distro/rhel/images.go +++ b/vendor/github.com/osbuild/images/pkg/distro/rhel/images.go @@ -3,6 +3,7 
@@ package rhel import ( "fmt" "math/rand" + "path/filepath" "github.com/osbuild/images/internal/workload" "github.com/osbuild/images/pkg/blueprint" @@ -204,14 +205,11 @@ func osCustomizations( panic("unexpected oscap options for ostree image type") } - // although the osbuild stage will create this directory, - // it's probably better to ensure that it is created here - dataDirNode, err := fsnode.NewDirectory(oscapDataDir, nil, nil, nil, true) + oscapDataNode, err := fsnode.NewDirectory(oscap.DataDir, nil, nil, nil, true) if err != nil { - panic("unexpected error creating OpenSCAP data directory") + panic(fmt.Sprintf("unexpected error creating required OpenSCAP directory: %s", oscap.DataDir)) } - - osc.Directories = append(osc.Directories, dataDirNode) + osc.Directories = append(osc.Directories, oscapDataNode) var datastream = oscapConfig.DataStream if datastream == "" { @@ -221,40 +219,29 @@ func osCustomizations( datastream = *imageConfig.DefaultOSCAPDatastream } - oscapStageOptions := osbuild.OscapConfig{ - Datastream: datastream, - ProfileID: oscapConfig.ProfileID, - Compression: true, + remediationConfig := oscap.RemediationConfig{ + Datastream: datastream, + ProfileID: oscapConfig.ProfileID, + CompressionEnabled: true, } + var tailoringConfig *oscap.TailoringConfig if oscapConfig.Tailoring != nil { - newProfile, tailoringFilepath, tailoringDir, err := oscap.GetTailoringFile(oscapConfig.ProfileID) - if err != nil { - panic(fmt.Sprintf("unexpected error creating tailoring file options: %v", err)) + remediationConfig.TailoringPath = filepath.Join(oscap.DataDir, "tailoring.xml") + tailoringConfig = &oscap.TailoringConfig{ + RemediationConfig: remediationConfig, + TailoredProfileID: fmt.Sprintf("%s_osbuild_tailoring", oscapConfig.ProfileID), + Selected: oscapConfig.Tailoring.Selected, + Unselected: oscapConfig.Tailoring.Unselected, } - - tailoringOptions := osbuild.OscapAutotailorConfig{ - NewProfile: newProfile, - Datastream: datastream, - ProfileID: oscapConfig.ProfileID, - Selected: oscapConfig.Tailoring.Selected, - Unselected: oscapConfig.Tailoring.Unselected, - } - - osc.OpenSCAPTailorConfig = osbuild.NewOscapAutotailorStageOptions( - tailoringFilepath, - tailoringOptions, - ) - - // overwrite the profile id with the new tailoring id - oscapStageOptions.ProfileID = newProfile - oscapStageOptions.Tailoring = tailoringFilepath - - // add the parent directory for the tailoring file - osc.Directories = append(osc.Directories, tailoringDir) + // we need to set this after the tailoring config + // since the tailoring config needs to know about both + // the base profile id and the tailored profile id + remediationConfig.ProfileID = tailoringConfig.TailoredProfileID } - osc.OpenSCAPConfig = osbuild.NewOscapRemediationStageOptions(oscapDataDir, oscapStageOptions) + osc.OpenSCAPTailorConfig = tailoringConfig + osc.OpenSCAPRemediationConfig = &remediationConfig } osc.ShellInit = imageConfig.ShellInit diff --git a/vendor/github.com/osbuild/images/pkg/dnfjson/dnfjson.go b/vendor/github.com/osbuild/images/pkg/dnfjson/dnfjson.go index efef35c9a..62f81b867 100644 --- a/vendor/github.com/osbuild/images/pkg/dnfjson/dnfjson.go +++ b/vendor/github.com/osbuild/images/pkg/dnfjson/dnfjson.go @@ -196,7 +196,7 @@ func (s *Solver) SetProxy(proxy string) error { func (s *Solver) Depsolve(pkgSets []rpmmd.PackageSet) ([]rpmmd.PackageSpec, []rpmmd.RepoConfig, error) { req, rhsmMap, err := s.makeDepsolveRequest(pkgSets) if err != nil { - return nil, nil, err + return nil, nil, fmt.Errorf("makeDepsolveRequest 
failed: %w", err) } // get non-exclusive read lock @@ -205,7 +205,7 @@ func (s *Solver) Depsolve(pkgSets []rpmmd.PackageSet) ([]rpmmd.PackageSpec, []rp output, err := run(s.dnfJsonCmd, req) if err != nil { - return nil, nil, err + return nil, nil, fmt.Errorf("running osbuild-depsolve-dnf failed:\n%w", err) } // touch repos to now now := time.Now().Local() @@ -219,7 +219,7 @@ func (s *Solver) Depsolve(pkgSets []rpmmd.PackageSet) ([]rpmmd.PackageSpec, []rp dec := json.NewDecoder(bytes.NewReader(output)) dec.DisallowUnknownFields() if err := dec.Decode(&result); err != nil { - return nil, nil, err + return nil, nil, fmt.Errorf("decoding depsolve result failed: %w", err) } packages, repos := result.toRPMMD(rhsmMap) @@ -721,6 +721,13 @@ func (err Error) Error() string { // message. func parseError(data []byte, repos []repoConfig) Error { var e Error + if len(data) == 0 { + return Error{ + Kind: "InternalError", + Reason: "dnf-json output was empty", + } + } + if err := json.Unmarshal(data, &e); err != nil { // dumping the error into the Reason can get noisy, but it's good for troubleshooting return Error{ @@ -776,7 +783,7 @@ func run(dnfJsonCmd []string, req *Request) ([]byte, error) { cmd := exec.Command(ex, args...) stdin, err := cmd.StdinPipe() if err != nil { - return nil, err + return nil, fmt.Errorf("creating stdin pipe for %s failed: %w", ex, err) } cmd.Stderr = os.Stderr @@ -785,12 +792,12 @@ func run(dnfJsonCmd []string, req *Request) ([]byte, error) { err = cmd.Start() if err != nil { - return nil, err + return nil, fmt.Errorf("starting %s failed: %w", ex, err) } err = json.NewEncoder(stdin).Encode(req) if err != nil { - return nil, err + return nil, fmt.Errorf("encoding request for %s failed: %w", ex, err) } stdin.Close() diff --git a/vendor/github.com/osbuild/images/pkg/manifest/os.go b/vendor/github.com/osbuild/images/pkg/manifest/os.go index 2054ec79f..3cd176ce5 100644 --- a/vendor/github.com/osbuild/images/pkg/manifest/os.go +++ b/vendor/github.com/osbuild/images/pkg/manifest/os.go @@ -12,6 +12,7 @@ import ( "github.com/osbuild/images/pkg/container" "github.com/osbuild/images/pkg/customizations/bootc" "github.com/osbuild/images/pkg/customizations/fsnode" + "github.com/osbuild/images/pkg/customizations/oscap" "github.com/osbuild/images/pkg/customizations/shell" "github.com/osbuild/images/pkg/customizations/users" "github.com/osbuild/images/pkg/disk" @@ -96,38 +97,40 @@ type OSCustomizations struct { ShellInit []shell.InitFile // TODO: drop osbuild types from the API - Firewall *osbuild.FirewallStageOptions - Grub2Config *osbuild.GRUB2Config - Sysconfig []*osbuild.SysconfigStageOptions - SystemdLogind []*osbuild.SystemdLogindStageOptions - CloudInit []*osbuild.CloudInitStageOptions - Modprobe []*osbuild.ModprobeStageOptions - DracutConf []*osbuild.DracutConfStageOptions - SystemdUnit []*osbuild.SystemdUnitStageOptions - Authselect *osbuild.AuthselectStageOptions - SELinuxConfig *osbuild.SELinuxConfigStageOptions - Tuned *osbuild.TunedStageOptions - Tmpfilesd []*osbuild.TmpfilesdStageOptions - PamLimitsConf []*osbuild.PamLimitsConfStageOptions - Sysctld []*osbuild.SysctldStageOptions - DNFConfig []*osbuild.DNFConfigStageOptions - DNFAutomaticConfig *osbuild.DNFAutomaticConfigStageOptions - YUMConfig *osbuild.YumConfigStageOptions - YUMRepos []*osbuild.YumReposStageOptions - SshdConfig *osbuild.SshdConfigStageOptions - GCPGuestAgentConfig *osbuild.GcpGuestAgentConfigOptions - AuthConfig *osbuild.AuthconfigStageOptions - PwQuality *osbuild.PwqualityConfStageOptions - 
OpenSCAPTailorConfig *osbuild.OscapAutotailorStageOptions - OpenSCAPConfig *osbuild.OscapRemediationStageOptions - NTPServers []osbuild.ChronyConfigServer - WAAgentConfig *osbuild.WAAgentConfStageOptions - UdevRules *osbuild.UdevRulesStageOptions - WSLConfig *osbuild.WSLConfStageOptions - LeapSecTZ *string - FactAPIType *facts.APIType - Presets []osbuild.Preset - ContainersStorage *string + Firewall *osbuild.FirewallStageOptions + Grub2Config *osbuild.GRUB2Config + Sysconfig []*osbuild.SysconfigStageOptions + SystemdLogind []*osbuild.SystemdLogindStageOptions + CloudInit []*osbuild.CloudInitStageOptions + Modprobe []*osbuild.ModprobeStageOptions + DracutConf []*osbuild.DracutConfStageOptions + SystemdUnit []*osbuild.SystemdUnitStageOptions + Authselect *osbuild.AuthselectStageOptions + SELinuxConfig *osbuild.SELinuxConfigStageOptions + Tuned *osbuild.TunedStageOptions + Tmpfilesd []*osbuild.TmpfilesdStageOptions + PamLimitsConf []*osbuild.PamLimitsConfStageOptions + Sysctld []*osbuild.SysctldStageOptions + DNFConfig []*osbuild.DNFConfigStageOptions + DNFAutomaticConfig *osbuild.DNFAutomaticConfigStageOptions + YUMConfig *osbuild.YumConfigStageOptions + YUMRepos []*osbuild.YumReposStageOptions + SshdConfig *osbuild.SshdConfigStageOptions + GCPGuestAgentConfig *osbuild.GcpGuestAgentConfigOptions + AuthConfig *osbuild.AuthconfigStageOptions + PwQuality *osbuild.PwqualityConfStageOptions + NTPServers []osbuild.ChronyConfigServer + WAAgentConfig *osbuild.WAAgentConfStageOptions + UdevRules *osbuild.UdevRulesStageOptions + WSLConfig *osbuild.WSLConfStageOptions + LeapSecTZ *string + FactAPIType *facts.APIType + Presets []osbuild.Preset + ContainersStorage *string + + // OpenSCAP config + OpenSCAPTailorConfig *oscap.TailoringConfig + OpenSCAPRemediationConfig *oscap.RemediationConfig Subscription *subscription.ImageOptions RHSMConfig map[subscription.RHSMStatus]*osbuild.RHSMStageOptions @@ -230,7 +233,7 @@ func (p *OS) getPackageSetChain(Distro) []rpmmd.PackageSet { packages = append(packages, fmt.Sprintf("selinux-policy-%s", p.SElinux)) } - if p.OpenSCAPConfig != nil { + if p.OpenSCAPRemediationConfig != nil { packages = append(packages, "openscap-scanner", "scap-security-guide", "xz") } @@ -246,6 +249,16 @@ func (p *OS) getPackageSetChain(Distro) []rpmmd.PackageSet { } } + if len(p.Users) > 0 { + // org.osbuild.users runs useradd, usermod, passwd, and + // mkhomedir_helper in the os tree using chroot. Most image types + // should already have the required packages, but some minimal image + // types, like 'tar' don't, so let's add them for the stage to run and + // to enable user management in the image. + packages = append(packages, "shadow-utils", "pam") + + } + osRepos := append(p.repos, p.ExtraBaseRepos...) chain := []rpmmd.PackageSet{ @@ -795,19 +808,22 @@ func (p *OS) serialize() osbuild.Pipeline { } if p.OpenSCAPTailorConfig != nil { - if p.OpenSCAPConfig == nil { + if p.OpenSCAPRemediationConfig == nil { // This is a programming error, since it doesn't make sense // to have tailoring configs without openscap config. 
            panic(fmt.Errorf("OpenSCAP autotailoring cannot be set if no OpenSCAP config has been provided"))
        }
-        pipeline.AddStage(osbuild.NewOscapAutotailorStage(p.OpenSCAPTailorConfig))
+
+        tailoringStageOpts := osbuild.NewOscapAutotailorStageOptions(p.OpenSCAPTailorConfig)
+        pipeline.AddStage(osbuild.NewOscapAutotailorStage(tailoringStageOpts))
     }
     // NOTE: We need to run the OpenSCAP stages as the last stage before SELinux
     // since the remediation may change file permissions and other aspects of the
     // hardened image
-    if p.OpenSCAPConfig != nil {
-        pipeline.AddStage(osbuild.NewOscapRemediationStage(p.OpenSCAPConfig))
+    if p.OpenSCAPRemediationConfig != nil {
+        remediationStageOpts := osbuild.NewOscapRemediationStageOptions(oscap.DataDir, p.OpenSCAPRemediationConfig)
+        pipeline.AddStage(osbuild.NewOscapRemediationStage(remediationStageOpts))
     }
     if len(p.Presets) != 0 {
diff --git a/vendor/github.com/osbuild/images/pkg/manifest/raw_bootc.go b/vendor/github.com/osbuild/images/pkg/manifest/raw_bootc.go
index 00245fca9..60a8d2c40 100644
--- a/vendor/github.com/osbuild/images/pkg/manifest/raw_bootc.go
+++ b/vendor/github.com/osbuild/images/pkg/manifest/raw_bootc.go
@@ -142,11 +142,11 @@ func (p *RawBootcImage) serialize() osbuild.Pipeline {
     inputs := osbuild.ContainerDeployInputs{
         Images: osbuild.NewContainersInputForSingleSource(p.containerSpecs[0]),
     }
-    devices, mounts, err := osbuild.GenBootupdDevicesMounts(p.filename, p.PartitionTable)
+    devices, mounts, err := osbuild.GenBootupdDevicesMounts(p.filename, p.PartitionTable, p.platform)
     if err != nil {
         panic(err)
     }
-    st, err := osbuild.NewBootcInstallToFilesystemStage(opts, inputs, devices, mounts)
+    st, err := osbuild.NewBootcInstallToFilesystemStage(opts, inputs, devices, mounts, p.platform)
     if err != nil {
         panic(err)
     }
@@ -158,7 +158,7 @@ func (p *RawBootcImage) serialize() osbuild.Pipeline {
     // all our customizations work directly on the mounted deployment
     // root from the image so generate the devices/mounts for all
-    devices, mounts, err = osbuild.GenBootupdDevicesMounts(p.filename, p.PartitionTable)
+    devices, mounts, err = osbuild.GenBootupdDevicesMounts(p.filename, p.PartitionTable, p.platform)
     if err != nil {
         panic(fmt.Sprintf("gen devices stage failed %v", err))
     }
diff --git a/vendor/github.com/osbuild/images/pkg/manifest/raw_ostree.go b/vendor/github.com/osbuild/images/pkg/manifest/raw_ostree.go
index a1e54c86e..ef4c7a2a5 100644
--- a/vendor/github.com/osbuild/images/pkg/manifest/raw_ostree.go
+++ b/vendor/github.com/osbuild/images/pkg/manifest/raw_ostree.go
@@ -122,7 +122,7 @@ func (p *RawOSTreeImage) serialize() osbuild.Pipeline {
 func (p *RawOSTreeImage) addBootupdStage(pipeline *osbuild.Pipeline) {
     pt := p.treePipeline.PartitionTable
-    treeBootupdDevices, treeBootupdMounts, err := osbuild.GenBootupdDevicesMounts(p.Filename(), pt)
+    treeBootupdDevices, treeBootupdMounts, err := osbuild.GenBootupdDevicesMounts(p.Filename(), pt, p.platform)
     if err != nil {
         panic(err)
     }
@@ -138,7 +138,7 @@ func (p *RawOSTreeImage) addBootupdStage(pipeline *osbuild.Pipeline) {
             Device: "disk",
         }
     }
-    bootupd, err := osbuild.NewBootupdStage(opts, treeBootupdDevices, treeBootupdMounts)
+    bootupd, err := osbuild.NewBootupdStage(opts, treeBootupdDevices, treeBootupdMounts, p.platform)
     if err != nil {
         panic(err)
     }
diff --git a/vendor/github.com/osbuild/images/pkg/osbuild/bootc_install_to_filesystem_stage.go b/vendor/github.com/osbuild/images/pkg/osbuild/bootc_install_to_filesystem_stage.go
index 77090006a..1db46c04e 100644
--- a/vendor/github.com/osbuild/images/pkg/osbuild/bootc_install_to_filesystem_stage.go
+++ b/vendor/github.com/osbuild/images/pkg/osbuild/bootc_install_to_filesystem_stage.go
@@ -2,6 +2,8 @@ package osbuild
 
 import (
     "fmt"
+
+    "github.com/osbuild/images/pkg/platform"
 )
 
 type BootcInstallToFilesystemOptions struct {
@@ -24,8 +26,8 @@ func (BootcInstallToFilesystemOptions) isStageOptions() {}
 // bootc/bootupd find and install all required bootloader bits.
 //
 // The mounts input should be generated with GenBootupdDevicesMounts.
-func NewBootcInstallToFilesystemStage(options *BootcInstallToFilesystemOptions, inputs ContainerDeployInputs, devices map[string]Device, mounts []Mount) (*Stage, error) {
-    if err := validateBootupdMounts(mounts); err != nil {
+func NewBootcInstallToFilesystemStage(options *BootcInstallToFilesystemOptions, inputs ContainerDeployInputs, devices map[string]Device, mounts []Mount, pltf platform.Platform) (*Stage, error) {
+    if err := validateBootupdMounts(mounts, pltf); err != nil {
         return nil, err
     }
diff --git a/vendor/github.com/osbuild/images/pkg/osbuild/bootupd_stage.go b/vendor/github.com/osbuild/images/pkg/osbuild/bootupd_stage.go
index add209777..0cec11d89 100644
--- a/vendor/github.com/osbuild/images/pkg/osbuild/bootupd_stage.go
+++ b/vendor/github.com/osbuild/images/pkg/osbuild/bootupd_stage.go
@@ -6,6 +6,7 @@ import (
     "github.com/osbuild/images/internal/common"
     "github.com/osbuild/images/pkg/disk"
+    "github.com/osbuild/images/pkg/platform"
 )
 
 type BootupdStageOptionsBios struct {
@@ -38,11 +39,13 @@ func (opts *BootupdStageOptions) validate(devices map[string]Device) error {
 // validateBootupdMounts ensures that all required mounts for the bootup
 // stage are generated. Right now the stage requires root, boot and boot/efi
 // to find all the bootloader configs
-func validateBootupdMounts(mounts []Mount) error {
+func validateBootupdMounts(mounts []Mount, pf platform.Platform) error {
     requiredMounts := map[string]bool{
-        "/": true,
-        "/boot": true,
-        "/boot/efi": true,
+        "/": true,
+        "/boot": true,
+    }
+    if pf.GetUEFIVendor() != "" {
+        requiredMounts["/boot/efi"] = true
     }
     for _, mnt := range mounts {
         delete(requiredMounts, mnt.Target)
@@ -61,8 +64,8 @@ func validateBootupdMounts(mounts []Mount) error {
 // NewBootupdStage creates a new stage for the org.osbuild.bootupd stage. It
 // requires a mount setup of "/", "/boot" and "/boot/efi" right now so that
 // bootupd can find and install all required bootloader bits.
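Before the NewBootupdStage hunk itself continues below: the practical effect of threading platform.Platform through these helpers is that /boot/efi is only a required mount when the platform reports a UEFI vendor, so BIOS-only partition layouts now pass validation. A small usage sketch of the updated call shape seen in raw_bootc.go and raw_ostree.go above; the buildBootupdStage helper is hypothetical, and only the osbuild, disk and platform identifiers come from this diff:

package example

import (
	"github.com/osbuild/images/pkg/disk"
	"github.com/osbuild/images/pkg/osbuild"
	"github.com/osbuild/images/pkg/platform"
)

// buildBootupdStage generates bootupd devices/mounts for an image file and
// wraps them in the org.osbuild.bootupd stage, passing the platform so that
// validateBootupdMounts only demands /boot/efi when GetUEFIVendor() != "".
func buildBootupdStage(filename string, pt *disk.PartitionTable, pf platform.Platform, opts *osbuild.BootupdStageOptions) (*osbuild.Stage, error) {
	devices, mounts, err := osbuild.GenBootupdDevicesMounts(filename, pt, pf)
	if err != nil {
		return nil, err
	}
	return osbuild.NewBootupdStage(opts, devices, mounts, pf)
}

NewBootcInstallToFilesystemStage gains the same parameter and shares the same mount validation, so bootc disk images follow the identical rule.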
-func NewBootupdStage(opts *BootupdStageOptions, devices map[string]Device, mounts []Mount) (*Stage, error) {
-    if err := validateBootupdMounts(mounts); err != nil {
+func NewBootupdStage(opts *BootupdStageOptions, devices map[string]Device, mounts []Mount, pltf platform.Platform) (*Stage, error) {
+    if err := validateBootupdMounts(mounts, pltf); err != nil {
         return nil, err
     }
     if err := opts.validate(devices); err != nil {
@@ -108,7 +111,7 @@ func genMountsForBootupd(source string, pt *disk.PartitionTable) ([]Mount, error
     return mounts, nil
 }
 
-func GenBootupdDevicesMounts(filename string, pt *disk.PartitionTable) (map[string]Device, []Mount, error) {
+func GenBootupdDevicesMounts(filename string, pt *disk.PartitionTable, pltf platform.Platform) (map[string]Device, []Mount, error) {
     devName := "disk"
     devices := map[string]Device{
         devName: Device{
@@ -123,7 +126,7 @@ func GenBootupdDevicesMounts(filename string, pt *disk.PartitionTable) (map[stri
     if err != nil {
         return nil, nil, err
     }
-    if err := validateBootupdMounts(mounts); err != nil {
+    if err := validateBootupdMounts(mounts, pltf); err != nil {
         return nil, nil, err
     }
diff --git a/vendor/github.com/osbuild/images/pkg/osbuild/osbuild-exec.go b/vendor/github.com/osbuild/images/pkg/osbuild/osbuild-exec.go
index aa68bf423..7af282053 100644
--- a/vendor/github.com/osbuild/images/pkg/osbuild/osbuild-exec.go
+++ b/vendor/github.com/osbuild/images/pkg/osbuild/osbuild-exec.go
@@ -70,6 +70,9 @@ func RunOSBuild(manifest []byte, store, outputDirectory string, exports, checkpo
     if result {
         // try to decode the output even though the job could have failed
+        if stdoutBuffer.Len() == 0 {
+            return nil, fmt.Errorf("osbuild did not return any output")
+        }
         decodeErr := json.Unmarshal(stdoutBuffer.Bytes(), &res)
         if decodeErr != nil {
             return nil, fmt.Errorf("error decoding osbuild output: %v\nthe raw output:\n%s", decodeErr, stdoutBuffer.String())
diff --git a/vendor/github.com/osbuild/images/pkg/osbuild/oscap_autotailor_stage.go b/vendor/github.com/osbuild/images/pkg/osbuild/oscap_autotailor_stage.go
index 36b3d6544..66efa8bef 100644
--- a/vendor/github.com/osbuild/images/pkg/osbuild/oscap_autotailor_stage.go
+++ b/vendor/github.com/osbuild/images/pkg/osbuild/oscap_autotailor_stage.go
@@ -1,6 +1,10 @@
 package osbuild
 
-import "fmt"
+import (
+    "fmt"
+
+    "github.com/osbuild/images/pkg/customizations/oscap"
+)
 
 type OscapAutotailorStageOptions struct {
     Filepath string `json:"filepath"`
@@ -8,11 +12,11 @@ type OscapAutotailorStageOptions struct {
 }
 
 type OscapAutotailorConfig struct {
-    NewProfile string `json:"new_profile"`
-    Datastream string `json:"datastream" toml:"datastream"`
-    ProfileID string `json:"profile_id" toml:"profile_id"`
-    Selected []string `json:"selected,omitempty"`
-    Unselected []string `json:"unselected,omitempty"`
+    TailoredProfileID string `json:"new_profile"`
+    Datastream string `json:"datastream"`
+    ProfileID string `json:"profile_id"`
+    Selected []string `json:"selected,omitempty"`
+    Unselected []string `json:"unselected,omitempty"`
 }
 
 func (OscapAutotailorStageOptions) isStageOptions() {}
@@ -24,7 +28,7 @@ func (c OscapAutotailorConfig) validate() error {
     if c.ProfileID == "" {
         return fmt.Errorf("'profile_id' must be specified")
     }
-    if c.NewProfile == "" {
+    if c.TailoredProfileID == "" {
         return fmt.Errorf("'new_profile' must be specified")
     }
     return nil
@@ -41,15 +45,25 @@ func NewOscapAutotailorStage(options *OscapAutotailorStageOptions) *Stage {
     }
 }
 
-func NewOscapAutotailorStageOptions(filepath string, autotailorOptions OscapAutotailorConfig) *OscapAutotailorStageOptions {
+func NewOscapAutotailorStageOptions(options *oscap.TailoringConfig) *OscapAutotailorStageOptions {
+    if options == nil {
+        return nil
+    }
+
+    // TODO: don't panic! unfortunately this would involve quite
+    // a big refactor and we still need to be a bit defensive here
+    if options.RemediationConfig.TailoringPath == "" {
+        panic(fmt.Errorf("The tailoring path for the OpenSCAP remediation config cannot be empty, this is a programming error"))
+    }
+
     return &OscapAutotailorStageOptions{
-        Filepath: filepath,
+        Filepath: options.RemediationConfig.TailoringPath,
         Config: OscapAutotailorConfig{
-            NewProfile: autotailorOptions.NewProfile,
-            Datastream: autotailorOptions.Datastream,
-            ProfileID: autotailorOptions.ProfileID,
-            Selected: autotailorOptions.Selected,
-            Unselected: autotailorOptions.Unselected,
+            TailoredProfileID: options.TailoredProfileID,
+            Datastream: options.RemediationConfig.Datastream,
+            ProfileID: options.RemediationConfig.ProfileID,
+            Selected: options.Selected,
+            Unselected: options.Unselected,
         },
     }
 }
diff --git a/vendor/github.com/osbuild/images/pkg/osbuild/oscap_remediation_stage.go b/vendor/github.com/osbuild/images/pkg/osbuild/oscap_remediation_stage.go
index b1408218b..fb51094fb 100644
--- a/vendor/github.com/osbuild/images/pkg/osbuild/oscap_remediation_stage.go
+++ b/vendor/github.com/osbuild/images/pkg/osbuild/oscap_remediation_stage.go
@@ -1,6 +1,10 @@
 package osbuild
 
-import "fmt"
+import (
+    "fmt"
+
+    "github.com/osbuild/images/pkg/customizations/oscap"
+)
 
 type OscapVerbosityLevel string
@@ -72,21 +76,18 @@ func NewOscapRemediationStage(options *OscapRemediationStageOptions) *Stage {
     }
 }
 
-func NewOscapRemediationStageOptions(dataDir string, options OscapConfig) *OscapRemediationStageOptions {
+func NewOscapRemediationStageOptions(dataDir string, options *oscap.RemediationConfig) *OscapRemediationStageOptions {
+    if options == nil {
+        return nil
+    }
+
     return &OscapRemediationStageOptions{
         DataDir: dataDir,
         Config: OscapConfig{
-            ProfileID: options.ProfileID,
-            Datastream: options.Datastream,
-            DatastreamID: options.DatastreamID,
-            Tailoring: options.Tailoring,
-            XCCDFID: options.XCCDFID,
-            BenchmarkID: options.BenchmarkID,
-            ArfResult: options.ArfResult,
-            HtmlReport: options.HtmlReport,
-            VerboseLog: options.VerboseLog,
-            VerboseLevel: options.VerboseLevel,
-            Compression: options.Compression,
+            ProfileID: options.ProfileID,
+            Datastream: options.Datastream,
+            Tailoring: options.TailoringPath,
+            Compression: options.CompressionEnabled,
         },
     }
 }
diff --git a/vendor/github.com/osbuild/images/pkg/osbuild/result_test_data.go b/vendor/github.com/osbuild/images/pkg/osbuild/result_test_data.go
index d4b8a4e04..f9fb230b3 100644
--- a/vendor/github.com/osbuild/images/pkg/osbuild/result_test_data.go
+++ b/vendor/github.com/osbuild/images/pkg/osbuild/result_test_data.go
@@ -11,9 +11,7 @@ const v1ResultSuccess = `
     "id": "6efecc9910457689fa56b362cfed5f0c9af2c9f1ebfe35bd5d54558b1fcbda06",
     "options": {
         "gpgkeys": [
-            "-----BEGIN PGP PUBLIC KEY 
BLOCK-----\n\nmQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF\n0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF\n0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c\nu7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh\nXGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H\n5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW\n9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj\n/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1\nPcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY\nHVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF\nbuhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjYEEwECACAFAkrgSTsCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAK\nCRAZni+R/UMdUWzpD/9s5SFR/ZF3yjY5VLUFLMXIKUztNN3oc45fyLdTI3+UClKC\n2tEruzYjqNHhqAEXa2sN1fMrsuKec61Ll2NfvJjkLKDvgVIh7kM7aslNYVOP6BTf\nC/JJ7/ufz3UZmyViH/WDl+AYdgk3JqCIO5w5ryrC9IyBzYv2m0HqYbWfphY3uHw5\nun3ndLJcu8+BGP5F+ONQEGl+DRH58Il9Jp3HwbRa7dvkPgEhfFR+1hI+Btta2C7E\n0/2NKzCxZw7Lx3PBRcU92YKyaEihfy/aQKZCAuyfKiMvsmzs+4poIX7I9NQCJpyE\nIGfINoZ7VxqHwRn/d5mw2MZTJjbzSf+Um9YJyA0iEEyD6qjriWQRbuxpQXmlAJbh\n8okZ4gbVFv1F8MzK+4R8VvWJ0XxgtikSo72fHjwha7MAjqFnOq6eo6fEC/75g3NL\nGht5VdpGuHk0vbdENHMC8wS99e5qXGNDued3hlTavDMlEAHl34q2H9nakTGRF5Ki\nJUfNh3DVRGhg8cMIti21njiRh7gyFI2OccATY7bBSr79JhuNwelHuxLrCFpY7V25\nOFktl15jZJaMxuQBqYdBgSay2G0U6D1+7VsWufpzd/Abx1/c3oi9ZaJvW22kAggq\ndzdA27UUYjWvx42w9menJwh/0jeQcTecIUd0d0rFcw/c1pvgMMl/Q73yzKgKYw==\n=zbHE\n-----END PGP PUBLIC KEY BLOCK-----\n-----BEGIN PGP PUBLIC KEY BLOCK-----\n\nmQINBFsy23UBEACUKSphFEIEvNpy68VeW4Dt6qv+mU6am9a2AAl10JANLj1oqWX+\noYk3en1S6cVe2qehSL5DGVa3HMUZkP3dtbD4SgzXzxPodebPcr4+0QNWigkUisri\nXGL5SCEcOP30zDhZvg+4mpO2jMi7Kc1DLPzBBkgppcX91wa0L1pQzBcvYMPyV/Dh\nKbQHR75WdkP6OA2JXdfC94nxYq+2e0iPqC1hCP3Elh+YnSkOkrawDPmoB1g4+ft/\nxsiVGVy/W0ekXmgvYEHt6si6Y8NwXgnTMqxeSXQ9YUgVIbTpsxHQKGy76T5lMlWX\n4LCOmEVomBJg1SqF6yi9Vu8TeNThaDqT4/DddYInd0OO69s0kGIXalVgGYiW2HOD\nx2q5R1VGCoJxXomz+EbOXY+HpKPOHAjU0DB9MxbU3S248LQ69nIB5uxysy0PSco1\nsdZ8sxRNQ9Dw6on0Nowx5m6Thefzs5iK3dnPGBqHTT43DHbnWc2scjQFG+eZhe98\nEll/kb6vpBoY4bG9/wCG9qu7jj9Z+BceCNKeHllbezVLCU/Hswivr7h2dnaEFvPD\nO4GqiWiwOF06XaBMVgxA8p2HRw0KtXqOpZk+o+sUvdPjsBw42BB96A1yFX4jgFNA\nPyZYnEUdP6OOv9HSjnl7k/iEkvHq/jGYMMojixlvXpGXhnt5jNyc4GSUJQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChhdXhpbGlhcnkga2V5KSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjkEEwECACMFAlsy23UCGwMHCwkIBwMCAQYVCAIJCgsEFgIDAQIeAQIX\ngAAKCRD3b2bD1AgnknqOD/9fB2ASuG2aJIiap4kK58R+RmOVM4qgclAnaG57+vjI\nnKvyfV3NH/keplGNRxwqHekfPCqvkpABwhdGEXIE8ILqnPewIMr6PZNZWNJynZ9i\neSMzVuCG7jDoGyQ5/6B0f6xeBtTeBDiRl7+Alehet1twuGL1BJUYG0QuLgcEzkaE\n/gkuumeVcazLzz7L12D22nMk66GxmgXfqS5zcbqOAuZwaA6VgSEgFdV2X2JU79zS\nBQJXv7NKc+nDXFG7M7EHjY3Rma3HXkDbkT8bzh9tJV7Z7TlpT829pStWQyoxKCVq\nsEX8WsSapTKA3P9YkYCwLShgZu4HKRFvHMaIasSIZWzLu+RZH/4yyHOhj0QB7XMY\neHQ6fGSbtJ+K6SrpHOOsKQNAJ0hVbSrnA1cr5+2SDfel1RfYt0W9FA6DoH/S5gAR\ndzT1u44QVwwp3U+eFpHphFy//uzxNMtCjjdkpzhYYhOCLNkDrlRPb+bcoL/6ePSr\n016PA7eEnuC305YU1Ml2WcCn7wQV8x90o33klJmEkWtXh3X39vYtI4nCPIvZn1eP\nVy+F+wWt4vN2b8oOdlzc2paOembbCo2B+Wapv5Y9peBvlbsDSgqtJABfK8KQq/jK\nYl3h5elIa1I3uNfczeHOnf1enLOUOlq630yeM/yHizz99G1g+z/guMh5+x/OHraW\niLkCDQRbMtt1ARAA1lNsWklhS9LoBdolTVtg65FfdFJr47pzKRGYIoGLbcJ155ND\nG+P8UrM06E/ah06EEWuvu2YyyYAz1iYGsCwHAXtbEJh+1tF0iOVx2vnZPgtIGE9V\nP95V5ZvWvB3bdke1z8HadDA+/Ve7fbwXXLa/z9QhSQgsJ8NS8KYnDDjI4EvQtv0i\nPVLY8+u8z6VyiV9RJyn8UEZEJdbFDF9AZAT8103w8SEo/cvIoUbVKZLGcXdAIjCa\ny04u6jsrMp9UGHZX7+srT+9YHD
zQixei4IdmxUcqtiNR2/bFHpHCu1pzYjXj968D\n8Ng2txBXDgs16BF/9l++GWKz2dOSH0jdS6sFJ/Dmg7oYnJ2xKSJEmcnV8Z0M1n4w\nXR1t/KeKZe3aR+RXCAEVC5dQ3GbRW2+WboJ6ldgFcVcOv6iOSWP9TrLzFPOpCsIr\nnHE+cMBmPHq3dUm7KeYXQ6wWWmtXlw6widf7cBcGFeELpuU9klzqdKze8qo2oMkf\nrfxIq8zdciPxZXb/75dGWs6dLHQmDpo4MdQVskw5vvwHicMpUpGpxkX7X1XAfdQf\nyIHLGT4ZXuMLIMUPdzJE0Vwt/RtJrZ+feLSv/+0CkkpGHORYroGwIBrJ2RikgcV2\nbc98V/27Kz2ngUCEwnmlhIcrY4IGAAZzUAl0GLHSevPbAREu4fDW4Y+ztOsAEQEA\nAYkCHwQYAQIACQUCWzLbdQIbDAAKCRD3b2bD1AgnkusfD/9U4sPtZfMw6cII167A\nXRZOO195G7oiAnBUw5AW6EK0SAHVZcuW0LMMXnGe9f4UsEUgCNwo5mvLWPxzKqFq\n6/G3kEZVFwZ0qrlLoJPeHNbOcfkeZ9NgD/OhzQmdylM0IwGM9DMrm2YS4EVsmm2b\n53qKIfIyysp1yAGcTnBwBbZ85osNBl2KRDIPhMs0bnmGB7IAvwlSb+xm6vWKECkO\nlwQDO5Kg8YZ8+Z3pn/oS688t/fPXvWLZYUqwR63oWfIaPJI7Ahv2jJmgw1ofL81r\n2CE3T/OydtUeGLzqWJAB8sbUgT3ug0cjtxsHuroQBSYBND3XDb/EQh5GeVVnGKKH\ngESLFAoweoNjDSXrlIu1gFjCDHF4CqBRmNYKrNQjLmhCrSfwkytXESJwlLzFKY8P\nK1yZyTpDC9YK0G7qgrk7EHmH9JAZTQ5V65pp0vR9KvqTU5ewkQDIljD2f3FIqo2B\nSKNCQE+N6NjWaTeNlU75m+yZocKObSPg0zS8FAuSJetNtzXA7ouqk34OoIMQj4gq\nUnh/i1FcZAd4U6Dtr9aRZ6PeLlm6MJ/h582L6fJLNEu136UWDtJj5eBYEzX13l+d\nSC4PEHx7ZZRwQKptl9NkinLZGJztg175paUu8C34sAv+SQnM20c0pdOXAq9GKKhi\nvt61kpkXoRGxjTlc6h+69aidSg==\n=ls8J\n-----END PGP PUBLIC KEY BLOCK-----\n", - "-----BEGIN PGP PUBLIC KEY BLOCK-----\n\nmQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF\n0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF\n0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c\nu7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh\nXGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H\n5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW\n9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj\n/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1\nPcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY\nHVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF\nbuhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjYEEwECACAFAkrgSTsCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAK\nCRAZni+R/UMdUWzpD/9s5SFR/ZF3yjY5VLUFLMXIKUztNN3oc45fyLdTI3+UClKC\n2tEruzYjqNHhqAEXa2sN1fMrsuKec61Ll2NfvJjkLKDvgVIh7kM7aslNYVOP6BTf\nC/JJ7/ufz3UZmyViH/WDl+AYdgk3JqCIO5w5ryrC9IyBzYv2m0HqYbWfphY3uHw5\nun3ndLJcu8+BGP5F+ONQEGl+DRH58Il9Jp3HwbRa7dvkPgEhfFR+1hI+Btta2C7E\n0/2NKzCxZw7Lx3PBRcU92YKyaEihfy/aQKZCAuyfKiMvsmzs+4poIX7I9NQCJpyE\nIGfINoZ7VxqHwRn/d5mw2MZTJjbzSf+Um9YJyA0iEEyD6qjriWQRbuxpQXmlAJbh\n8okZ4gbVFv1F8MzK+4R8VvWJ0XxgtikSo72fHjwha7MAjqFnOq6eo6fEC/75g3NL\nGht5VdpGuHk0vbdENHMC8wS99e5qXGNDued3hlTavDMlEAHl34q2H9nakTGRF5Ki\nJUfNh3DVRGhg8cMIti21njiRh7gyFI2OccATY7bBSr79JhuNwelHuxLrCFpY7V25\nOFktl15jZJaMxuQBqYdBgSay2G0U6D1+7VsWufpzd/Abx1/c3oi9ZaJvW22kAggq\ndzdA27UUYjWvx42w9menJwh/0jeQcTecIUd0d0rFcw/c1pvgMMl/Q73yzKgKYw==\n=zbHE\n-----END PGP PUBLIC KEY BLOCK-----\n-----BEGIN PGP PUBLIC KEY 
BLOCK-----\n\nmQINBFsy23UBEACUKSphFEIEvNpy68VeW4Dt6qv+mU6am9a2AAl10JANLj1oqWX+\noYk3en1S6cVe2qehSL5DGVa3HMUZkP3dtbD4SgzXzxPodebPcr4+0QNWigkUisri\nXGL5SCEcOP30zDhZvg+4mpO2jMi7Kc1DLPzBBkgppcX91wa0L1pQzBcvYMPyV/Dh\nKbQHR75WdkP6OA2JXdfC94nxYq+2e0iPqC1hCP3Elh+YnSkOkrawDPmoB1g4+ft/\nxsiVGVy/W0ekXmgvYEHt6si6Y8NwXgnTMqxeSXQ9YUgVIbTpsxHQKGy76T5lMlWX\n4LCOmEVomBJg1SqF6yi9Vu8TeNThaDqT4/DddYInd0OO69s0kGIXalVgGYiW2HOD\nx2q5R1VGCoJxXomz+EbOXY+HpKPOHAjU0DB9MxbU3S248LQ69nIB5uxysy0PSco1\nsdZ8sxRNQ9Dw6on0Nowx5m6Thefzs5iK3dnPGBqHTT43DHbnWc2scjQFG+eZhe98\nEll/kb6vpBoY4bG9/wCG9qu7jj9Z+BceCNKeHllbezVLCU/Hswivr7h2dnaEFvPD\nO4GqiWiwOF06XaBMVgxA8p2HRw0KtXqOpZk+o+sUvdPjsBw42BB96A1yFX4jgFNA\nPyZYnEUdP6OOv9HSjnl7k/iEkvHq/jGYMMojixlvXpGXhnt5jNyc4GSUJQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChhdXhpbGlhcnkga2V5KSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjkEEwECACMFAlsy23UCGwMHCwkIBwMCAQYVCAIJCgsEFgIDAQIeAQIX\ngAAKCRD3b2bD1AgnknqOD/9fB2ASuG2aJIiap4kK58R+RmOVM4qgclAnaG57+vjI\nnKvyfV3NH/keplGNRxwqHekfPCqvkpABwhdGEXIE8ILqnPewIMr6PZNZWNJynZ9i\neSMzVuCG7jDoGyQ5/6B0f6xeBtTeBDiRl7+Alehet1twuGL1BJUYG0QuLgcEzkaE\n/gkuumeVcazLzz7L12D22nMk66GxmgXfqS5zcbqOAuZwaA6VgSEgFdV2X2JU79zS\nBQJXv7NKc+nDXFG7M7EHjY3Rma3HXkDbkT8bzh9tJV7Z7TlpT829pStWQyoxKCVq\nsEX8WsSapTKA3P9YkYCwLShgZu4HKRFvHMaIasSIZWzLu+RZH/4yyHOhj0QB7XMY\neHQ6fGSbtJ+K6SrpHOOsKQNAJ0hVbSrnA1cr5+2SDfel1RfYt0W9FA6DoH/S5gAR\ndzT1u44QVwwp3U+eFpHphFy//uzxNMtCjjdkpzhYYhOCLNkDrlRPb+bcoL/6ePSr\n016PA7eEnuC305YU1Ml2WcCn7wQV8x90o33klJmEkWtXh3X39vYtI4nCPIvZn1eP\nVy+F+wWt4vN2b8oOdlzc2paOembbCo2B+Wapv5Y9peBvlbsDSgqtJABfK8KQq/jK\nYl3h5elIa1I3uNfczeHOnf1enLOUOlq630yeM/yHizz99G1g+z/guMh5+x/OHraW\niLkCDQRbMtt1ARAA1lNsWklhS9LoBdolTVtg65FfdFJr47pzKRGYIoGLbcJ155ND\nG+P8UrM06E/ah06EEWuvu2YyyYAz1iYGsCwHAXtbEJh+1tF0iOVx2vnZPgtIGE9V\nP95V5ZvWvB3bdke1z8HadDA+/Ve7fbwXXLa/z9QhSQgsJ8NS8KYnDDjI4EvQtv0i\nPVLY8+u8z6VyiV9RJyn8UEZEJdbFDF9AZAT8103w8SEo/cvIoUbVKZLGcXdAIjCa\ny04u6jsrMp9UGHZX7+srT+9YHDzQixei4IdmxUcqtiNR2/bFHpHCu1pzYjXj968D\n8Ng2txBXDgs16BF/9l++GWKz2dOSH0jdS6sFJ/Dmg7oYnJ2xKSJEmcnV8Z0M1n4w\nXR1t/KeKZe3aR+RXCAEVC5dQ3GbRW2+WboJ6ldgFcVcOv6iOSWP9TrLzFPOpCsIr\nnHE+cMBmPHq3dUm7KeYXQ6wWWmtXlw6widf7cBcGFeELpuU9klzqdKze8qo2oMkf\nrfxIq8zdciPxZXb/75dGWs6dLHQmDpo4MdQVskw5vvwHicMpUpGpxkX7X1XAfdQf\nyIHLGT4ZXuMLIMUPdzJE0Vwt/RtJrZ+feLSv/+0CkkpGHORYroGwIBrJ2RikgcV2\nbc98V/27Kz2ngUCEwnmlhIcrY4IGAAZzUAl0GLHSevPbAREu4fDW4Y+ztOsAEQEA\nAYkCHwQYAQIACQUCWzLbdQIbDAAKCRD3b2bD1AgnkusfD/9U4sPtZfMw6cII167A\nXRZOO195G7oiAnBUw5AW6EK0SAHVZcuW0LMMXnGe9f4UsEUgCNwo5mvLWPxzKqFq\n6/G3kEZVFwZ0qrlLoJPeHNbOcfkeZ9NgD/OhzQmdylM0IwGM9DMrm2YS4EVsmm2b\n53qKIfIyysp1yAGcTnBwBbZ85osNBl2KRDIPhMs0bnmGB7IAvwlSb+xm6vWKECkO\nlwQDO5Kg8YZ8+Z3pn/oS688t/fPXvWLZYUqwR63oWfIaPJI7Ahv2jJmgw1ofL81r\n2CE3T/OydtUeGLzqWJAB8sbUgT3ug0cjtxsHuroQBSYBND3XDb/EQh5GeVVnGKKH\ngESLFAoweoNjDSXrlIu1gFjCDHF4CqBRmNYKrNQjLmhCrSfwkytXESJwlLzFKY8P\nK1yZyTpDC9YK0G7qgrk7EHmH9JAZTQ5V65pp0vR9KvqTU5ewkQDIljD2f3FIqo2B\nSKNCQE+N6NjWaTeNlU75m+yZocKObSPg0zS8FAuSJetNtzXA7ouqk34OoIMQj4gq\nUnh/i1FcZAd4U6Dtr9aRZ6PeLlm6MJ/h582L6fJLNEu136UWDtJj5eBYEzX13l+d\nSC4PEHx7ZZRwQKptl9NkinLZGJztg175paUu8C34sAv+SQnM20c0pdOXAq9GKKhi\nvt61kpkXoRGxjTlc6h+69aidSg==\n=ls8J\n-----END PGP PUBLIC KEY BLOCK-----\n", - "-----BEGIN PGP PUBLIC KEY 
BLOCK-----\n\nmQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF\n0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF\n0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c\nu7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh\nXGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H\n5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW\n9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj\n/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1\nPcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY\nHVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF\nbuhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjYEEwECACAFAkrgSTsCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAK\nCRAZni+R/UMdUWzpD/9s5SFR/ZF3yjY5VLUFLMXIKUztNN3oc45fyLdTI3+UClKC\n2tEruzYjqNHhqAEXa2sN1fMrsuKec61Ll2NfvJjkLKDvgVIh7kM7aslNYVOP6BTf\nC/JJ7/ufz3UZmyViH/WDl+AYdgk3JqCIO5w5ryrC9IyBzYv2m0HqYbWfphY3uHw5\nun3ndLJcu8+BGP5F+ONQEGl+DRH58Il9Jp3HwbRa7dvkPgEhfFR+1hI+Btta2C7E\n0/2NKzCxZw7Lx3PBRcU92YKyaEihfy/aQKZCAuyfKiMvsmzs+4poIX7I9NQCJpyE\nIGfINoZ7VxqHwRn/d5mw2MZTJjbzSf+Um9YJyA0iEEyD6qjriWQRbuxpQXmlAJbh\n8okZ4gbVFv1F8MzK+4R8VvWJ0XxgtikSo72fHjwha7MAjqFnOq6eo6fEC/75g3NL\nGht5VdpGuHk0vbdENHMC8wS99e5qXGNDued3hlTavDMlEAHl34q2H9nakTGRF5Ki\nJUfNh3DVRGhg8cMIti21njiRh7gyFI2OccATY7bBSr79JhuNwelHuxLrCFpY7V25\nOFktl15jZJaMxuQBqYdBgSay2G0U6D1+7VsWufpzd/Abx1/c3oi9ZaJvW22kAggq\ndzdA27UUYjWvx42w9menJwh/0jeQcTecIUd0d0rFcw/c1pvgMMl/Q73yzKgKYw==\n=zbHE\n-----END PGP PUBLIC KEY BLOCK-----\n-----BEGIN PGP PUBLIC KEY BLOCK-----\n\nmQINBFsy23UBEACUKSphFEIEvNpy68VeW4Dt6qv+mU6am9a2AAl10JANLj1oqWX+\noYk3en1S6cVe2qehSL5DGVa3HMUZkP3dtbD4SgzXzxPodebPcr4+0QNWigkUisri\nXGL5SCEcOP30zDhZvg+4mpO2jMi7Kc1DLPzBBkgppcX91wa0L1pQzBcvYMPyV/Dh\nKbQHR75WdkP6OA2JXdfC94nxYq+2e0iPqC1hCP3Elh+YnSkOkrawDPmoB1g4+ft/\nxsiVGVy/W0ekXmgvYEHt6si6Y8NwXgnTMqxeSXQ9YUgVIbTpsxHQKGy76T5lMlWX\n4LCOmEVomBJg1SqF6yi9Vu8TeNThaDqT4/DddYInd0OO69s0kGIXalVgGYiW2HOD\nx2q5R1VGCoJxXomz+EbOXY+HpKPOHAjU0DB9MxbU3S248LQ69nIB5uxysy0PSco1\nsdZ8sxRNQ9Dw6on0Nowx5m6Thefzs5iK3dnPGBqHTT43DHbnWc2scjQFG+eZhe98\nEll/kb6vpBoY4bG9/wCG9qu7jj9Z+BceCNKeHllbezVLCU/Hswivr7h2dnaEFvPD\nO4GqiWiwOF06XaBMVgxA8p2HRw0KtXqOpZk+o+sUvdPjsBw42BB96A1yFX4jgFNA\nPyZYnEUdP6OOv9HSjnl7k/iEkvHq/jGYMMojixlvXpGXhnt5jNyc4GSUJQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChhdXhpbGlhcnkga2V5KSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjkEEwECACMFAlsy23UCGwMHCwkIBwMCAQYVCAIJCgsEFgIDAQIeAQIX\ngAAKCRD3b2bD1AgnknqOD/9fB2ASuG2aJIiap4kK58R+RmOVM4qgclAnaG57+vjI\nnKvyfV3NH/keplGNRxwqHekfPCqvkpABwhdGEXIE8ILqnPewIMr6PZNZWNJynZ9i\neSMzVuCG7jDoGyQ5/6B0f6xeBtTeBDiRl7+Alehet1twuGL1BJUYG0QuLgcEzkaE\n/gkuumeVcazLzz7L12D22nMk66GxmgXfqS5zcbqOAuZwaA6VgSEgFdV2X2JU79zS\nBQJXv7NKc+nDXFG7M7EHjY3Rma3HXkDbkT8bzh9tJV7Z7TlpT829pStWQyoxKCVq\nsEX8WsSapTKA3P9YkYCwLShgZu4HKRFvHMaIasSIZWzLu+RZH/4yyHOhj0QB7XMY\neHQ6fGSbtJ+K6SrpHOOsKQNAJ0hVbSrnA1cr5+2SDfel1RfYt0W9FA6DoH/S5gAR\ndzT1u44QVwwp3U+eFpHphFy//uzxNMtCjjdkpzhYYhOCLNkDrlRPb+bcoL/6ePSr\n016PA7eEnuC305YU1Ml2WcCn7wQV8x90o33klJmEkWtXh3X39vYtI4nCPIvZn1eP\nVy+F+wWt4vN2b8oOdlzc2paOembbCo2B+Wapv5Y9peBvlbsDSgqtJABfK8KQq/jK\nYl3h5elIa1I3uNfczeHOnf1enLOUOlq630yeM/yHizz99G1g+z/guMh5+x/OHraW\niLkCDQRbMtt1ARAA1lNsWklhS9LoBdolTVtg65FfdFJr47pzKRGYIoGLbcJ155ND\nG+P8UrM06E/ah06EEWuvu2YyyYAz1iYGsCwHAXtbEJh+1tF0iOVx2vnZPgtIGE9V\nP95V5ZvWvB3bdke1z8HadDA+/Ve7fbwXXLa/z9QhSQgsJ8NS8KYnDDjI4EvQtv0i\nPVLY8+u8z6VyiV9RJyn8UEZEJdbFDF9AZAT8103w8SEo/cvIoUbVKZLGcXdAIjCa\ny04u6jsrMp9UGHZX7+srT+9YHD
zQixei4IdmxUcqtiNR2/bFHpHCu1pzYjXj968D\n8Ng2txBXDgs16BF/9l++GWKz2dOSH0jdS6sFJ/Dmg7oYnJ2xKSJEmcnV8Z0M1n4w\nXR1t/KeKZe3aR+RXCAEVC5dQ3GbRW2+WboJ6ldgFcVcOv6iOSWP9TrLzFPOpCsIr\nnHE+cMBmPHq3dUm7KeYXQ6wWWmtXlw6widf7cBcGFeELpuU9klzqdKze8qo2oMkf\nrfxIq8zdciPxZXb/75dGWs6dLHQmDpo4MdQVskw5vvwHicMpUpGpxkX7X1XAfdQf\nyIHLGT4ZXuMLIMUPdzJE0Vwt/RtJrZ+feLSv/+0CkkpGHORYroGwIBrJ2RikgcV2\nbc98V/27Kz2ngUCEwnmlhIcrY4IGAAZzUAl0GLHSevPbAREu4fDW4Y+ztOsAEQEA\nAYkCHwQYAQIACQUCWzLbdQIbDAAKCRD3b2bD1AgnkusfD/9U4sPtZfMw6cII167A\nXRZOO195G7oiAnBUw5AW6EK0SAHVZcuW0LMMXnGe9f4UsEUgCNwo5mvLWPxzKqFq\n6/G3kEZVFwZ0qrlLoJPeHNbOcfkeZ9NgD/OhzQmdylM0IwGM9DMrm2YS4EVsmm2b\n53qKIfIyysp1yAGcTnBwBbZ85osNBl2KRDIPhMs0bnmGB7IAvwlSb+xm6vWKECkO\nlwQDO5Kg8YZ8+Z3pn/oS688t/fPXvWLZYUqwR63oWfIaPJI7Ahv2jJmgw1ofL81r\n2CE3T/OydtUeGLzqWJAB8sbUgT3ug0cjtxsHuroQBSYBND3XDb/EQh5GeVVnGKKH\ngESLFAoweoNjDSXrlIu1gFjCDHF4CqBRmNYKrNQjLmhCrSfwkytXESJwlLzFKY8P\nK1yZyTpDC9YK0G7qgrk7EHmH9JAZTQ5V65pp0vR9KvqTU5ewkQDIljD2f3FIqo2B\nSKNCQE+N6NjWaTeNlU75m+yZocKObSPg0zS8FAuSJetNtzXA7ouqk34OoIMQj4gq\nUnh/i1FcZAd4U6Dtr9aRZ6PeLlm6MJ/h582L6fJLNEu136UWDtJj5eBYEzX13l+d\nSC4PEHx7ZZRwQKptl9NkinLZGJztg175paUu8C34sAv+SQnM20c0pdOXAq9GKKhi\nvt61kpkXoRGxjTlc6h+69aidSg==\n=ls8J\n-----END PGP PUBLIC KEY BLOCK-----\n" + "-----BEGIN PGP PUBLIC KEY BLOCK-----\nVersion: GnuPG v2.0.22 (GNU/Linux)\n\nmQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF\n0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF\n0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c\nu7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh\nXGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H\n5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW\n9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj\n/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1\nPcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY\nHVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF\nbuhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjYEEwEIACACGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAUCSuBJPAAK\nCRAZni+R/UMdUfIkD/9m3HWv07uJG26R3KBexTo2FFu3rmZs+m2nfW8R3dBX+k0o\nAOFpgJCsNgKwU81LOPrkMN19G0+Yn/ZTCDD7cIQ7dhYuDyEX97xh4une/EhnnRuh\nASzR+1xYbj/HcYZIL9kbslgpebMn+AhxbUTQF/mziug3hLidR9Bzvygq0Q09E11c\nOZL4BU6J2HqxL+9m2F+tnLdfhL7MsAq9nbmWAOpkbGefc5SXBSq0sWfwoes3X3yD\nQ8B5Xqr9AxABU7oUB+wRqvY69ZCxi/BhuuJCUxY89ZmwXfkVxeHl1tYfROUwOnJO\nGYSbI/o41KBK4DkIiDcT7QqvqvCyudnxZdBjL2QU6OrIJvWmKs319qSF9m3mXRSt\nZzWtB89Pj5LZ6cdtuHvW9GO4qSoBLmAfB313pGkbgi1DE6tqCLHlA0yQ8zv99OWV\ncMDGmS7tVTZqfX1xQJ0N3bNORQNtikJC3G+zBCJzIeZleeDlMDQcww00yWU1oE7/\nTo2UmykMGc7o9iggFWR2g0PIcKsA/SXdRKWPqCHG2uKHBvdRTQGupdXQ1sbV+AHw\nycyA/9H/mp/NUSNM2cqnBDcZ6GhlHt59zWtEveiuU5fpTbp4GVcFXbW8jStj8j8z\n1HI3cywZO8+YNPzqyx0JWsidXGkfzkPHyS4jTG84lfu2JG8m/nqLnRSeKpl20Q==\n=79bX\n-----END PGP PUBLIC KEY BLOCK-----\n-----BEGIN PGP PUBLIC KEY 
BLOCK-----\n\nmQINBFsy23UBEACUKSphFEIEvNpy68VeW4Dt6qv+mU6am9a2AAl10JANLj1oqWX+\noYk3en1S6cVe2qehSL5DGVa3HMUZkP3dtbD4SgzXzxPodebPcr4+0QNWigkUisri\nXGL5SCEcOP30zDhZvg+4mpO2jMi7Kc1DLPzBBkgppcX91wa0L1pQzBcvYMPyV/Dh\nKbQHR75WdkP6OA2JXdfC94nxYq+2e0iPqC1hCP3Elh+YnSkOkrawDPmoB1g4+ft/\nxsiVGVy/W0ekXmgvYEHt6si6Y8NwXgnTMqxeSXQ9YUgVIbTpsxHQKGy76T5lMlWX\n4LCOmEVomBJg1SqF6yi9Vu8TeNThaDqT4/DddYInd0OO69s0kGIXalVgGYiW2HOD\nx2q5R1VGCoJxXomz+EbOXY+HpKPOHAjU0DB9MxbU3S248LQ69nIB5uxysy0PSco1\nsdZ8sxRNQ9Dw6on0Nowx5m6Thefzs5iK3dnPGBqHTT43DHbnWc2scjQFG+eZhe98\nEll/kb6vpBoY4bG9/wCG9qu7jj9Z+BceCNKeHllbezVLCU/Hswivr7h2dnaEFvPD\nO4GqiWiwOF06XaBMVgxA8p2HRw0KtXqOpZk+o+sUvdPjsBw42BB96A1yFX4jgFNA\nPyZYnEUdP6OOv9HSjnl7k/iEkvHq/jGYMMojixlvXpGXhnt5jNyc4GSUJQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChhdXhpbGlhcnkga2V5KSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjkEEwECACMFAlsy23UCGwMHCwkIBwMCAQYVCAIJCgsEFgIDAQIeAQIX\ngAAKCRD3b2bD1AgnknqOD/9fB2ASuG2aJIiap4kK58R+RmOVM4qgclAnaG57+vjI\nnKvyfV3NH/keplGNRxwqHekfPCqvkpABwhdGEXIE8ILqnPewIMr6PZNZWNJynZ9i\neSMzVuCG7jDoGyQ5/6B0f6xeBtTeBDiRl7+Alehet1twuGL1BJUYG0QuLgcEzkaE\n/gkuumeVcazLzz7L12D22nMk66GxmgXfqS5zcbqOAuZwaA6VgSEgFdV2X2JU79zS\nBQJXv7NKc+nDXFG7M7EHjY3Rma3HXkDbkT8bzh9tJV7Z7TlpT829pStWQyoxKCVq\nsEX8WsSapTKA3P9YkYCwLShgZu4HKRFvHMaIasSIZWzLu+RZH/4yyHOhj0QB7XMY\neHQ6fGSbtJ+K6SrpHOOsKQNAJ0hVbSrnA1cr5+2SDfel1RfYt0W9FA6DoH/S5gAR\ndzT1u44QVwwp3U+eFpHphFy//uzxNMtCjjdkpzhYYhOCLNkDrlRPb+bcoL/6ePSr\n016PA7eEnuC305YU1Ml2WcCn7wQV8x90o33klJmEkWtXh3X39vYtI4nCPIvZn1eP\nVy+F+wWt4vN2b8oOdlzc2paOembbCo2B+Wapv5Y9peBvlbsDSgqtJABfK8KQq/jK\nYl3h5elIa1I3uNfczeHOnf1enLOUOlq630yeM/yHizz99G1g+z/guMh5+x/OHraW\niLkCDQRbMtt1ARAA1lNsWklhS9LoBdolTVtg65FfdFJr47pzKRGYIoGLbcJ155ND\nG+P8UrM06E/ah06EEWuvu2YyyYAz1iYGsCwHAXtbEJh+1tF0iOVx2vnZPgtIGE9V\nP95V5ZvWvB3bdke1z8HadDA+/Ve7fbwXXLa/z9QhSQgsJ8NS8KYnDDjI4EvQtv0i\nPVLY8+u8z6VyiV9RJyn8UEZEJdbFDF9AZAT8103w8SEo/cvIoUbVKZLGcXdAIjCa\ny04u6jsrMp9UGHZX7+srT+9YHDzQixei4IdmxUcqtiNR2/bFHpHCu1pzYjXj968D\n8Ng2txBXDgs16BF/9l++GWKz2dOSH0jdS6sFJ/Dmg7oYnJ2xKSJEmcnV8Z0M1n4w\nXR1t/KeKZe3aR+RXCAEVC5dQ3GbRW2+WboJ6ldgFcVcOv6iOSWP9TrLzFPOpCsIr\nnHE+cMBmPHq3dUm7KeYXQ6wWWmtXlw6widf7cBcGFeELpuU9klzqdKze8qo2oMkf\nrfxIq8zdciPxZXb/75dGWs6dLHQmDpo4MdQVskw5vvwHicMpUpGpxkX7X1XAfdQf\nyIHLGT4ZXuMLIMUPdzJE0Vwt/RtJrZ+feLSv/+0CkkpGHORYroGwIBrJ2RikgcV2\nbc98V/27Kz2ngUCEwnmlhIcrY4IGAAZzUAl0GLHSevPbAREu4fDW4Y+ztOsAEQEA\nAYkCHwQYAQIACQUCWzLbdQIbDAAKCRD3b2bD1AgnkusfD/9U4sPtZfMw6cII167A\nXRZOO195G7oiAnBUw5AW6EK0SAHVZcuW0LMMXnGe9f4UsEUgCNwo5mvLWPxzKqFq\n6/G3kEZVFwZ0qrlLoJPeHNbOcfkeZ9NgD/OhzQmdylM0IwGM9DMrm2YS4EVsmm2b\n53qKIfIyysp1yAGcTnBwBbZ85osNBl2KRDIPhMs0bnmGB7IAvwlSb+xm6vWKECkO\nlwQDO5Kg8YZ8+Z3pn/oS688t/fPXvWLZYUqwR63oWfIaPJI7Ahv2jJmgw1ofL81r\n2CE3T/OydtUeGLzqWJAB8sbUgT3ug0cjtxsHuroQBSYBND3XDb/EQh5GeVVnGKKH\ngESLFAoweoNjDSXrlIu1gFjCDHF4CqBRmNYKrNQjLmhCrSfwkytXESJwlLzFKY8P\nK1yZyTpDC9YK0G7qgrk7EHmH9JAZTQ5V65pp0vR9KvqTU5ewkQDIljD2f3FIqo2B\nSKNCQE+N6NjWaTeNlU75m+yZocKObSPg0zS8FAuSJetNtzXA7ouqk34OoIMQj4gq\nUnh/i1FcZAd4U6Dtr9aRZ6PeLlm6MJ/h582L6fJLNEu136UWDtJj5eBYEzX13l+d\nSC4PEHx7ZZRwQKptl9NkinLZGJztg175paUu8C34sAv+SQnM20c0pdOXAq9GKKhi\nvt61kpkXoRGxjTlc6h+69aidSg==\n=ls8J\n-----END PGP PUBLIC KEY BLOCK-----" ], "packages": [ { @@ -58,9 +56,7 @@ const v1ResultSuccess = ` "id": "dcf25b0b73304c9ea2e02fe1c835c5234242697061d37849b23a2bd44de764ff", "options": { "gpgkeys": [ - "-----BEGIN PGP PUBLIC KEY 
BLOCK-----\n\nmQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF\n0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF\n0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c\nu7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh\nXGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H\n5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW\n9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj\n/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1\nPcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY\nHVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF\nbuhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjYEEwECACAFAkrgSTsCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAK\nCRAZni+R/UMdUWzpD/9s5SFR/ZF3yjY5VLUFLMXIKUztNN3oc45fyLdTI3+UClKC\n2tEruzYjqNHhqAEXa2sN1fMrsuKec61Ll2NfvJjkLKDvgVIh7kM7aslNYVOP6BTf\nC/JJ7/ufz3UZmyViH/WDl+AYdgk3JqCIO5w5ryrC9IyBzYv2m0HqYbWfphY3uHw5\nun3ndLJcu8+BGP5F+ONQEGl+DRH58Il9Jp3HwbRa7dvkPgEhfFR+1hI+Btta2C7E\n0/2NKzCxZw7Lx3PBRcU92YKyaEihfy/aQKZCAuyfKiMvsmzs+4poIX7I9NQCJpyE\nIGfINoZ7VxqHwRn/d5mw2MZTJjbzSf+Um9YJyA0iEEyD6qjriWQRbuxpQXmlAJbh\n8okZ4gbVFv1F8MzK+4R8VvWJ0XxgtikSo72fHjwha7MAjqFnOq6eo6fEC/75g3NL\nGht5VdpGuHk0vbdENHMC8wS99e5qXGNDued3hlTavDMlEAHl34q2H9nakTGRF5Ki\nJUfNh3DVRGhg8cMIti21njiRh7gyFI2OccATY7bBSr79JhuNwelHuxLrCFpY7V25\nOFktl15jZJaMxuQBqYdBgSay2G0U6D1+7VsWufpzd/Abx1/c3oi9ZaJvW22kAggq\ndzdA27UUYjWvx42w9menJwh/0jeQcTecIUd0d0rFcw/c1pvgMMl/Q73yzKgKYw==\n=zbHE\n-----END PGP PUBLIC KEY BLOCK-----\n-----BEGIN PGP PUBLIC KEY BLOCK-----\n\nmQINBFsy23UBEACUKSphFEIEvNpy68VeW4Dt6qv+mU6am9a2AAl10JANLj1oqWX+\noYk3en1S6cVe2qehSL5DGVa3HMUZkP3dtbD4SgzXzxPodebPcr4+0QNWigkUisri\nXGL5SCEcOP30zDhZvg+4mpO2jMi7Kc1DLPzBBkgppcX91wa0L1pQzBcvYMPyV/Dh\nKbQHR75WdkP6OA2JXdfC94nxYq+2e0iPqC1hCP3Elh+YnSkOkrawDPmoB1g4+ft/\nxsiVGVy/W0ekXmgvYEHt6si6Y8NwXgnTMqxeSXQ9YUgVIbTpsxHQKGy76T5lMlWX\n4LCOmEVomBJg1SqF6yi9Vu8TeNThaDqT4/DddYInd0OO69s0kGIXalVgGYiW2HOD\nx2q5R1VGCoJxXomz+EbOXY+HpKPOHAjU0DB9MxbU3S248LQ69nIB5uxysy0PSco1\nsdZ8sxRNQ9Dw6on0Nowx5m6Thefzs5iK3dnPGBqHTT43DHbnWc2scjQFG+eZhe98\nEll/kb6vpBoY4bG9/wCG9qu7jj9Z+BceCNKeHllbezVLCU/Hswivr7h2dnaEFvPD\nO4GqiWiwOF06XaBMVgxA8p2HRw0KtXqOpZk+o+sUvdPjsBw42BB96A1yFX4jgFNA\nPyZYnEUdP6OOv9HSjnl7k/iEkvHq/jGYMMojixlvXpGXhnt5jNyc4GSUJQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChhdXhpbGlhcnkga2V5KSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjkEEwECACMFAlsy23UCGwMHCwkIBwMCAQYVCAIJCgsEFgIDAQIeAQIX\ngAAKCRD3b2bD1AgnknqOD/9fB2ASuG2aJIiap4kK58R+RmOVM4qgclAnaG57+vjI\nnKvyfV3NH/keplGNRxwqHekfPCqvkpABwhdGEXIE8ILqnPewIMr6PZNZWNJynZ9i\neSMzVuCG7jDoGyQ5/6B0f6xeBtTeBDiRl7+Alehet1twuGL1BJUYG0QuLgcEzkaE\n/gkuumeVcazLzz7L12D22nMk66GxmgXfqS5zcbqOAuZwaA6VgSEgFdV2X2JU79zS\nBQJXv7NKc+nDXFG7M7EHjY3Rma3HXkDbkT8bzh9tJV7Z7TlpT829pStWQyoxKCVq\nsEX8WsSapTKA3P9YkYCwLShgZu4HKRFvHMaIasSIZWzLu+RZH/4yyHOhj0QB7XMY\neHQ6fGSbtJ+K6SrpHOOsKQNAJ0hVbSrnA1cr5+2SDfel1RfYt0W9FA6DoH/S5gAR\ndzT1u44QVwwp3U+eFpHphFy//uzxNMtCjjdkpzhYYhOCLNkDrlRPb+bcoL/6ePSr\n016PA7eEnuC305YU1Ml2WcCn7wQV8x90o33klJmEkWtXh3X39vYtI4nCPIvZn1eP\nVy+F+wWt4vN2b8oOdlzc2paOembbCo2B+Wapv5Y9peBvlbsDSgqtJABfK8KQq/jK\nYl3h5elIa1I3uNfczeHOnf1enLOUOlq630yeM/yHizz99G1g+z/guMh5+x/OHraW\niLkCDQRbMtt1ARAA1lNsWklhS9LoBdolTVtg65FfdFJr47pzKRGYIoGLbcJ155ND\nG+P8UrM06E/ah06EEWuvu2YyyYAz1iYGsCwHAXtbEJh+1tF0iOVx2vnZPgtIGE9V\nP95V5ZvWvB3bdke1z8HadDA+/Ve7fbwXXLa/z9QhSQgsJ8NS8KYnDDjI4EvQtv0i\nPVLY8+u8z6VyiV9RJyn8UEZEJdbFDF9AZAT8103w8SEo/cvIoUbVKZLGcXdAIjCa\ny04u6jsrMp9UGHZX7+srT+9YHD
zQixei4IdmxUcqtiNR2/bFHpHCu1pzYjXj968D\n8Ng2txBXDgs16BF/9l++GWKz2dOSH0jdS6sFJ/Dmg7oYnJ2xKSJEmcnV8Z0M1n4w\nXR1t/KeKZe3aR+RXCAEVC5dQ3GbRW2+WboJ6ldgFcVcOv6iOSWP9TrLzFPOpCsIr\nnHE+cMBmPHq3dUm7KeYXQ6wWWmtXlw6widf7cBcGFeELpuU9klzqdKze8qo2oMkf\nrfxIq8zdciPxZXb/75dGWs6dLHQmDpo4MdQVskw5vvwHicMpUpGpxkX7X1XAfdQf\nyIHLGT4ZXuMLIMUPdzJE0Vwt/RtJrZ+feLSv/+0CkkpGHORYroGwIBrJ2RikgcV2\nbc98V/27Kz2ngUCEwnmlhIcrY4IGAAZzUAl0GLHSevPbAREu4fDW4Y+ztOsAEQEA\nAYkCHwQYAQIACQUCWzLbdQIbDAAKCRD3b2bD1AgnkusfD/9U4sPtZfMw6cII167A\nXRZOO195G7oiAnBUw5AW6EK0SAHVZcuW0LMMXnGe9f4UsEUgCNwo5mvLWPxzKqFq\n6/G3kEZVFwZ0qrlLoJPeHNbOcfkeZ9NgD/OhzQmdylM0IwGM9DMrm2YS4EVsmm2b\n53qKIfIyysp1yAGcTnBwBbZ85osNBl2KRDIPhMs0bnmGB7IAvwlSb+xm6vWKECkO\nlwQDO5Kg8YZ8+Z3pn/oS688t/fPXvWLZYUqwR63oWfIaPJI7Ahv2jJmgw1ofL81r\n2CE3T/OydtUeGLzqWJAB8sbUgT3ug0cjtxsHuroQBSYBND3XDb/EQh5GeVVnGKKH\ngESLFAoweoNjDSXrlIu1gFjCDHF4CqBRmNYKrNQjLmhCrSfwkytXESJwlLzFKY8P\nK1yZyTpDC9YK0G7qgrk7EHmH9JAZTQ5V65pp0vR9KvqTU5ewkQDIljD2f3FIqo2B\nSKNCQE+N6NjWaTeNlU75m+yZocKObSPg0zS8FAuSJetNtzXA7ouqk34OoIMQj4gq\nUnh/i1FcZAd4U6Dtr9aRZ6PeLlm6MJ/h582L6fJLNEu136UWDtJj5eBYEzX13l+d\nSC4PEHx7ZZRwQKptl9NkinLZGJztg175paUu8C34sAv+SQnM20c0pdOXAq9GKKhi\nvt61kpkXoRGxjTlc6h+69aidSg==\n=ls8J\n-----END PGP PUBLIC KEY BLOCK-----\n", - "-----BEGIN PGP PUBLIC KEY BLOCK-----\n\nmQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF\n0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF\n0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c\nu7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh\nXGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H\n5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW\n9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj\n/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1\nPcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY\nHVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF\nbuhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjYEEwECACAFAkrgSTsCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAK\nCRAZni+R/UMdUWzpD/9s5SFR/ZF3yjY5VLUFLMXIKUztNN3oc45fyLdTI3+UClKC\n2tEruzYjqNHhqAEXa2sN1fMrsuKec61Ll2NfvJjkLKDvgVIh7kM7aslNYVOP6BTf\nC/JJ7/ufz3UZmyViH/WDl+AYdgk3JqCIO5w5ryrC9IyBzYv2m0HqYbWfphY3uHw5\nun3ndLJcu8+BGP5F+ONQEGl+DRH58Il9Jp3HwbRa7dvkPgEhfFR+1hI+Btta2C7E\n0/2NKzCxZw7Lx3PBRcU92YKyaEihfy/aQKZCAuyfKiMvsmzs+4poIX7I9NQCJpyE\nIGfINoZ7VxqHwRn/d5mw2MZTJjbzSf+Um9YJyA0iEEyD6qjriWQRbuxpQXmlAJbh\n8okZ4gbVFv1F8MzK+4R8VvWJ0XxgtikSo72fHjwha7MAjqFnOq6eo6fEC/75g3NL\nGht5VdpGuHk0vbdENHMC8wS99e5qXGNDued3hlTavDMlEAHl34q2H9nakTGRF5Ki\nJUfNh3DVRGhg8cMIti21njiRh7gyFI2OccATY7bBSr79JhuNwelHuxLrCFpY7V25\nOFktl15jZJaMxuQBqYdBgSay2G0U6D1+7VsWufpzd/Abx1/c3oi9ZaJvW22kAggq\ndzdA27UUYjWvx42w9menJwh/0jeQcTecIUd0d0rFcw/c1pvgMMl/Q73yzKgKYw==\n=zbHE\n-----END PGP PUBLIC KEY BLOCK-----\n-----BEGIN PGP PUBLIC KEY 
BLOCK-----\n\nmQINBFsy23UBEACUKSphFEIEvNpy68VeW4Dt6qv+mU6am9a2AAl10JANLj1oqWX+\noYk3en1S6cVe2qehSL5DGVa3HMUZkP3dtbD4SgzXzxPodebPcr4+0QNWigkUisri\nXGL5SCEcOP30zDhZvg+4mpO2jMi7Kc1DLPzBBkgppcX91wa0L1pQzBcvYMPyV/Dh\nKbQHR75WdkP6OA2JXdfC94nxYq+2e0iPqC1hCP3Elh+YnSkOkrawDPmoB1g4+ft/\nxsiVGVy/W0ekXmgvYEHt6si6Y8NwXgnTMqxeSXQ9YUgVIbTpsxHQKGy76T5lMlWX\n4LCOmEVomBJg1SqF6yi9Vu8TeNThaDqT4/DddYInd0OO69s0kGIXalVgGYiW2HOD\nx2q5R1VGCoJxXomz+EbOXY+HpKPOHAjU0DB9MxbU3S248LQ69nIB5uxysy0PSco1\nsdZ8sxRNQ9Dw6on0Nowx5m6Thefzs5iK3dnPGBqHTT43DHbnWc2scjQFG+eZhe98\nEll/kb6vpBoY4bG9/wCG9qu7jj9Z+BceCNKeHllbezVLCU/Hswivr7h2dnaEFvPD\nO4GqiWiwOF06XaBMVgxA8p2HRw0KtXqOpZk+o+sUvdPjsBw42BB96A1yFX4jgFNA\nPyZYnEUdP6OOv9HSjnl7k/iEkvHq/jGYMMojixlvXpGXhnt5jNyc4GSUJQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChhdXhpbGlhcnkga2V5KSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjkEEwECACMFAlsy23UCGwMHCwkIBwMCAQYVCAIJCgsEFgIDAQIeAQIX\ngAAKCRD3b2bD1AgnknqOD/9fB2ASuG2aJIiap4kK58R+RmOVM4qgclAnaG57+vjI\nnKvyfV3NH/keplGNRxwqHekfPCqvkpABwhdGEXIE8ILqnPewIMr6PZNZWNJynZ9i\neSMzVuCG7jDoGyQ5/6B0f6xeBtTeBDiRl7+Alehet1twuGL1BJUYG0QuLgcEzkaE\n/gkuumeVcazLzz7L12D22nMk66GxmgXfqS5zcbqOAuZwaA6VgSEgFdV2X2JU79zS\nBQJXv7NKc+nDXFG7M7EHjY3Rma3HXkDbkT8bzh9tJV7Z7TlpT829pStWQyoxKCVq\nsEX8WsSapTKA3P9YkYCwLShgZu4HKRFvHMaIasSIZWzLu+RZH/4yyHOhj0QB7XMY\neHQ6fGSbtJ+K6SrpHOOsKQNAJ0hVbSrnA1cr5+2SDfel1RfYt0W9FA6DoH/S5gAR\ndzT1u44QVwwp3U+eFpHphFy//uzxNMtCjjdkpzhYYhOCLNkDrlRPb+bcoL/6ePSr\n016PA7eEnuC305YU1Ml2WcCn7wQV8x90o33klJmEkWtXh3X39vYtI4nCPIvZn1eP\nVy+F+wWt4vN2b8oOdlzc2paOembbCo2B+Wapv5Y9peBvlbsDSgqtJABfK8KQq/jK\nYl3h5elIa1I3uNfczeHOnf1enLOUOlq630yeM/yHizz99G1g+z/guMh5+x/OHraW\niLkCDQRbMtt1ARAA1lNsWklhS9LoBdolTVtg65FfdFJr47pzKRGYIoGLbcJ155ND\nG+P8UrM06E/ah06EEWuvu2YyyYAz1iYGsCwHAXtbEJh+1tF0iOVx2vnZPgtIGE9V\nP95V5ZvWvB3bdke1z8HadDA+/Ve7fbwXXLa/z9QhSQgsJ8NS8KYnDDjI4EvQtv0i\nPVLY8+u8z6VyiV9RJyn8UEZEJdbFDF9AZAT8103w8SEo/cvIoUbVKZLGcXdAIjCa\ny04u6jsrMp9UGHZX7+srT+9YHDzQixei4IdmxUcqtiNR2/bFHpHCu1pzYjXj968D\n8Ng2txBXDgs16BF/9l++GWKz2dOSH0jdS6sFJ/Dmg7oYnJ2xKSJEmcnV8Z0M1n4w\nXR1t/KeKZe3aR+RXCAEVC5dQ3GbRW2+WboJ6ldgFcVcOv6iOSWP9TrLzFPOpCsIr\nnHE+cMBmPHq3dUm7KeYXQ6wWWmtXlw6widf7cBcGFeELpuU9klzqdKze8qo2oMkf\nrfxIq8zdciPxZXb/75dGWs6dLHQmDpo4MdQVskw5vvwHicMpUpGpxkX7X1XAfdQf\nyIHLGT4ZXuMLIMUPdzJE0Vwt/RtJrZ+feLSv/+0CkkpGHORYroGwIBrJ2RikgcV2\nbc98V/27Kz2ngUCEwnmlhIcrY4IGAAZzUAl0GLHSevPbAREu4fDW4Y+ztOsAEQEA\nAYkCHwQYAQIACQUCWzLbdQIbDAAKCRD3b2bD1AgnkusfD/9U4sPtZfMw6cII167A\nXRZOO195G7oiAnBUw5AW6EK0SAHVZcuW0LMMXnGe9f4UsEUgCNwo5mvLWPxzKqFq\n6/G3kEZVFwZ0qrlLoJPeHNbOcfkeZ9NgD/OhzQmdylM0IwGM9DMrm2YS4EVsmm2b\n53qKIfIyysp1yAGcTnBwBbZ85osNBl2KRDIPhMs0bnmGB7IAvwlSb+xm6vWKECkO\nlwQDO5Kg8YZ8+Z3pn/oS688t/fPXvWLZYUqwR63oWfIaPJI7Ahv2jJmgw1ofL81r\n2CE3T/OydtUeGLzqWJAB8sbUgT3ug0cjtxsHuroQBSYBND3XDb/EQh5GeVVnGKKH\ngESLFAoweoNjDSXrlIu1gFjCDHF4CqBRmNYKrNQjLmhCrSfwkytXESJwlLzFKY8P\nK1yZyTpDC9YK0G7qgrk7EHmH9JAZTQ5V65pp0vR9KvqTU5ewkQDIljD2f3FIqo2B\nSKNCQE+N6NjWaTeNlU75m+yZocKObSPg0zS8FAuSJetNtzXA7ouqk34OoIMQj4gq\nUnh/i1FcZAd4U6Dtr9aRZ6PeLlm6MJ/h582L6fJLNEu136UWDtJj5eBYEzX13l+d\nSC4PEHx7ZZRwQKptl9NkinLZGJztg175paUu8C34sAv+SQnM20c0pdOXAq9GKKhi\nvt61kpkXoRGxjTlc6h+69aidSg==\n=ls8J\n-----END PGP PUBLIC KEY BLOCK-----\n", - "-----BEGIN PGP PUBLIC KEY 
BLOCK-----\n\nmQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF\n0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF\n0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c\nu7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh\nXGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H\n5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW\n9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj\n/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1\nPcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY\nHVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF\nbuhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjYEEwECACAFAkrgSTsCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAK\nCRAZni+R/UMdUWzpD/9s5SFR/ZF3yjY5VLUFLMXIKUztNN3oc45fyLdTI3+UClKC\n2tEruzYjqNHhqAEXa2sN1fMrsuKec61Ll2NfvJjkLKDvgVIh7kM7aslNYVOP6BTf\nC/JJ7/ufz3UZmyViH/WDl+AYdgk3JqCIO5w5ryrC9IyBzYv2m0HqYbWfphY3uHw5\nun3ndLJcu8+BGP5F+ONQEGl+DRH58Il9Jp3HwbRa7dvkPgEhfFR+1hI+Btta2C7E\n0/2NKzCxZw7Lx3PBRcU92YKyaEihfy/aQKZCAuyfKiMvsmzs+4poIX7I9NQCJpyE\nIGfINoZ7VxqHwRn/d5mw2MZTJjbzSf+Um9YJyA0iEEyD6qjriWQRbuxpQXmlAJbh\n8okZ4gbVFv1F8MzK+4R8VvWJ0XxgtikSo72fHjwha7MAjqFnOq6eo6fEC/75g3NL\nGht5VdpGuHk0vbdENHMC8wS99e5qXGNDued3hlTavDMlEAHl34q2H9nakTGRF5Ki\nJUfNh3DVRGhg8cMIti21njiRh7gyFI2OccATY7bBSr79JhuNwelHuxLrCFpY7V25\nOFktl15jZJaMxuQBqYdBgSay2G0U6D1+7VsWufpzd/Abx1/c3oi9ZaJvW22kAggq\ndzdA27UUYjWvx42w9menJwh/0jeQcTecIUd0d0rFcw/c1pvgMMl/Q73yzKgKYw==\n=zbHE\n-----END PGP PUBLIC KEY BLOCK-----\n-----BEGIN PGP PUBLIC KEY BLOCK-----\n\nmQINBFsy23UBEACUKSphFEIEvNpy68VeW4Dt6qv+mU6am9a2AAl10JANLj1oqWX+\noYk3en1S6cVe2qehSL5DGVa3HMUZkP3dtbD4SgzXzxPodebPcr4+0QNWigkUisri\nXGL5SCEcOP30zDhZvg+4mpO2jMi7Kc1DLPzBBkgppcX91wa0L1pQzBcvYMPyV/Dh\nKbQHR75WdkP6OA2JXdfC94nxYq+2e0iPqC1hCP3Elh+YnSkOkrawDPmoB1g4+ft/\nxsiVGVy/W0ekXmgvYEHt6si6Y8NwXgnTMqxeSXQ9YUgVIbTpsxHQKGy76T5lMlWX\n4LCOmEVomBJg1SqF6yi9Vu8TeNThaDqT4/DddYInd0OO69s0kGIXalVgGYiW2HOD\nx2q5R1VGCoJxXomz+EbOXY+HpKPOHAjU0DB9MxbU3S248LQ69nIB5uxysy0PSco1\nsdZ8sxRNQ9Dw6on0Nowx5m6Thefzs5iK3dnPGBqHTT43DHbnWc2scjQFG+eZhe98\nEll/kb6vpBoY4bG9/wCG9qu7jj9Z+BceCNKeHllbezVLCU/Hswivr7h2dnaEFvPD\nO4GqiWiwOF06XaBMVgxA8p2HRw0KtXqOpZk+o+sUvdPjsBw42BB96A1yFX4jgFNA\nPyZYnEUdP6OOv9HSjnl7k/iEkvHq/jGYMMojixlvXpGXhnt5jNyc4GSUJQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChhdXhpbGlhcnkga2V5KSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjkEEwECACMFAlsy23UCGwMHCwkIBwMCAQYVCAIJCgsEFgIDAQIeAQIX\ngAAKCRD3b2bD1AgnknqOD/9fB2ASuG2aJIiap4kK58R+RmOVM4qgclAnaG57+vjI\nnKvyfV3NH/keplGNRxwqHekfPCqvkpABwhdGEXIE8ILqnPewIMr6PZNZWNJynZ9i\neSMzVuCG7jDoGyQ5/6B0f6xeBtTeBDiRl7+Alehet1twuGL1BJUYG0QuLgcEzkaE\n/gkuumeVcazLzz7L12D22nMk66GxmgXfqS5zcbqOAuZwaA6VgSEgFdV2X2JU79zS\nBQJXv7NKc+nDXFG7M7EHjY3Rma3HXkDbkT8bzh9tJV7Z7TlpT829pStWQyoxKCVq\nsEX8WsSapTKA3P9YkYCwLShgZu4HKRFvHMaIasSIZWzLu+RZH/4yyHOhj0QB7XMY\neHQ6fGSbtJ+K6SrpHOOsKQNAJ0hVbSrnA1cr5+2SDfel1RfYt0W9FA6DoH/S5gAR\ndzT1u44QVwwp3U+eFpHphFy//uzxNMtCjjdkpzhYYhOCLNkDrlRPb+bcoL/6ePSr\n016PA7eEnuC305YU1Ml2WcCn7wQV8x90o33klJmEkWtXh3X39vYtI4nCPIvZn1eP\nVy+F+wWt4vN2b8oOdlzc2paOembbCo2B+Wapv5Y9peBvlbsDSgqtJABfK8KQq/jK\nYl3h5elIa1I3uNfczeHOnf1enLOUOlq630yeM/yHizz99G1g+z/guMh5+x/OHraW\niLkCDQRbMtt1ARAA1lNsWklhS9LoBdolTVtg65FfdFJr47pzKRGYIoGLbcJ155ND\nG+P8UrM06E/ah06EEWuvu2YyyYAz1iYGsCwHAXtbEJh+1tF0iOVx2vnZPgtIGE9V\nP95V5ZvWvB3bdke1z8HadDA+/Ve7fbwXXLa/z9QhSQgsJ8NS8KYnDDjI4EvQtv0i\nPVLY8+u8z6VyiV9RJyn8UEZEJdbFDF9AZAT8103w8SEo/cvIoUbVKZLGcXdAIjCa\ny04u6jsrMp9UGHZX7+srT+9YHD
zQixei4IdmxUcqtiNR2/bFHpHCu1pzYjXj968D\n8Ng2txBXDgs16BF/9l++GWKz2dOSH0jdS6sFJ/Dmg7oYnJ2xKSJEmcnV8Z0M1n4w\nXR1t/KeKZe3aR+RXCAEVC5dQ3GbRW2+WboJ6ldgFcVcOv6iOSWP9TrLzFPOpCsIr\nnHE+cMBmPHq3dUm7KeYXQ6wWWmtXlw6widf7cBcGFeELpuU9klzqdKze8qo2oMkf\nrfxIq8zdciPxZXb/75dGWs6dLHQmDpo4MdQVskw5vvwHicMpUpGpxkX7X1XAfdQf\nyIHLGT4ZXuMLIMUPdzJE0Vwt/RtJrZ+feLSv/+0CkkpGHORYroGwIBrJ2RikgcV2\nbc98V/27Kz2ngUCEwnmlhIcrY4IGAAZzUAl0GLHSevPbAREu4fDW4Y+ztOsAEQEA\nAYkCHwQYAQIACQUCWzLbdQIbDAAKCRD3b2bD1AgnkusfD/9U4sPtZfMw6cII167A\nXRZOO195G7oiAnBUw5AW6EK0SAHVZcuW0LMMXnGe9f4UsEUgCNwo5mvLWPxzKqFq\n6/G3kEZVFwZ0qrlLoJPeHNbOcfkeZ9NgD/OhzQmdylM0IwGM9DMrm2YS4EVsmm2b\n53qKIfIyysp1yAGcTnBwBbZ85osNBl2KRDIPhMs0bnmGB7IAvwlSb+xm6vWKECkO\nlwQDO5Kg8YZ8+Z3pn/oS688t/fPXvWLZYUqwR63oWfIaPJI7Ahv2jJmgw1ofL81r\n2CE3T/OydtUeGLzqWJAB8sbUgT3ug0cjtxsHuroQBSYBND3XDb/EQh5GeVVnGKKH\ngESLFAoweoNjDSXrlIu1gFjCDHF4CqBRmNYKrNQjLmhCrSfwkytXESJwlLzFKY8P\nK1yZyTpDC9YK0G7qgrk7EHmH9JAZTQ5V65pp0vR9KvqTU5ewkQDIljD2f3FIqo2B\nSKNCQE+N6NjWaTeNlU75m+yZocKObSPg0zS8FAuSJetNtzXA7ouqk34OoIMQj4gq\nUnh/i1FcZAd4U6Dtr9aRZ6PeLlm6MJ/h582L6fJLNEu136UWDtJj5eBYEzX13l+d\nSC4PEHx7ZZRwQKptl9NkinLZGJztg175paUu8C34sAv+SQnM20c0pdOXAq9GKKhi\nvt61kpkXoRGxjTlc6h+69aidSg==\n=ls8J\n-----END PGP PUBLIC KEY BLOCK-----\n" + "-----BEGIN PGP PUBLIC KEY BLOCK-----\nVersion: GnuPG v2.0.22 (GNU/Linux)\n\nmQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF\n0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF\n0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c\nu7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh\nXGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H\n5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW\n9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj\n/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1\nPcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY\nHVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF\nbuhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjYEEwEIACACGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAUCSuBJPAAK\nCRAZni+R/UMdUfIkD/9m3HWv07uJG26R3KBexTo2FFu3rmZs+m2nfW8R3dBX+k0o\nAOFpgJCsNgKwU81LOPrkMN19G0+Yn/ZTCDD7cIQ7dhYuDyEX97xh4une/EhnnRuh\nASzR+1xYbj/HcYZIL9kbslgpebMn+AhxbUTQF/mziug3hLidR9Bzvygq0Q09E11c\nOZL4BU6J2HqxL+9m2F+tnLdfhL7MsAq9nbmWAOpkbGefc5SXBSq0sWfwoes3X3yD\nQ8B5Xqr9AxABU7oUB+wRqvY69ZCxi/BhuuJCUxY89ZmwXfkVxeHl1tYfROUwOnJO\nGYSbI/o41KBK4DkIiDcT7QqvqvCyudnxZdBjL2QU6OrIJvWmKs319qSF9m3mXRSt\nZzWtB89Pj5LZ6cdtuHvW9GO4qSoBLmAfB313pGkbgi1DE6tqCLHlA0yQ8zv99OWV\ncMDGmS7tVTZqfX1xQJ0N3bNORQNtikJC3G+zBCJzIeZleeDlMDQcww00yWU1oE7/\nTo2UmykMGc7o9iggFWR2g0PIcKsA/SXdRKWPqCHG2uKHBvdRTQGupdXQ1sbV+AHw\nycyA/9H/mp/NUSNM2cqnBDcZ6GhlHt59zWtEveiuU5fpTbp4GVcFXbW8jStj8j8z\n1HI3cywZO8+YNPzqyx0JWsidXGkfzkPHyS4jTG84lfu2JG8m/nqLnRSeKpl20Q==\n=79bX\n-----END PGP PUBLIC KEY BLOCK-----\n-----BEGIN PGP PUBLIC KEY 
BLOCK-----\n\nmQINBFsy23UBEACUKSphFEIEvNpy68VeW4Dt6qv+mU6am9a2AAl10JANLj1oqWX+\noYk3en1S6cVe2qehSL5DGVa3HMUZkP3dtbD4SgzXzxPodebPcr4+0QNWigkUisri\nXGL5SCEcOP30zDhZvg+4mpO2jMi7Kc1DLPzBBkgppcX91wa0L1pQzBcvYMPyV/Dh\nKbQHR75WdkP6OA2JXdfC94nxYq+2e0iPqC1hCP3Elh+YnSkOkrawDPmoB1g4+ft/\nxsiVGVy/W0ekXmgvYEHt6si6Y8NwXgnTMqxeSXQ9YUgVIbTpsxHQKGy76T5lMlWX\n4LCOmEVomBJg1SqF6yi9Vu8TeNThaDqT4/DddYInd0OO69s0kGIXalVgGYiW2HOD\nx2q5R1VGCoJxXomz+EbOXY+HpKPOHAjU0DB9MxbU3S248LQ69nIB5uxysy0PSco1\nsdZ8sxRNQ9Dw6on0Nowx5m6Thefzs5iK3dnPGBqHTT43DHbnWc2scjQFG+eZhe98\nEll/kb6vpBoY4bG9/wCG9qu7jj9Z+BceCNKeHllbezVLCU/Hswivr7h2dnaEFvPD\nO4GqiWiwOF06XaBMVgxA8p2HRw0KtXqOpZk+o+sUvdPjsBw42BB96A1yFX4jgFNA\nPyZYnEUdP6OOv9HSjnl7k/iEkvHq/jGYMMojixlvXpGXhnt5jNyc4GSUJQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChhdXhpbGlhcnkga2V5KSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjkEEwECACMFAlsy23UCGwMHCwkIBwMCAQYVCAIJCgsEFgIDAQIeAQIX\ngAAKCRD3b2bD1AgnknqOD/9fB2ASuG2aJIiap4kK58R+RmOVM4qgclAnaG57+vjI\nnKvyfV3NH/keplGNRxwqHekfPCqvkpABwhdGEXIE8ILqnPewIMr6PZNZWNJynZ9i\neSMzVuCG7jDoGyQ5/6B0f6xeBtTeBDiRl7+Alehet1twuGL1BJUYG0QuLgcEzkaE\n/gkuumeVcazLzz7L12D22nMk66GxmgXfqS5zcbqOAuZwaA6VgSEgFdV2X2JU79zS\nBQJXv7NKc+nDXFG7M7EHjY3Rma3HXkDbkT8bzh9tJV7Z7TlpT829pStWQyoxKCVq\nsEX8WsSapTKA3P9YkYCwLShgZu4HKRFvHMaIasSIZWzLu+RZH/4yyHOhj0QB7XMY\neHQ6fGSbtJ+K6SrpHOOsKQNAJ0hVbSrnA1cr5+2SDfel1RfYt0W9FA6DoH/S5gAR\ndzT1u44QVwwp3U+eFpHphFy//uzxNMtCjjdkpzhYYhOCLNkDrlRPb+bcoL/6ePSr\n016PA7eEnuC305YU1Ml2WcCn7wQV8x90o33klJmEkWtXh3X39vYtI4nCPIvZn1eP\nVy+F+wWt4vN2b8oOdlzc2paOembbCo2B+Wapv5Y9peBvlbsDSgqtJABfK8KQq/jK\nYl3h5elIa1I3uNfczeHOnf1enLOUOlq630yeM/yHizz99G1g+z/guMh5+x/OHraW\niLkCDQRbMtt1ARAA1lNsWklhS9LoBdolTVtg65FfdFJr47pzKRGYIoGLbcJ155ND\nG+P8UrM06E/ah06EEWuvu2YyyYAz1iYGsCwHAXtbEJh+1tF0iOVx2vnZPgtIGE9V\nP95V5ZvWvB3bdke1z8HadDA+/Ve7fbwXXLa/z9QhSQgsJ8NS8KYnDDjI4EvQtv0i\nPVLY8+u8z6VyiV9RJyn8UEZEJdbFDF9AZAT8103w8SEo/cvIoUbVKZLGcXdAIjCa\ny04u6jsrMp9UGHZX7+srT+9YHDzQixei4IdmxUcqtiNR2/bFHpHCu1pzYjXj968D\n8Ng2txBXDgs16BF/9l++GWKz2dOSH0jdS6sFJ/Dmg7oYnJ2xKSJEmcnV8Z0M1n4w\nXR1t/KeKZe3aR+RXCAEVC5dQ3GbRW2+WboJ6ldgFcVcOv6iOSWP9TrLzFPOpCsIr\nnHE+cMBmPHq3dUm7KeYXQ6wWWmtXlw6widf7cBcGFeELpuU9klzqdKze8qo2oMkf\nrfxIq8zdciPxZXb/75dGWs6dLHQmDpo4MdQVskw5vvwHicMpUpGpxkX7X1XAfdQf\nyIHLGT4ZXuMLIMUPdzJE0Vwt/RtJrZ+feLSv/+0CkkpGHORYroGwIBrJ2RikgcV2\nbc98V/27Kz2ngUCEwnmlhIcrY4IGAAZzUAl0GLHSevPbAREu4fDW4Y+ztOsAEQEA\nAYkCHwQYAQIACQUCWzLbdQIbDAAKCRD3b2bD1AgnkusfD/9U4sPtZfMw6cII167A\nXRZOO195G7oiAnBUw5AW6EK0SAHVZcuW0LMMXnGe9f4UsEUgCNwo5mvLWPxzKqFq\n6/G3kEZVFwZ0qrlLoJPeHNbOcfkeZ9NgD/OhzQmdylM0IwGM9DMrm2YS4EVsmm2b\n53qKIfIyysp1yAGcTnBwBbZ85osNBl2KRDIPhMs0bnmGB7IAvwlSb+xm6vWKECkO\nlwQDO5Kg8YZ8+Z3pn/oS688t/fPXvWLZYUqwR63oWfIaPJI7Ahv2jJmgw1ofL81r\n2CE3T/OydtUeGLzqWJAB8sbUgT3ug0cjtxsHuroQBSYBND3XDb/EQh5GeVVnGKKH\ngESLFAoweoNjDSXrlIu1gFjCDHF4CqBRmNYKrNQjLmhCrSfwkytXESJwlLzFKY8P\nK1yZyTpDC9YK0G7qgrk7EHmH9JAZTQ5V65pp0vR9KvqTU5ewkQDIljD2f3FIqo2B\nSKNCQE+N6NjWaTeNlU75m+yZocKObSPg0zS8FAuSJetNtzXA7ouqk34OoIMQj4gq\nUnh/i1FcZAd4U6Dtr9aRZ6PeLlm6MJ/h582L6fJLNEu136UWDtJj5eBYEzX13l+d\nSC4PEHx7ZZRwQKptl9NkinLZGJztg175paUu8C34sAv+SQnM20c0pdOXAq9GKKhi\nvt61kpkXoRGxjTlc6h+69aidSg==\n=ls8J\n-----END PGP PUBLIC KEY BLOCK-----" ], "packages": [ { @@ -296,9 +292,7 @@ const v1ResultFailure = ` "id": "6efecc9910457689fa56b362cfed5f0c9af2c9f1ebfe35bd5d54558b1fcbda06", "options": { "gpgkeys": [ - "-----BEGIN PGP PUBLIC KEY 
BLOCK-----\n\nmQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF\n0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF\n0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c\nu7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh\nXGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H\n5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW\n9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj\n/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1\nPcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY\nHVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF\nbuhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjYEEwECACAFAkrgSTsCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAK\nCRAZni+R/UMdUWzpD/9s5SFR/ZF3yjY5VLUFLMXIKUztNN3oc45fyLdTI3+UClKC\n2tEruzYjqNHhqAEXa2sN1fMrsuKec61Ll2NfvJjkLKDvgVIh7kM7aslNYVOP6BTf\nC/JJ7/ufz3UZmyViH/WDl+AYdgk3JqCIO5w5ryrC9IyBzYv2m0HqYbWfphY3uHw5\nun3ndLJcu8+BGP5F+ONQEGl+DRH58Il9Jp3HwbRa7dvkPgEhfFR+1hI+Btta2C7E\n0/2NKzCxZw7Lx3PBRcU92YKyaEihfy/aQKZCAuyfKiMvsmzs+4poIX7I9NQCJpyE\nIGfINoZ7VxqHwRn/d5mw2MZTJjbzSf+Um9YJyA0iEEyD6qjriWQRbuxpQXmlAJbh\n8okZ4gbVFv1F8MzK+4R8VvWJ0XxgtikSo72fHjwha7MAjqFnOq6eo6fEC/75g3NL\nGht5VdpGuHk0vbdENHMC8wS99e5qXGNDued3hlTavDMlEAHl34q2H9nakTGRF5Ki\nJUfNh3DVRGhg8cMIti21njiRh7gyFI2OccATY7bBSr79JhuNwelHuxLrCFpY7V25\nOFktl15jZJaMxuQBqYdBgSay2G0U6D1+7VsWufpzd/Abx1/c3oi9ZaJvW22kAggq\ndzdA27UUYjWvx42w9menJwh/0jeQcTecIUd0d0rFcw/c1pvgMMl/Q73yzKgKYw==\n=zbHE\n-----END PGP PUBLIC KEY BLOCK-----\n-----BEGIN PGP PUBLIC KEY BLOCK-----\n\nmQINBFsy23UBEACUKSphFEIEvNpy68VeW4Dt6qv+mU6am9a2AAl10JANLj1oqWX+\noYk3en1S6cVe2qehSL5DGVa3HMUZkP3dtbD4SgzXzxPodebPcr4+0QNWigkUisri\nXGL5SCEcOP30zDhZvg+4mpO2jMi7Kc1DLPzBBkgppcX91wa0L1pQzBcvYMPyV/Dh\nKbQHR75WdkP6OA2JXdfC94nxYq+2e0iPqC1hCP3Elh+YnSkOkrawDPmoB1g4+ft/\nxsiVGVy/W0ekXmgvYEHt6si6Y8NwXgnTMqxeSXQ9YUgVIbTpsxHQKGy76T5lMlWX\n4LCOmEVomBJg1SqF6yi9Vu8TeNThaDqT4/DddYInd0OO69s0kGIXalVgGYiW2HOD\nx2q5R1VGCoJxXomz+EbOXY+HpKPOHAjU0DB9MxbU3S248LQ69nIB5uxysy0PSco1\nsdZ8sxRNQ9Dw6on0Nowx5m6Thefzs5iK3dnPGBqHTT43DHbnWc2scjQFG+eZhe98\nEll/kb6vpBoY4bG9/wCG9qu7jj9Z+BceCNKeHllbezVLCU/Hswivr7h2dnaEFvPD\nO4GqiWiwOF06XaBMVgxA8p2HRw0KtXqOpZk+o+sUvdPjsBw42BB96A1yFX4jgFNA\nPyZYnEUdP6OOv9HSjnl7k/iEkvHq/jGYMMojixlvXpGXhnt5jNyc4GSUJQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChhdXhpbGlhcnkga2V5KSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjkEEwECACMFAlsy23UCGwMHCwkIBwMCAQYVCAIJCgsEFgIDAQIeAQIX\ngAAKCRD3b2bD1AgnknqOD/9fB2ASuG2aJIiap4kK58R+RmOVM4qgclAnaG57+vjI\nnKvyfV3NH/keplGNRxwqHekfPCqvkpABwhdGEXIE8ILqnPewIMr6PZNZWNJynZ9i\neSMzVuCG7jDoGyQ5/6B0f6xeBtTeBDiRl7+Alehet1twuGL1BJUYG0QuLgcEzkaE\n/gkuumeVcazLzz7L12D22nMk66GxmgXfqS5zcbqOAuZwaA6VgSEgFdV2X2JU79zS\nBQJXv7NKc+nDXFG7M7EHjY3Rma3HXkDbkT8bzh9tJV7Z7TlpT829pStWQyoxKCVq\nsEX8WsSapTKA3P9YkYCwLShgZu4HKRFvHMaIasSIZWzLu+RZH/4yyHOhj0QB7XMY\neHQ6fGSbtJ+K6SrpHOOsKQNAJ0hVbSrnA1cr5+2SDfel1RfYt0W9FA6DoH/S5gAR\ndzT1u44QVwwp3U+eFpHphFy//uzxNMtCjjdkpzhYYhOCLNkDrlRPb+bcoL/6ePSr\n016PA7eEnuC305YU1Ml2WcCn7wQV8x90o33klJmEkWtXh3X39vYtI4nCPIvZn1eP\nVy+F+wWt4vN2b8oOdlzc2paOembbCo2B+Wapv5Y9peBvlbsDSgqtJABfK8KQq/jK\nYl3h5elIa1I3uNfczeHOnf1enLOUOlq630yeM/yHizz99G1g+z/guMh5+x/OHraW\niLkCDQRbMtt1ARAA1lNsWklhS9LoBdolTVtg65FfdFJr47pzKRGYIoGLbcJ155ND\nG+P8UrM06E/ah06EEWuvu2YyyYAz1iYGsCwHAXtbEJh+1tF0iOVx2vnZPgtIGE9V\nP95V5ZvWvB3bdke1z8HadDA+/Ve7fbwXXLa/z9QhSQgsJ8NS8KYnDDjI4EvQtv0i\nPVLY8+u8z6VyiV9RJyn8UEZEJdbFDF9AZAT8103w8SEo/cvIoUbVKZLGcXdAIjCa\ny04u6jsrMp9UGHZX7+srT+9YHD
zQixei4IdmxUcqtiNR2/bFHpHCu1pzYjXj968D\n8Ng2txBXDgs16BF/9l++GWKz2dOSH0jdS6sFJ/Dmg7oYnJ2xKSJEmcnV8Z0M1n4w\nXR1t/KeKZe3aR+RXCAEVC5dQ3GbRW2+WboJ6ldgFcVcOv6iOSWP9TrLzFPOpCsIr\nnHE+cMBmPHq3dUm7KeYXQ6wWWmtXlw6widf7cBcGFeELpuU9klzqdKze8qo2oMkf\nrfxIq8zdciPxZXb/75dGWs6dLHQmDpo4MdQVskw5vvwHicMpUpGpxkX7X1XAfdQf\nyIHLGT4ZXuMLIMUPdzJE0Vwt/RtJrZ+feLSv/+0CkkpGHORYroGwIBrJ2RikgcV2\nbc98V/27Kz2ngUCEwnmlhIcrY4IGAAZzUAl0GLHSevPbAREu4fDW4Y+ztOsAEQEA\nAYkCHwQYAQIACQUCWzLbdQIbDAAKCRD3b2bD1AgnkusfD/9U4sPtZfMw6cII167A\nXRZOO195G7oiAnBUw5AW6EK0SAHVZcuW0LMMXnGe9f4UsEUgCNwo5mvLWPxzKqFq\n6/G3kEZVFwZ0qrlLoJPeHNbOcfkeZ9NgD/OhzQmdylM0IwGM9DMrm2YS4EVsmm2b\n53qKIfIyysp1yAGcTnBwBbZ85osNBl2KRDIPhMs0bnmGB7IAvwlSb+xm6vWKECkO\nlwQDO5Kg8YZ8+Z3pn/oS688t/fPXvWLZYUqwR63oWfIaPJI7Ahv2jJmgw1ofL81r\n2CE3T/OydtUeGLzqWJAB8sbUgT3ug0cjtxsHuroQBSYBND3XDb/EQh5GeVVnGKKH\ngESLFAoweoNjDSXrlIu1gFjCDHF4CqBRmNYKrNQjLmhCrSfwkytXESJwlLzFKY8P\nK1yZyTpDC9YK0G7qgrk7EHmH9JAZTQ5V65pp0vR9KvqTU5ewkQDIljD2f3FIqo2B\nSKNCQE+N6NjWaTeNlU75m+yZocKObSPg0zS8FAuSJetNtzXA7ouqk34OoIMQj4gq\nUnh/i1FcZAd4U6Dtr9aRZ6PeLlm6MJ/h582L6fJLNEu136UWDtJj5eBYEzX13l+d\nSC4PEHx7ZZRwQKptl9NkinLZGJztg175paUu8C34sAv+SQnM20c0pdOXAq9GKKhi\nvt61kpkXoRGxjTlc6h+69aidSg==\n=ls8J\n-----END PGP PUBLIC KEY BLOCK-----\n", - "-----BEGIN PGP PUBLIC KEY BLOCK-----\n\nmQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF\n0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF\n0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c\nu7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh\nXGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H\n5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW\n9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj\n/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1\nPcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY\nHVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF\nbuhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjYEEwECACAFAkrgSTsCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAK\nCRAZni+R/UMdUWzpD/9s5SFR/ZF3yjY5VLUFLMXIKUztNN3oc45fyLdTI3+UClKC\n2tEruzYjqNHhqAEXa2sN1fMrsuKec61Ll2NfvJjkLKDvgVIh7kM7aslNYVOP6BTf\nC/JJ7/ufz3UZmyViH/WDl+AYdgk3JqCIO5w5ryrC9IyBzYv2m0HqYbWfphY3uHw5\nun3ndLJcu8+BGP5F+ONQEGl+DRH58Il9Jp3HwbRa7dvkPgEhfFR+1hI+Btta2C7E\n0/2NKzCxZw7Lx3PBRcU92YKyaEihfy/aQKZCAuyfKiMvsmzs+4poIX7I9NQCJpyE\nIGfINoZ7VxqHwRn/d5mw2MZTJjbzSf+Um9YJyA0iEEyD6qjriWQRbuxpQXmlAJbh\n8okZ4gbVFv1F8MzK+4R8VvWJ0XxgtikSo72fHjwha7MAjqFnOq6eo6fEC/75g3NL\nGht5VdpGuHk0vbdENHMC8wS99e5qXGNDued3hlTavDMlEAHl34q2H9nakTGRF5Ki\nJUfNh3DVRGhg8cMIti21njiRh7gyFI2OccATY7bBSr79JhuNwelHuxLrCFpY7V25\nOFktl15jZJaMxuQBqYdBgSay2G0U6D1+7VsWufpzd/Abx1/c3oi9ZaJvW22kAggq\ndzdA27UUYjWvx42w9menJwh/0jeQcTecIUd0d0rFcw/c1pvgMMl/Q73yzKgKYw==\n=zbHE\n-----END PGP PUBLIC KEY BLOCK-----\n-----BEGIN PGP PUBLIC KEY 
BLOCK-----\n\nmQINBFsy23UBEACUKSphFEIEvNpy68VeW4Dt6qv+mU6am9a2AAl10JANLj1oqWX+\noYk3en1S6cVe2qehSL5DGVa3HMUZkP3dtbD4SgzXzxPodebPcr4+0QNWigkUisri\nXGL5SCEcOP30zDhZvg+4mpO2jMi7Kc1DLPzBBkgppcX91wa0L1pQzBcvYMPyV/Dh\nKbQHR75WdkP6OA2JXdfC94nxYq+2e0iPqC1hCP3Elh+YnSkOkrawDPmoB1g4+ft/\nxsiVGVy/W0ekXmgvYEHt6si6Y8NwXgnTMqxeSXQ9YUgVIbTpsxHQKGy76T5lMlWX\n4LCOmEVomBJg1SqF6yi9Vu8TeNThaDqT4/DddYInd0OO69s0kGIXalVgGYiW2HOD\nx2q5R1VGCoJxXomz+EbOXY+HpKPOHAjU0DB9MxbU3S248LQ69nIB5uxysy0PSco1\nsdZ8sxRNQ9Dw6on0Nowx5m6Thefzs5iK3dnPGBqHTT43DHbnWc2scjQFG+eZhe98\nEll/kb6vpBoY4bG9/wCG9qu7jj9Z+BceCNKeHllbezVLCU/Hswivr7h2dnaEFvPD\nO4GqiWiwOF06XaBMVgxA8p2HRw0KtXqOpZk+o+sUvdPjsBw42BB96A1yFX4jgFNA\nPyZYnEUdP6OOv9HSjnl7k/iEkvHq/jGYMMojixlvXpGXhnt5jNyc4GSUJQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChhdXhpbGlhcnkga2V5KSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjkEEwECACMFAlsy23UCGwMHCwkIBwMCAQYVCAIJCgsEFgIDAQIeAQIX\ngAAKCRD3b2bD1AgnknqOD/9fB2ASuG2aJIiap4kK58R+RmOVM4qgclAnaG57+vjI\nnKvyfV3NH/keplGNRxwqHekfPCqvkpABwhdGEXIE8ILqnPewIMr6PZNZWNJynZ9i\neSMzVuCG7jDoGyQ5/6B0f6xeBtTeBDiRl7+Alehet1twuGL1BJUYG0QuLgcEzkaE\n/gkuumeVcazLzz7L12D22nMk66GxmgXfqS5zcbqOAuZwaA6VgSEgFdV2X2JU79zS\nBQJXv7NKc+nDXFG7M7EHjY3Rma3HXkDbkT8bzh9tJV7Z7TlpT829pStWQyoxKCVq\nsEX8WsSapTKA3P9YkYCwLShgZu4HKRFvHMaIasSIZWzLu+RZH/4yyHOhj0QB7XMY\neHQ6fGSbtJ+K6SrpHOOsKQNAJ0hVbSrnA1cr5+2SDfel1RfYt0W9FA6DoH/S5gAR\ndzT1u44QVwwp3U+eFpHphFy//uzxNMtCjjdkpzhYYhOCLNkDrlRPb+bcoL/6ePSr\n016PA7eEnuC305YU1Ml2WcCn7wQV8x90o33klJmEkWtXh3X39vYtI4nCPIvZn1eP\nVy+F+wWt4vN2b8oOdlzc2paOembbCo2B+Wapv5Y9peBvlbsDSgqtJABfK8KQq/jK\nYl3h5elIa1I3uNfczeHOnf1enLOUOlq630yeM/yHizz99G1g+z/guMh5+x/OHraW\niLkCDQRbMtt1ARAA1lNsWklhS9LoBdolTVtg65FfdFJr47pzKRGYIoGLbcJ155ND\nG+P8UrM06E/ah06EEWuvu2YyyYAz1iYGsCwHAXtbEJh+1tF0iOVx2vnZPgtIGE9V\nP95V5ZvWvB3bdke1z8HadDA+/Ve7fbwXXLa/z9QhSQgsJ8NS8KYnDDjI4EvQtv0i\nPVLY8+u8z6VyiV9RJyn8UEZEJdbFDF9AZAT8103w8SEo/cvIoUbVKZLGcXdAIjCa\ny04u6jsrMp9UGHZX7+srT+9YHDzQixei4IdmxUcqtiNR2/bFHpHCu1pzYjXj968D\n8Ng2txBXDgs16BF/9l++GWKz2dOSH0jdS6sFJ/Dmg7oYnJ2xKSJEmcnV8Z0M1n4w\nXR1t/KeKZe3aR+RXCAEVC5dQ3GbRW2+WboJ6ldgFcVcOv6iOSWP9TrLzFPOpCsIr\nnHE+cMBmPHq3dUm7KeYXQ6wWWmtXlw6widf7cBcGFeELpuU9klzqdKze8qo2oMkf\nrfxIq8zdciPxZXb/75dGWs6dLHQmDpo4MdQVskw5vvwHicMpUpGpxkX7X1XAfdQf\nyIHLGT4ZXuMLIMUPdzJE0Vwt/RtJrZ+feLSv/+0CkkpGHORYroGwIBrJ2RikgcV2\nbc98V/27Kz2ngUCEwnmlhIcrY4IGAAZzUAl0GLHSevPbAREu4fDW4Y+ztOsAEQEA\nAYkCHwQYAQIACQUCWzLbdQIbDAAKCRD3b2bD1AgnkusfD/9U4sPtZfMw6cII167A\nXRZOO195G7oiAnBUw5AW6EK0SAHVZcuW0LMMXnGe9f4UsEUgCNwo5mvLWPxzKqFq\n6/G3kEZVFwZ0qrlLoJPeHNbOcfkeZ9NgD/OhzQmdylM0IwGM9DMrm2YS4EVsmm2b\n53qKIfIyysp1yAGcTnBwBbZ85osNBl2KRDIPhMs0bnmGB7IAvwlSb+xm6vWKECkO\nlwQDO5Kg8YZ8+Z3pn/oS688t/fPXvWLZYUqwR63oWfIaPJI7Ahv2jJmgw1ofL81r\n2CE3T/OydtUeGLzqWJAB8sbUgT3ug0cjtxsHuroQBSYBND3XDb/EQh5GeVVnGKKH\ngESLFAoweoNjDSXrlIu1gFjCDHF4CqBRmNYKrNQjLmhCrSfwkytXESJwlLzFKY8P\nK1yZyTpDC9YK0G7qgrk7EHmH9JAZTQ5V65pp0vR9KvqTU5ewkQDIljD2f3FIqo2B\nSKNCQE+N6NjWaTeNlU75m+yZocKObSPg0zS8FAuSJetNtzXA7ouqk34OoIMQj4gq\nUnh/i1FcZAd4U6Dtr9aRZ6PeLlm6MJ/h582L6fJLNEu136UWDtJj5eBYEzX13l+d\nSC4PEHx7ZZRwQKptl9NkinLZGJztg175paUu8C34sAv+SQnM20c0pdOXAq9GKKhi\nvt61kpkXoRGxjTlc6h+69aidSg==\n=ls8J\n-----END PGP PUBLIC KEY BLOCK-----\n", - "-----BEGIN PGP PUBLIC KEY 
BLOCK-----\n\nmQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF\n0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF\n0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c\nu7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh\nXGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H\n5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW\n9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj\n/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1\nPcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY\nHVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF\nbuhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjYEEwECACAFAkrgSTsCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAK\nCRAZni+R/UMdUWzpD/9s5SFR/ZF3yjY5VLUFLMXIKUztNN3oc45fyLdTI3+UClKC\n2tEruzYjqNHhqAEXa2sN1fMrsuKec61Ll2NfvJjkLKDvgVIh7kM7aslNYVOP6BTf\nC/JJ7/ufz3UZmyViH/WDl+AYdgk3JqCIO5w5ryrC9IyBzYv2m0HqYbWfphY3uHw5\nun3ndLJcu8+BGP5F+ONQEGl+DRH58Il9Jp3HwbRa7dvkPgEhfFR+1hI+Btta2C7E\n0/2NKzCxZw7Lx3PBRcU92YKyaEihfy/aQKZCAuyfKiMvsmzs+4poIX7I9NQCJpyE\nIGfINoZ7VxqHwRn/d5mw2MZTJjbzSf+Um9YJyA0iEEyD6qjriWQRbuxpQXmlAJbh\n8okZ4gbVFv1F8MzK+4R8VvWJ0XxgtikSo72fHjwha7MAjqFnOq6eo6fEC/75g3NL\nGht5VdpGuHk0vbdENHMC8wS99e5qXGNDued3hlTavDMlEAHl34q2H9nakTGRF5Ki\nJUfNh3DVRGhg8cMIti21njiRh7gyFI2OccATY7bBSr79JhuNwelHuxLrCFpY7V25\nOFktl15jZJaMxuQBqYdBgSay2G0U6D1+7VsWufpzd/Abx1/c3oi9ZaJvW22kAggq\ndzdA27UUYjWvx42w9menJwh/0jeQcTecIUd0d0rFcw/c1pvgMMl/Q73yzKgKYw==\n=zbHE\n-----END PGP PUBLIC KEY BLOCK-----\n-----BEGIN PGP PUBLIC KEY BLOCK-----\n\nmQINBFsy23UBEACUKSphFEIEvNpy68VeW4Dt6qv+mU6am9a2AAl10JANLj1oqWX+\noYk3en1S6cVe2qehSL5DGVa3HMUZkP3dtbD4SgzXzxPodebPcr4+0QNWigkUisri\nXGL5SCEcOP30zDhZvg+4mpO2jMi7Kc1DLPzBBkgppcX91wa0L1pQzBcvYMPyV/Dh\nKbQHR75WdkP6OA2JXdfC94nxYq+2e0iPqC1hCP3Elh+YnSkOkrawDPmoB1g4+ft/\nxsiVGVy/W0ekXmgvYEHt6si6Y8NwXgnTMqxeSXQ9YUgVIbTpsxHQKGy76T5lMlWX\n4LCOmEVomBJg1SqF6yi9Vu8TeNThaDqT4/DddYInd0OO69s0kGIXalVgGYiW2HOD\nx2q5R1VGCoJxXomz+EbOXY+HpKPOHAjU0DB9MxbU3S248LQ69nIB5uxysy0PSco1\nsdZ8sxRNQ9Dw6on0Nowx5m6Thefzs5iK3dnPGBqHTT43DHbnWc2scjQFG+eZhe98\nEll/kb6vpBoY4bG9/wCG9qu7jj9Z+BceCNKeHllbezVLCU/Hswivr7h2dnaEFvPD\nO4GqiWiwOF06XaBMVgxA8p2HRw0KtXqOpZk+o+sUvdPjsBw42BB96A1yFX4jgFNA\nPyZYnEUdP6OOv9HSjnl7k/iEkvHq/jGYMMojixlvXpGXhnt5jNyc4GSUJQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChhdXhpbGlhcnkga2V5KSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjkEEwECACMFAlsy23UCGwMHCwkIBwMCAQYVCAIJCgsEFgIDAQIeAQIX\ngAAKCRD3b2bD1AgnknqOD/9fB2ASuG2aJIiap4kK58R+RmOVM4qgclAnaG57+vjI\nnKvyfV3NH/keplGNRxwqHekfPCqvkpABwhdGEXIE8ILqnPewIMr6PZNZWNJynZ9i\neSMzVuCG7jDoGyQ5/6B0f6xeBtTeBDiRl7+Alehet1twuGL1BJUYG0QuLgcEzkaE\n/gkuumeVcazLzz7L12D22nMk66GxmgXfqS5zcbqOAuZwaA6VgSEgFdV2X2JU79zS\nBQJXv7NKc+nDXFG7M7EHjY3Rma3HXkDbkT8bzh9tJV7Z7TlpT829pStWQyoxKCVq\nsEX8WsSapTKA3P9YkYCwLShgZu4HKRFvHMaIasSIZWzLu+RZH/4yyHOhj0QB7XMY\neHQ6fGSbtJ+K6SrpHOOsKQNAJ0hVbSrnA1cr5+2SDfel1RfYt0W9FA6DoH/S5gAR\ndzT1u44QVwwp3U+eFpHphFy//uzxNMtCjjdkpzhYYhOCLNkDrlRPb+bcoL/6ePSr\n016PA7eEnuC305YU1Ml2WcCn7wQV8x90o33klJmEkWtXh3X39vYtI4nCPIvZn1eP\nVy+F+wWt4vN2b8oOdlzc2paOembbCo2B+Wapv5Y9peBvlbsDSgqtJABfK8KQq/jK\nYl3h5elIa1I3uNfczeHOnf1enLOUOlq630yeM/yHizz99G1g+z/guMh5+x/OHraW\niLkCDQRbMtt1ARAA1lNsWklhS9LoBdolTVtg65FfdFJr47pzKRGYIoGLbcJ155ND\nG+P8UrM06E/ah06EEWuvu2YyyYAz1iYGsCwHAXtbEJh+1tF0iOVx2vnZPgtIGE9V\nP95V5ZvWvB3bdke1z8HadDA+/Ve7fbwXXLa/z9QhSQgsJ8NS8KYnDDjI4EvQtv0i\nPVLY8+u8z6VyiV9RJyn8UEZEJdbFDF9AZAT8103w8SEo/cvIoUbVKZLGcXdAIjCa\ny04u6jsrMp9UGHZX7+srT+9YHD
zQixei4IdmxUcqtiNR2/bFHpHCu1pzYjXj968D\n8Ng2txBXDgs16BF/9l++GWKz2dOSH0jdS6sFJ/Dmg7oYnJ2xKSJEmcnV8Z0M1n4w\nXR1t/KeKZe3aR+RXCAEVC5dQ3GbRW2+WboJ6ldgFcVcOv6iOSWP9TrLzFPOpCsIr\nnHE+cMBmPHq3dUm7KeYXQ6wWWmtXlw6widf7cBcGFeELpuU9klzqdKze8qo2oMkf\nrfxIq8zdciPxZXb/75dGWs6dLHQmDpo4MdQVskw5vvwHicMpUpGpxkX7X1XAfdQf\nyIHLGT4ZXuMLIMUPdzJE0Vwt/RtJrZ+feLSv/+0CkkpGHORYroGwIBrJ2RikgcV2\nbc98V/27Kz2ngUCEwnmlhIcrY4IGAAZzUAl0GLHSevPbAREu4fDW4Y+ztOsAEQEA\nAYkCHwQYAQIACQUCWzLbdQIbDAAKCRD3b2bD1AgnkusfD/9U4sPtZfMw6cII167A\nXRZOO195G7oiAnBUw5AW6EK0SAHVZcuW0LMMXnGe9f4UsEUgCNwo5mvLWPxzKqFq\n6/G3kEZVFwZ0qrlLoJPeHNbOcfkeZ9NgD/OhzQmdylM0IwGM9DMrm2YS4EVsmm2b\n53qKIfIyysp1yAGcTnBwBbZ85osNBl2KRDIPhMs0bnmGB7IAvwlSb+xm6vWKECkO\nlwQDO5Kg8YZ8+Z3pn/oS688t/fPXvWLZYUqwR63oWfIaPJI7Ahv2jJmgw1ofL81r\n2CE3T/OydtUeGLzqWJAB8sbUgT3ug0cjtxsHuroQBSYBND3XDb/EQh5GeVVnGKKH\ngESLFAoweoNjDSXrlIu1gFjCDHF4CqBRmNYKrNQjLmhCrSfwkytXESJwlLzFKY8P\nK1yZyTpDC9YK0G7qgrk7EHmH9JAZTQ5V65pp0vR9KvqTU5ewkQDIljD2f3FIqo2B\nSKNCQE+N6NjWaTeNlU75m+yZocKObSPg0zS8FAuSJetNtzXA7ouqk34OoIMQj4gq\nUnh/i1FcZAd4U6Dtr9aRZ6PeLlm6MJ/h582L6fJLNEu136UWDtJj5eBYEzX13l+d\nSC4PEHx7ZZRwQKptl9NkinLZGJztg175paUu8C34sAv+SQnM20c0pdOXAq9GKKhi\nvt61kpkXoRGxjTlc6h+69aidSg==\n=ls8J\n-----END PGP PUBLIC KEY BLOCK-----\n" + "-----BEGIN PGP PUBLIC KEY BLOCK-----\nVersion: GnuPG v2.0.22 (GNU/Linux)\n\nmQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF\n0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF\n0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c\nu7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh\nXGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H\n5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW\n9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj\n/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1\nPcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY\nHVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF\nbuhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjYEEwEIACACGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAUCSuBJPAAK\nCRAZni+R/UMdUfIkD/9m3HWv07uJG26R3KBexTo2FFu3rmZs+m2nfW8R3dBX+k0o\nAOFpgJCsNgKwU81LOPrkMN19G0+Yn/ZTCDD7cIQ7dhYuDyEX97xh4une/EhnnRuh\nASzR+1xYbj/HcYZIL9kbslgpebMn+AhxbUTQF/mziug3hLidR9Bzvygq0Q09E11c\nOZL4BU6J2HqxL+9m2F+tnLdfhL7MsAq9nbmWAOpkbGefc5SXBSq0sWfwoes3X3yD\nQ8B5Xqr9AxABU7oUB+wRqvY69ZCxi/BhuuJCUxY89ZmwXfkVxeHl1tYfROUwOnJO\nGYSbI/o41KBK4DkIiDcT7QqvqvCyudnxZdBjL2QU6OrIJvWmKs319qSF9m3mXRSt\nZzWtB89Pj5LZ6cdtuHvW9GO4qSoBLmAfB313pGkbgi1DE6tqCLHlA0yQ8zv99OWV\ncMDGmS7tVTZqfX1xQJ0N3bNORQNtikJC3G+zBCJzIeZleeDlMDQcww00yWU1oE7/\nTo2UmykMGc7o9iggFWR2g0PIcKsA/SXdRKWPqCHG2uKHBvdRTQGupdXQ1sbV+AHw\nycyA/9H/mp/NUSNM2cqnBDcZ6GhlHt59zWtEveiuU5fpTbp4GVcFXbW8jStj8j8z\n1HI3cywZO8+YNPzqyx0JWsidXGkfzkPHyS4jTG84lfu2JG8m/nqLnRSeKpl20Q==\n=79bX\n-----END PGP PUBLIC KEY BLOCK-----\n-----BEGIN PGP PUBLIC KEY 
BLOCK-----\n\nmQINBFsy23UBEACUKSphFEIEvNpy68VeW4Dt6qv+mU6am9a2AAl10JANLj1oqWX+\noYk3en1S6cVe2qehSL5DGVa3HMUZkP3dtbD4SgzXzxPodebPcr4+0QNWigkUisri\nXGL5SCEcOP30zDhZvg+4mpO2jMi7Kc1DLPzBBkgppcX91wa0L1pQzBcvYMPyV/Dh\nKbQHR75WdkP6OA2JXdfC94nxYq+2e0iPqC1hCP3Elh+YnSkOkrawDPmoB1g4+ft/\nxsiVGVy/W0ekXmgvYEHt6si6Y8NwXgnTMqxeSXQ9YUgVIbTpsxHQKGy76T5lMlWX\n4LCOmEVomBJg1SqF6yi9Vu8TeNThaDqT4/DddYInd0OO69s0kGIXalVgGYiW2HOD\nx2q5R1VGCoJxXomz+EbOXY+HpKPOHAjU0DB9MxbU3S248LQ69nIB5uxysy0PSco1\nsdZ8sxRNQ9Dw6on0Nowx5m6Thefzs5iK3dnPGBqHTT43DHbnWc2scjQFG+eZhe98\nEll/kb6vpBoY4bG9/wCG9qu7jj9Z+BceCNKeHllbezVLCU/Hswivr7h2dnaEFvPD\nO4GqiWiwOF06XaBMVgxA8p2HRw0KtXqOpZk+o+sUvdPjsBw42BB96A1yFX4jgFNA\nPyZYnEUdP6OOv9HSjnl7k/iEkvHq/jGYMMojixlvXpGXhnt5jNyc4GSUJQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChhdXhpbGlhcnkga2V5KSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjkEEwECACMFAlsy23UCGwMHCwkIBwMCAQYVCAIJCgsEFgIDAQIeAQIX\ngAAKCRD3b2bD1AgnknqOD/9fB2ASuG2aJIiap4kK58R+RmOVM4qgclAnaG57+vjI\nnKvyfV3NH/keplGNRxwqHekfPCqvkpABwhdGEXIE8ILqnPewIMr6PZNZWNJynZ9i\neSMzVuCG7jDoGyQ5/6B0f6xeBtTeBDiRl7+Alehet1twuGL1BJUYG0QuLgcEzkaE\n/gkuumeVcazLzz7L12D22nMk66GxmgXfqS5zcbqOAuZwaA6VgSEgFdV2X2JU79zS\nBQJXv7NKc+nDXFG7M7EHjY3Rma3HXkDbkT8bzh9tJV7Z7TlpT829pStWQyoxKCVq\nsEX8WsSapTKA3P9YkYCwLShgZu4HKRFvHMaIasSIZWzLu+RZH/4yyHOhj0QB7XMY\neHQ6fGSbtJ+K6SrpHOOsKQNAJ0hVbSrnA1cr5+2SDfel1RfYt0W9FA6DoH/S5gAR\ndzT1u44QVwwp3U+eFpHphFy//uzxNMtCjjdkpzhYYhOCLNkDrlRPb+bcoL/6ePSr\n016PA7eEnuC305YU1Ml2WcCn7wQV8x90o33klJmEkWtXh3X39vYtI4nCPIvZn1eP\nVy+F+wWt4vN2b8oOdlzc2paOembbCo2B+Wapv5Y9peBvlbsDSgqtJABfK8KQq/jK\nYl3h5elIa1I3uNfczeHOnf1enLOUOlq630yeM/yHizz99G1g+z/guMh5+x/OHraW\niLkCDQRbMtt1ARAA1lNsWklhS9LoBdolTVtg65FfdFJr47pzKRGYIoGLbcJ155ND\nG+P8UrM06E/ah06EEWuvu2YyyYAz1iYGsCwHAXtbEJh+1tF0iOVx2vnZPgtIGE9V\nP95V5ZvWvB3bdke1z8HadDA+/Ve7fbwXXLa/z9QhSQgsJ8NS8KYnDDjI4EvQtv0i\nPVLY8+u8z6VyiV9RJyn8UEZEJdbFDF9AZAT8103w8SEo/cvIoUbVKZLGcXdAIjCa\ny04u6jsrMp9UGHZX7+srT+9YHDzQixei4IdmxUcqtiNR2/bFHpHCu1pzYjXj968D\n8Ng2txBXDgs16BF/9l++GWKz2dOSH0jdS6sFJ/Dmg7oYnJ2xKSJEmcnV8Z0M1n4w\nXR1t/KeKZe3aR+RXCAEVC5dQ3GbRW2+WboJ6ldgFcVcOv6iOSWP9TrLzFPOpCsIr\nnHE+cMBmPHq3dUm7KeYXQ6wWWmtXlw6widf7cBcGFeELpuU9klzqdKze8qo2oMkf\nrfxIq8zdciPxZXb/75dGWs6dLHQmDpo4MdQVskw5vvwHicMpUpGpxkX7X1XAfdQf\nyIHLGT4ZXuMLIMUPdzJE0Vwt/RtJrZ+feLSv/+0CkkpGHORYroGwIBrJ2RikgcV2\nbc98V/27Kz2ngUCEwnmlhIcrY4IGAAZzUAl0GLHSevPbAREu4fDW4Y+ztOsAEQEA\nAYkCHwQYAQIACQUCWzLbdQIbDAAKCRD3b2bD1AgnkusfD/9U4sPtZfMw6cII167A\nXRZOO195G7oiAnBUw5AW6EK0SAHVZcuW0LMMXnGe9f4UsEUgCNwo5mvLWPxzKqFq\n6/G3kEZVFwZ0qrlLoJPeHNbOcfkeZ9NgD/OhzQmdylM0IwGM9DMrm2YS4EVsmm2b\n53qKIfIyysp1yAGcTnBwBbZ85osNBl2KRDIPhMs0bnmGB7IAvwlSb+xm6vWKECkO\nlwQDO5Kg8YZ8+Z3pn/oS688t/fPXvWLZYUqwR63oWfIaPJI7Ahv2jJmgw1ofL81r\n2CE3T/OydtUeGLzqWJAB8sbUgT3ug0cjtxsHuroQBSYBND3XDb/EQh5GeVVnGKKH\ngESLFAoweoNjDSXrlIu1gFjCDHF4CqBRmNYKrNQjLmhCrSfwkytXESJwlLzFKY8P\nK1yZyTpDC9YK0G7qgrk7EHmH9JAZTQ5V65pp0vR9KvqTU5ewkQDIljD2f3FIqo2B\nSKNCQE+N6NjWaTeNlU75m+yZocKObSPg0zS8FAuSJetNtzXA7ouqk34OoIMQj4gq\nUnh/i1FcZAd4U6Dtr9aRZ6PeLlm6MJ/h582L6fJLNEu136UWDtJj5eBYEzX13l+d\nSC4PEHx7ZZRwQKptl9NkinLZGJztg175paUu8C34sAv+SQnM20c0pdOXAq9GKKhi\nvt61kpkXoRGxjTlc6h+69aidSg==\n=ls8J\n-----END PGP PUBLIC KEY BLOCK-----" ], "packages": [ { @@ -343,9 +337,7 @@ const v1ResultFailure = ` "id": "dcf25b0b73304c9ea2e02fe1c835c5234242697061d37849b23a2bd44de764ff", "options": { "gpgkeys": [ - "-----BEGIN PGP PUBLIC KEY 
BLOCK-----\n\nmQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF\n0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF\n0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c\nu7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh\nXGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H\n5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW\n9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj\n/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1\nPcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY\nHVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF\nbuhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjYEEwECACAFAkrgSTsCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAK\nCRAZni+R/UMdUWzpD/9s5SFR/ZF3yjY5VLUFLMXIKUztNN3oc45fyLdTI3+UClKC\n2tEruzYjqNHhqAEXa2sN1fMrsuKec61Ll2NfvJjkLKDvgVIh7kM7aslNYVOP6BTf\nC/JJ7/ufz3UZmyViH/WDl+AYdgk3JqCIO5w5ryrC9IyBzYv2m0HqYbWfphY3uHw5\nun3ndLJcu8+BGP5F+ONQEGl+DRH58Il9Jp3HwbRa7dvkPgEhfFR+1hI+Btta2C7E\n0/2NKzCxZw7Lx3PBRcU92YKyaEihfy/aQKZCAuyfKiMvsmzs+4poIX7I9NQCJpyE\nIGfINoZ7VxqHwRn/d5mw2MZTJjbzSf+Um9YJyA0iEEyD6qjriWQRbuxpQXmlAJbh\n8okZ4gbVFv1F8MzK+4R8VvWJ0XxgtikSo72fHjwha7MAjqFnOq6eo6fEC/75g3NL\nGht5VdpGuHk0vbdENHMC8wS99e5qXGNDued3hlTavDMlEAHl34q2H9nakTGRF5Ki\nJUfNh3DVRGhg8cMIti21njiRh7gyFI2OccATY7bBSr79JhuNwelHuxLrCFpY7V25\nOFktl15jZJaMxuQBqYdBgSay2G0U6D1+7VsWufpzd/Abx1/c3oi9ZaJvW22kAggq\ndzdA27UUYjWvx42w9menJwh/0jeQcTecIUd0d0rFcw/c1pvgMMl/Q73yzKgKYw==\n=zbHE\n-----END PGP PUBLIC KEY BLOCK-----\n-----BEGIN PGP PUBLIC KEY BLOCK-----\n\nmQINBFsy23UBEACUKSphFEIEvNpy68VeW4Dt6qv+mU6am9a2AAl10JANLj1oqWX+\noYk3en1S6cVe2qehSL5DGVa3HMUZkP3dtbD4SgzXzxPodebPcr4+0QNWigkUisri\nXGL5SCEcOP30zDhZvg+4mpO2jMi7Kc1DLPzBBkgppcX91wa0L1pQzBcvYMPyV/Dh\nKbQHR75WdkP6OA2JXdfC94nxYq+2e0iPqC1hCP3Elh+YnSkOkrawDPmoB1g4+ft/\nxsiVGVy/W0ekXmgvYEHt6si6Y8NwXgnTMqxeSXQ9YUgVIbTpsxHQKGy76T5lMlWX\n4LCOmEVomBJg1SqF6yi9Vu8TeNThaDqT4/DddYInd0OO69s0kGIXalVgGYiW2HOD\nx2q5R1VGCoJxXomz+EbOXY+HpKPOHAjU0DB9MxbU3S248LQ69nIB5uxysy0PSco1\nsdZ8sxRNQ9Dw6on0Nowx5m6Thefzs5iK3dnPGBqHTT43DHbnWc2scjQFG+eZhe98\nEll/kb6vpBoY4bG9/wCG9qu7jj9Z+BceCNKeHllbezVLCU/Hswivr7h2dnaEFvPD\nO4GqiWiwOF06XaBMVgxA8p2HRw0KtXqOpZk+o+sUvdPjsBw42BB96A1yFX4jgFNA\nPyZYnEUdP6OOv9HSjnl7k/iEkvHq/jGYMMojixlvXpGXhnt5jNyc4GSUJQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChhdXhpbGlhcnkga2V5KSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjkEEwECACMFAlsy23UCGwMHCwkIBwMCAQYVCAIJCgsEFgIDAQIeAQIX\ngAAKCRD3b2bD1AgnknqOD/9fB2ASuG2aJIiap4kK58R+RmOVM4qgclAnaG57+vjI\nnKvyfV3NH/keplGNRxwqHekfPCqvkpABwhdGEXIE8ILqnPewIMr6PZNZWNJynZ9i\neSMzVuCG7jDoGyQ5/6B0f6xeBtTeBDiRl7+Alehet1twuGL1BJUYG0QuLgcEzkaE\n/gkuumeVcazLzz7L12D22nMk66GxmgXfqS5zcbqOAuZwaA6VgSEgFdV2X2JU79zS\nBQJXv7NKc+nDXFG7M7EHjY3Rma3HXkDbkT8bzh9tJV7Z7TlpT829pStWQyoxKCVq\nsEX8WsSapTKA3P9YkYCwLShgZu4HKRFvHMaIasSIZWzLu+RZH/4yyHOhj0QB7XMY\neHQ6fGSbtJ+K6SrpHOOsKQNAJ0hVbSrnA1cr5+2SDfel1RfYt0W9FA6DoH/S5gAR\ndzT1u44QVwwp3U+eFpHphFy//uzxNMtCjjdkpzhYYhOCLNkDrlRPb+bcoL/6ePSr\n016PA7eEnuC305YU1Ml2WcCn7wQV8x90o33klJmEkWtXh3X39vYtI4nCPIvZn1eP\nVy+F+wWt4vN2b8oOdlzc2paOembbCo2B+Wapv5Y9peBvlbsDSgqtJABfK8KQq/jK\nYl3h5elIa1I3uNfczeHOnf1enLOUOlq630yeM/yHizz99G1g+z/guMh5+x/OHraW\niLkCDQRbMtt1ARAA1lNsWklhS9LoBdolTVtg65FfdFJr47pzKRGYIoGLbcJ155ND\nG+P8UrM06E/ah06EEWuvu2YyyYAz1iYGsCwHAXtbEJh+1tF0iOVx2vnZPgtIGE9V\nP95V5ZvWvB3bdke1z8HadDA+/Ve7fbwXXLa/z9QhSQgsJ8NS8KYnDDjI4EvQtv0i\nPVLY8+u8z6VyiV9RJyn8UEZEJdbFDF9AZAT8103w8SEo/cvIoUbVKZLGcXdAIjCa\ny04u6jsrMp9UGHZX7+srT+9YHD
zQixei4IdmxUcqtiNR2/bFHpHCu1pzYjXj968D\n8Ng2txBXDgs16BF/9l++GWKz2dOSH0jdS6sFJ/Dmg7oYnJ2xKSJEmcnV8Z0M1n4w\nXR1t/KeKZe3aR+RXCAEVC5dQ3GbRW2+WboJ6ldgFcVcOv6iOSWP9TrLzFPOpCsIr\nnHE+cMBmPHq3dUm7KeYXQ6wWWmtXlw6widf7cBcGFeELpuU9klzqdKze8qo2oMkf\nrfxIq8zdciPxZXb/75dGWs6dLHQmDpo4MdQVskw5vvwHicMpUpGpxkX7X1XAfdQf\nyIHLGT4ZXuMLIMUPdzJE0Vwt/RtJrZ+feLSv/+0CkkpGHORYroGwIBrJ2RikgcV2\nbc98V/27Kz2ngUCEwnmlhIcrY4IGAAZzUAl0GLHSevPbAREu4fDW4Y+ztOsAEQEA\nAYkCHwQYAQIACQUCWzLbdQIbDAAKCRD3b2bD1AgnkusfD/9U4sPtZfMw6cII167A\nXRZOO195G7oiAnBUw5AW6EK0SAHVZcuW0LMMXnGe9f4UsEUgCNwo5mvLWPxzKqFq\n6/G3kEZVFwZ0qrlLoJPeHNbOcfkeZ9NgD/OhzQmdylM0IwGM9DMrm2YS4EVsmm2b\n53qKIfIyysp1yAGcTnBwBbZ85osNBl2KRDIPhMs0bnmGB7IAvwlSb+xm6vWKECkO\nlwQDO5Kg8YZ8+Z3pn/oS688t/fPXvWLZYUqwR63oWfIaPJI7Ahv2jJmgw1ofL81r\n2CE3T/OydtUeGLzqWJAB8sbUgT3ug0cjtxsHuroQBSYBND3XDb/EQh5GeVVnGKKH\ngESLFAoweoNjDSXrlIu1gFjCDHF4CqBRmNYKrNQjLmhCrSfwkytXESJwlLzFKY8P\nK1yZyTpDC9YK0G7qgrk7EHmH9JAZTQ5V65pp0vR9KvqTU5ewkQDIljD2f3FIqo2B\nSKNCQE+N6NjWaTeNlU75m+yZocKObSPg0zS8FAuSJetNtzXA7ouqk34OoIMQj4gq\nUnh/i1FcZAd4U6Dtr9aRZ6PeLlm6MJ/h582L6fJLNEu136UWDtJj5eBYEzX13l+d\nSC4PEHx7ZZRwQKptl9NkinLZGJztg175paUu8C34sAv+SQnM20c0pdOXAq9GKKhi\nvt61kpkXoRGxjTlc6h+69aidSg==\n=ls8J\n-----END PGP PUBLIC KEY BLOCK-----\n", - "-----BEGIN PGP PUBLIC KEY BLOCK-----\n\nmQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF\n0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF\n0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c\nu7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh\nXGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H\n5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW\n9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj\n/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1\nPcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY\nHVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF\nbuhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjYEEwECACAFAkrgSTsCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAK\nCRAZni+R/UMdUWzpD/9s5SFR/ZF3yjY5VLUFLMXIKUztNN3oc45fyLdTI3+UClKC\n2tEruzYjqNHhqAEXa2sN1fMrsuKec61Ll2NfvJjkLKDvgVIh7kM7aslNYVOP6BTf\nC/JJ7/ufz3UZmyViH/WDl+AYdgk3JqCIO5w5ryrC9IyBzYv2m0HqYbWfphY3uHw5\nun3ndLJcu8+BGP5F+ONQEGl+DRH58Il9Jp3HwbRa7dvkPgEhfFR+1hI+Btta2C7E\n0/2NKzCxZw7Lx3PBRcU92YKyaEihfy/aQKZCAuyfKiMvsmzs+4poIX7I9NQCJpyE\nIGfINoZ7VxqHwRn/d5mw2MZTJjbzSf+Um9YJyA0iEEyD6qjriWQRbuxpQXmlAJbh\n8okZ4gbVFv1F8MzK+4R8VvWJ0XxgtikSo72fHjwha7MAjqFnOq6eo6fEC/75g3NL\nGht5VdpGuHk0vbdENHMC8wS99e5qXGNDued3hlTavDMlEAHl34q2H9nakTGRF5Ki\nJUfNh3DVRGhg8cMIti21njiRh7gyFI2OccATY7bBSr79JhuNwelHuxLrCFpY7V25\nOFktl15jZJaMxuQBqYdBgSay2G0U6D1+7VsWufpzd/Abx1/c3oi9ZaJvW22kAggq\ndzdA27UUYjWvx42w9menJwh/0jeQcTecIUd0d0rFcw/c1pvgMMl/Q73yzKgKYw==\n=zbHE\n-----END PGP PUBLIC KEY BLOCK-----\n-----BEGIN PGP PUBLIC KEY 
BLOCK-----\n\nmQINBFsy23UBEACUKSphFEIEvNpy68VeW4Dt6qv+mU6am9a2AAl10JANLj1oqWX+\noYk3en1S6cVe2qehSL5DGVa3HMUZkP3dtbD4SgzXzxPodebPcr4+0QNWigkUisri\nXGL5SCEcOP30zDhZvg+4mpO2jMi7Kc1DLPzBBkgppcX91wa0L1pQzBcvYMPyV/Dh\nKbQHR75WdkP6OA2JXdfC94nxYq+2e0iPqC1hCP3Elh+YnSkOkrawDPmoB1g4+ft/\nxsiVGVy/W0ekXmgvYEHt6si6Y8NwXgnTMqxeSXQ9YUgVIbTpsxHQKGy76T5lMlWX\n4LCOmEVomBJg1SqF6yi9Vu8TeNThaDqT4/DddYInd0OO69s0kGIXalVgGYiW2HOD\nx2q5R1VGCoJxXomz+EbOXY+HpKPOHAjU0DB9MxbU3S248LQ69nIB5uxysy0PSco1\nsdZ8sxRNQ9Dw6on0Nowx5m6Thefzs5iK3dnPGBqHTT43DHbnWc2scjQFG+eZhe98\nEll/kb6vpBoY4bG9/wCG9qu7jj9Z+BceCNKeHllbezVLCU/Hswivr7h2dnaEFvPD\nO4GqiWiwOF06XaBMVgxA8p2HRw0KtXqOpZk+o+sUvdPjsBw42BB96A1yFX4jgFNA\nPyZYnEUdP6OOv9HSjnl7k/iEkvHq/jGYMMojixlvXpGXhnt5jNyc4GSUJQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChhdXhpbGlhcnkga2V5KSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjkEEwECACMFAlsy23UCGwMHCwkIBwMCAQYVCAIJCgsEFgIDAQIeAQIX\ngAAKCRD3b2bD1AgnknqOD/9fB2ASuG2aJIiap4kK58R+RmOVM4qgclAnaG57+vjI\nnKvyfV3NH/keplGNRxwqHekfPCqvkpABwhdGEXIE8ILqnPewIMr6PZNZWNJynZ9i\neSMzVuCG7jDoGyQ5/6B0f6xeBtTeBDiRl7+Alehet1twuGL1BJUYG0QuLgcEzkaE\n/gkuumeVcazLzz7L12D22nMk66GxmgXfqS5zcbqOAuZwaA6VgSEgFdV2X2JU79zS\nBQJXv7NKc+nDXFG7M7EHjY3Rma3HXkDbkT8bzh9tJV7Z7TlpT829pStWQyoxKCVq\nsEX8WsSapTKA3P9YkYCwLShgZu4HKRFvHMaIasSIZWzLu+RZH/4yyHOhj0QB7XMY\neHQ6fGSbtJ+K6SrpHOOsKQNAJ0hVbSrnA1cr5+2SDfel1RfYt0W9FA6DoH/S5gAR\ndzT1u44QVwwp3U+eFpHphFy//uzxNMtCjjdkpzhYYhOCLNkDrlRPb+bcoL/6ePSr\n016PA7eEnuC305YU1Ml2WcCn7wQV8x90o33klJmEkWtXh3X39vYtI4nCPIvZn1eP\nVy+F+wWt4vN2b8oOdlzc2paOembbCo2B+Wapv5Y9peBvlbsDSgqtJABfK8KQq/jK\nYl3h5elIa1I3uNfczeHOnf1enLOUOlq630yeM/yHizz99G1g+z/guMh5+x/OHraW\niLkCDQRbMtt1ARAA1lNsWklhS9LoBdolTVtg65FfdFJr47pzKRGYIoGLbcJ155ND\nG+P8UrM06E/ah06EEWuvu2YyyYAz1iYGsCwHAXtbEJh+1tF0iOVx2vnZPgtIGE9V\nP95V5ZvWvB3bdke1z8HadDA+/Ve7fbwXXLa/z9QhSQgsJ8NS8KYnDDjI4EvQtv0i\nPVLY8+u8z6VyiV9RJyn8UEZEJdbFDF9AZAT8103w8SEo/cvIoUbVKZLGcXdAIjCa\ny04u6jsrMp9UGHZX7+srT+9YHDzQixei4IdmxUcqtiNR2/bFHpHCu1pzYjXj968D\n8Ng2txBXDgs16BF/9l++GWKz2dOSH0jdS6sFJ/Dmg7oYnJ2xKSJEmcnV8Z0M1n4w\nXR1t/KeKZe3aR+RXCAEVC5dQ3GbRW2+WboJ6ldgFcVcOv6iOSWP9TrLzFPOpCsIr\nnHE+cMBmPHq3dUm7KeYXQ6wWWmtXlw6widf7cBcGFeELpuU9klzqdKze8qo2oMkf\nrfxIq8zdciPxZXb/75dGWs6dLHQmDpo4MdQVskw5vvwHicMpUpGpxkX7X1XAfdQf\nyIHLGT4ZXuMLIMUPdzJE0Vwt/RtJrZ+feLSv/+0CkkpGHORYroGwIBrJ2RikgcV2\nbc98V/27Kz2ngUCEwnmlhIcrY4IGAAZzUAl0GLHSevPbAREu4fDW4Y+ztOsAEQEA\nAYkCHwQYAQIACQUCWzLbdQIbDAAKCRD3b2bD1AgnkusfD/9U4sPtZfMw6cII167A\nXRZOO195G7oiAnBUw5AW6EK0SAHVZcuW0LMMXnGe9f4UsEUgCNwo5mvLWPxzKqFq\n6/G3kEZVFwZ0qrlLoJPeHNbOcfkeZ9NgD/OhzQmdylM0IwGM9DMrm2YS4EVsmm2b\n53qKIfIyysp1yAGcTnBwBbZ85osNBl2KRDIPhMs0bnmGB7IAvwlSb+xm6vWKECkO\nlwQDO5Kg8YZ8+Z3pn/oS688t/fPXvWLZYUqwR63oWfIaPJI7Ahv2jJmgw1ofL81r\n2CE3T/OydtUeGLzqWJAB8sbUgT3ug0cjtxsHuroQBSYBND3XDb/EQh5GeVVnGKKH\ngESLFAoweoNjDSXrlIu1gFjCDHF4CqBRmNYKrNQjLmhCrSfwkytXESJwlLzFKY8P\nK1yZyTpDC9YK0G7qgrk7EHmH9JAZTQ5V65pp0vR9KvqTU5ewkQDIljD2f3FIqo2B\nSKNCQE+N6NjWaTeNlU75m+yZocKObSPg0zS8FAuSJetNtzXA7ouqk34OoIMQj4gq\nUnh/i1FcZAd4U6Dtr9aRZ6PeLlm6MJ/h582L6fJLNEu136UWDtJj5eBYEzX13l+d\nSC4PEHx7ZZRwQKptl9NkinLZGJztg175paUu8C34sAv+SQnM20c0pdOXAq9GKKhi\nvt61kpkXoRGxjTlc6h+69aidSg==\n=ls8J\n-----END PGP PUBLIC KEY BLOCK-----\n", - "-----BEGIN PGP PUBLIC KEY 
BLOCK-----\n\nmQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF\n0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF\n0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c\nu7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh\nXGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H\n5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW\n9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj\n/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1\nPcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY\nHVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF\nbuhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjYEEwECACAFAkrgSTsCGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAK\nCRAZni+R/UMdUWzpD/9s5SFR/ZF3yjY5VLUFLMXIKUztNN3oc45fyLdTI3+UClKC\n2tEruzYjqNHhqAEXa2sN1fMrsuKec61Ll2NfvJjkLKDvgVIh7kM7aslNYVOP6BTf\nC/JJ7/ufz3UZmyViH/WDl+AYdgk3JqCIO5w5ryrC9IyBzYv2m0HqYbWfphY3uHw5\nun3ndLJcu8+BGP5F+ONQEGl+DRH58Il9Jp3HwbRa7dvkPgEhfFR+1hI+Btta2C7E\n0/2NKzCxZw7Lx3PBRcU92YKyaEihfy/aQKZCAuyfKiMvsmzs+4poIX7I9NQCJpyE\nIGfINoZ7VxqHwRn/d5mw2MZTJjbzSf+Um9YJyA0iEEyD6qjriWQRbuxpQXmlAJbh\n8okZ4gbVFv1F8MzK+4R8VvWJ0XxgtikSo72fHjwha7MAjqFnOq6eo6fEC/75g3NL\nGht5VdpGuHk0vbdENHMC8wS99e5qXGNDued3hlTavDMlEAHl34q2H9nakTGRF5Ki\nJUfNh3DVRGhg8cMIti21njiRh7gyFI2OccATY7bBSr79JhuNwelHuxLrCFpY7V25\nOFktl15jZJaMxuQBqYdBgSay2G0U6D1+7VsWufpzd/Abx1/c3oi9ZaJvW22kAggq\ndzdA27UUYjWvx42w9menJwh/0jeQcTecIUd0d0rFcw/c1pvgMMl/Q73yzKgKYw==\n=zbHE\n-----END PGP PUBLIC KEY BLOCK-----\n-----BEGIN PGP PUBLIC KEY BLOCK-----\n\nmQINBFsy23UBEACUKSphFEIEvNpy68VeW4Dt6qv+mU6am9a2AAl10JANLj1oqWX+\noYk3en1S6cVe2qehSL5DGVa3HMUZkP3dtbD4SgzXzxPodebPcr4+0QNWigkUisri\nXGL5SCEcOP30zDhZvg+4mpO2jMi7Kc1DLPzBBkgppcX91wa0L1pQzBcvYMPyV/Dh\nKbQHR75WdkP6OA2JXdfC94nxYq+2e0iPqC1hCP3Elh+YnSkOkrawDPmoB1g4+ft/\nxsiVGVy/W0ekXmgvYEHt6si6Y8NwXgnTMqxeSXQ9YUgVIbTpsxHQKGy76T5lMlWX\n4LCOmEVomBJg1SqF6yi9Vu8TeNThaDqT4/DddYInd0OO69s0kGIXalVgGYiW2HOD\nx2q5R1VGCoJxXomz+EbOXY+HpKPOHAjU0DB9MxbU3S248LQ69nIB5uxysy0PSco1\nsdZ8sxRNQ9Dw6on0Nowx5m6Thefzs5iK3dnPGBqHTT43DHbnWc2scjQFG+eZhe98\nEll/kb6vpBoY4bG9/wCG9qu7jj9Z+BceCNKeHllbezVLCU/Hswivr7h2dnaEFvPD\nO4GqiWiwOF06XaBMVgxA8p2HRw0KtXqOpZk+o+sUvdPjsBw42BB96A1yFX4jgFNA\nPyZYnEUdP6OOv9HSjnl7k/iEkvHq/jGYMMojixlvXpGXhnt5jNyc4GSUJQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChhdXhpbGlhcnkga2V5KSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjkEEwECACMFAlsy23UCGwMHCwkIBwMCAQYVCAIJCgsEFgIDAQIeAQIX\ngAAKCRD3b2bD1AgnknqOD/9fB2ASuG2aJIiap4kK58R+RmOVM4qgclAnaG57+vjI\nnKvyfV3NH/keplGNRxwqHekfPCqvkpABwhdGEXIE8ILqnPewIMr6PZNZWNJynZ9i\neSMzVuCG7jDoGyQ5/6B0f6xeBtTeBDiRl7+Alehet1twuGL1BJUYG0QuLgcEzkaE\n/gkuumeVcazLzz7L12D22nMk66GxmgXfqS5zcbqOAuZwaA6VgSEgFdV2X2JU79zS\nBQJXv7NKc+nDXFG7M7EHjY3Rma3HXkDbkT8bzh9tJV7Z7TlpT829pStWQyoxKCVq\nsEX8WsSapTKA3P9YkYCwLShgZu4HKRFvHMaIasSIZWzLu+RZH/4yyHOhj0QB7XMY\neHQ6fGSbtJ+K6SrpHOOsKQNAJ0hVbSrnA1cr5+2SDfel1RfYt0W9FA6DoH/S5gAR\ndzT1u44QVwwp3U+eFpHphFy//uzxNMtCjjdkpzhYYhOCLNkDrlRPb+bcoL/6ePSr\n016PA7eEnuC305YU1Ml2WcCn7wQV8x90o33klJmEkWtXh3X39vYtI4nCPIvZn1eP\nVy+F+wWt4vN2b8oOdlzc2paOembbCo2B+Wapv5Y9peBvlbsDSgqtJABfK8KQq/jK\nYl3h5elIa1I3uNfczeHOnf1enLOUOlq630yeM/yHizz99G1g+z/guMh5+x/OHraW\niLkCDQRbMtt1ARAA1lNsWklhS9LoBdolTVtg65FfdFJr47pzKRGYIoGLbcJ155ND\nG+P8UrM06E/ah06EEWuvu2YyyYAz1iYGsCwHAXtbEJh+1tF0iOVx2vnZPgtIGE9V\nP95V5ZvWvB3bdke1z8HadDA+/Ve7fbwXXLa/z9QhSQgsJ8NS8KYnDDjI4EvQtv0i\nPVLY8+u8z6VyiV9RJyn8UEZEJdbFDF9AZAT8103w8SEo/cvIoUbVKZLGcXdAIjCa\ny04u6jsrMp9UGHZX7+srT+9YHD
zQixei4IdmxUcqtiNR2/bFHpHCu1pzYjXj968D\n8Ng2txBXDgs16BF/9l++GWKz2dOSH0jdS6sFJ/Dmg7oYnJ2xKSJEmcnV8Z0M1n4w\nXR1t/KeKZe3aR+RXCAEVC5dQ3GbRW2+WboJ6ldgFcVcOv6iOSWP9TrLzFPOpCsIr\nnHE+cMBmPHq3dUm7KeYXQ6wWWmtXlw6widf7cBcGFeELpuU9klzqdKze8qo2oMkf\nrfxIq8zdciPxZXb/75dGWs6dLHQmDpo4MdQVskw5vvwHicMpUpGpxkX7X1XAfdQf\nyIHLGT4ZXuMLIMUPdzJE0Vwt/RtJrZ+feLSv/+0CkkpGHORYroGwIBrJ2RikgcV2\nbc98V/27Kz2ngUCEwnmlhIcrY4IGAAZzUAl0GLHSevPbAREu4fDW4Y+ztOsAEQEA\nAYkCHwQYAQIACQUCWzLbdQIbDAAKCRD3b2bD1AgnkusfD/9U4sPtZfMw6cII167A\nXRZOO195G7oiAnBUw5AW6EK0SAHVZcuW0LMMXnGe9f4UsEUgCNwo5mvLWPxzKqFq\n6/G3kEZVFwZ0qrlLoJPeHNbOcfkeZ9NgD/OhzQmdylM0IwGM9DMrm2YS4EVsmm2b\n53qKIfIyysp1yAGcTnBwBbZ85osNBl2KRDIPhMs0bnmGB7IAvwlSb+xm6vWKECkO\nlwQDO5Kg8YZ8+Z3pn/oS688t/fPXvWLZYUqwR63oWfIaPJI7Ahv2jJmgw1ofL81r\n2CE3T/OydtUeGLzqWJAB8sbUgT3ug0cjtxsHuroQBSYBND3XDb/EQh5GeVVnGKKH\ngESLFAoweoNjDSXrlIu1gFjCDHF4CqBRmNYKrNQjLmhCrSfwkytXESJwlLzFKY8P\nK1yZyTpDC9YK0G7qgrk7EHmH9JAZTQ5V65pp0vR9KvqTU5ewkQDIljD2f3FIqo2B\nSKNCQE+N6NjWaTeNlU75m+yZocKObSPg0zS8FAuSJetNtzXA7ouqk34OoIMQj4gq\nUnh/i1FcZAd4U6Dtr9aRZ6PeLlm6MJ/h582L6fJLNEu136UWDtJj5eBYEzX13l+d\nSC4PEHx7ZZRwQKptl9NkinLZGJztg175paUu8C34sAv+SQnM20c0pdOXAq9GKKhi\nvt61kpkXoRGxjTlc6h+69aidSg==\n=ls8J\n-----END PGP PUBLIC KEY BLOCK-----\n" + "-----BEGIN PGP PUBLIC KEY BLOCK-----\nVersion: GnuPG v2.0.22 (GNU/Linux)\n\nmQINBErgSTsBEACh2A4b0O9t+vzC9VrVtL1AKvUWi9OPCjkvR7Xd8DtJxeeMZ5eF\n0HtzIG58qDRybwUe89FZprB1ffuUKzdE+HcL3FbNWSSOXVjZIersdXyH3NvnLLLF\n0DNRB2ix3bXG9Rh/RXpFsNxDp2CEMdUvbYCzE79K1EnUTVh1L0Of023FtPSZXX0c\nu7Pb5DI5lX5YeoXO6RoodrIGYJsVBQWnrWw4xNTconUfNPk0EGZtEnzvH2zyPoJh\nXGF+Ncu9XwbalnYde10OCvSWAZ5zTCpoLMTvQjWpbCdWXJzCm6G+/hx9upke546H\n5IjtYm4dTIVTnc3wvDiODgBKRzOl9rEOCIgOuGtDxRxcQkjrC+xvg5Vkqn7vBUyW\n9pHedOU+PoF3DGOM+dqv+eNKBvh9YF9ugFAQBkcG7viZgvGEMGGUpzNgN7XnS1gj\n/DPo9mZESOYnKceve2tIC87p2hqjrxOHuI7fkZYeNIcAoa83rBltFXaBDYhWAKS1\nPcXS1/7JzP0ky7d0L6Xbu/If5kqWQpKwUInXtySRkuraVfuK3Bpa+X1XecWi24JY\nHVtlNX025xx1ewVzGNCTlWn1skQN2OOoQTV4C8/qFpTW6DTWYurd4+fE0OJFJZQF\nbuhfXYwmRlVOgN5i77NTIJZJQfYFj38c/Iv5vZBPokO6mffrOTv3MHWVgQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChyZWxlYXNlIGtleSAyKSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjYEEwEIACACGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAUCSuBJPAAK\nCRAZni+R/UMdUfIkD/9m3HWv07uJG26R3KBexTo2FFu3rmZs+m2nfW8R3dBX+k0o\nAOFpgJCsNgKwU81LOPrkMN19G0+Yn/ZTCDD7cIQ7dhYuDyEX97xh4une/EhnnRuh\nASzR+1xYbj/HcYZIL9kbslgpebMn+AhxbUTQF/mziug3hLidR9Bzvygq0Q09E11c\nOZL4BU6J2HqxL+9m2F+tnLdfhL7MsAq9nbmWAOpkbGefc5SXBSq0sWfwoes3X3yD\nQ8B5Xqr9AxABU7oUB+wRqvY69ZCxi/BhuuJCUxY89ZmwXfkVxeHl1tYfROUwOnJO\nGYSbI/o41KBK4DkIiDcT7QqvqvCyudnxZdBjL2QU6OrIJvWmKs319qSF9m3mXRSt\nZzWtB89Pj5LZ6cdtuHvW9GO4qSoBLmAfB313pGkbgi1DE6tqCLHlA0yQ8zv99OWV\ncMDGmS7tVTZqfX1xQJ0N3bNORQNtikJC3G+zBCJzIeZleeDlMDQcww00yWU1oE7/\nTo2UmykMGc7o9iggFWR2g0PIcKsA/SXdRKWPqCHG2uKHBvdRTQGupdXQ1sbV+AHw\nycyA/9H/mp/NUSNM2cqnBDcZ6GhlHt59zWtEveiuU5fpTbp4GVcFXbW8jStj8j8z\n1HI3cywZO8+YNPzqyx0JWsidXGkfzkPHyS4jTG84lfu2JG8m/nqLnRSeKpl20Q==\n=79bX\n-----END PGP PUBLIC KEY BLOCK-----\n-----BEGIN PGP PUBLIC KEY 
BLOCK-----\n\nmQINBFsy23UBEACUKSphFEIEvNpy68VeW4Dt6qv+mU6am9a2AAl10JANLj1oqWX+\noYk3en1S6cVe2qehSL5DGVa3HMUZkP3dtbD4SgzXzxPodebPcr4+0QNWigkUisri\nXGL5SCEcOP30zDhZvg+4mpO2jMi7Kc1DLPzBBkgppcX91wa0L1pQzBcvYMPyV/Dh\nKbQHR75WdkP6OA2JXdfC94nxYq+2e0iPqC1hCP3Elh+YnSkOkrawDPmoB1g4+ft/\nxsiVGVy/W0ekXmgvYEHt6si6Y8NwXgnTMqxeSXQ9YUgVIbTpsxHQKGy76T5lMlWX\n4LCOmEVomBJg1SqF6yi9Vu8TeNThaDqT4/DddYInd0OO69s0kGIXalVgGYiW2HOD\nx2q5R1VGCoJxXomz+EbOXY+HpKPOHAjU0DB9MxbU3S248LQ69nIB5uxysy0PSco1\nsdZ8sxRNQ9Dw6on0Nowx5m6Thefzs5iK3dnPGBqHTT43DHbnWc2scjQFG+eZhe98\nEll/kb6vpBoY4bG9/wCG9qu7jj9Z+BceCNKeHllbezVLCU/Hswivr7h2dnaEFvPD\nO4GqiWiwOF06XaBMVgxA8p2HRw0KtXqOpZk+o+sUvdPjsBw42BB96A1yFX4jgFNA\nPyZYnEUdP6OOv9HSjnl7k/iEkvHq/jGYMMojixlvXpGXhnt5jNyc4GSUJQARAQAB\ntDNSZWQgSGF0LCBJbmMuIChhdXhpbGlhcnkga2V5KSA8c2VjdXJpdHlAcmVkaGF0\nLmNvbT6JAjkEEwECACMFAlsy23UCGwMHCwkIBwMCAQYVCAIJCgsEFgIDAQIeAQIX\ngAAKCRD3b2bD1AgnknqOD/9fB2ASuG2aJIiap4kK58R+RmOVM4qgclAnaG57+vjI\nnKvyfV3NH/keplGNRxwqHekfPCqvkpABwhdGEXIE8ILqnPewIMr6PZNZWNJynZ9i\neSMzVuCG7jDoGyQ5/6B0f6xeBtTeBDiRl7+Alehet1twuGL1BJUYG0QuLgcEzkaE\n/gkuumeVcazLzz7L12D22nMk66GxmgXfqS5zcbqOAuZwaA6VgSEgFdV2X2JU79zS\nBQJXv7NKc+nDXFG7M7EHjY3Rma3HXkDbkT8bzh9tJV7Z7TlpT829pStWQyoxKCVq\nsEX8WsSapTKA3P9YkYCwLShgZu4HKRFvHMaIasSIZWzLu+RZH/4yyHOhj0QB7XMY\neHQ6fGSbtJ+K6SrpHOOsKQNAJ0hVbSrnA1cr5+2SDfel1RfYt0W9FA6DoH/S5gAR\ndzT1u44QVwwp3U+eFpHphFy//uzxNMtCjjdkpzhYYhOCLNkDrlRPb+bcoL/6ePSr\n016PA7eEnuC305YU1Ml2WcCn7wQV8x90o33klJmEkWtXh3X39vYtI4nCPIvZn1eP\nVy+F+wWt4vN2b8oOdlzc2paOembbCo2B+Wapv5Y9peBvlbsDSgqtJABfK8KQq/jK\nYl3h5elIa1I3uNfczeHOnf1enLOUOlq630yeM/yHizz99G1g+z/guMh5+x/OHraW\niLkCDQRbMtt1ARAA1lNsWklhS9LoBdolTVtg65FfdFJr47pzKRGYIoGLbcJ155ND\nG+P8UrM06E/ah06EEWuvu2YyyYAz1iYGsCwHAXtbEJh+1tF0iOVx2vnZPgtIGE9V\nP95V5ZvWvB3bdke1z8HadDA+/Ve7fbwXXLa/z9QhSQgsJ8NS8KYnDDjI4EvQtv0i\nPVLY8+u8z6VyiV9RJyn8UEZEJdbFDF9AZAT8103w8SEo/cvIoUbVKZLGcXdAIjCa\ny04u6jsrMp9UGHZX7+srT+9YHDzQixei4IdmxUcqtiNR2/bFHpHCu1pzYjXj968D\n8Ng2txBXDgs16BF/9l++GWKz2dOSH0jdS6sFJ/Dmg7oYnJ2xKSJEmcnV8Z0M1n4w\nXR1t/KeKZe3aR+RXCAEVC5dQ3GbRW2+WboJ6ldgFcVcOv6iOSWP9TrLzFPOpCsIr\nnHE+cMBmPHq3dUm7KeYXQ6wWWmtXlw6widf7cBcGFeELpuU9klzqdKze8qo2oMkf\nrfxIq8zdciPxZXb/75dGWs6dLHQmDpo4MdQVskw5vvwHicMpUpGpxkX7X1XAfdQf\nyIHLGT4ZXuMLIMUPdzJE0Vwt/RtJrZ+feLSv/+0CkkpGHORYroGwIBrJ2RikgcV2\nbc98V/27Kz2ngUCEwnmlhIcrY4IGAAZzUAl0GLHSevPbAREu4fDW4Y+ztOsAEQEA\nAYkCHwQYAQIACQUCWzLbdQIbDAAKCRD3b2bD1AgnkusfD/9U4sPtZfMw6cII167A\nXRZOO195G7oiAnBUw5AW6EK0SAHVZcuW0LMMXnGe9f4UsEUgCNwo5mvLWPxzKqFq\n6/G3kEZVFwZ0qrlLoJPeHNbOcfkeZ9NgD/OhzQmdylM0IwGM9DMrm2YS4EVsmm2b\n53qKIfIyysp1yAGcTnBwBbZ85osNBl2KRDIPhMs0bnmGB7IAvwlSb+xm6vWKECkO\nlwQDO5Kg8YZ8+Z3pn/oS688t/fPXvWLZYUqwR63oWfIaPJI7Ahv2jJmgw1ofL81r\n2CE3T/OydtUeGLzqWJAB8sbUgT3ug0cjtxsHuroQBSYBND3XDb/EQh5GeVVnGKKH\ngESLFAoweoNjDSXrlIu1gFjCDHF4CqBRmNYKrNQjLmhCrSfwkytXESJwlLzFKY8P\nK1yZyTpDC9YK0G7qgrk7EHmH9JAZTQ5V65pp0vR9KvqTU5ewkQDIljD2f3FIqo2B\nSKNCQE+N6NjWaTeNlU75m+yZocKObSPg0zS8FAuSJetNtzXA7ouqk34OoIMQj4gq\nUnh/i1FcZAd4U6Dtr9aRZ6PeLlm6MJ/h582L6fJLNEu136UWDtJj5eBYEzX13l+d\nSC4PEHx7ZZRwQKptl9NkinLZGJztg175paUu8C34sAv+SQnM20c0pdOXAq9GKKhi\nvt61kpkXoRGxjTlc6h+69aidSg==\n=ls8J\n-----END PGP PUBLIC KEY BLOCK-----" ], "packages": [ { diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index cee360db7..2f1549075 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -483,6 +483,8 @@ type Histogram struct { // histograms. 
PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket. + // Only used for native histograms. These exemplars MUST have a timestamp. + Exemplars []*Exemplar `protobuf:"bytes,16,rep,name=exemplars" json:"exemplars,omitempty"` } func (x *Histogram) Reset() { @@ -622,6 +624,13 @@ func (x *Histogram) GetPositiveCount() []float64 { return nil } +func (x *Histogram) GetExemplars() []*Exemplar { + if x != nil { + return x.Exemplars + } + return nil +} + // A Bucket of a conventional histogram, each of which is treated as // an individual counter-like time series by Prometheus. type Bucket struct { @@ -923,6 +932,7 @@ type MetricFamily struct { Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` + Unit *string `protobuf:"bytes,5,opt,name=unit" json:"unit,omitempty"` } func (x *MetricFamily) Reset() { @@ -985,6 +995,13 @@ func (x *MetricFamily) GetMetric() []*Metric { return nil } +func (x *MetricFamily) GetUnit() string { + if x != nil && x.Unit != nil { + return *x.Unit + } + return "" +} + var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor var file_io_prometheus_client_metrics_proto_rawDesc = []byte{ @@ -1028,7 +1045,7 @@ var file_io_prometheus_client_metrics_proto_rawDesc = []byte{ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xac, 0x05, 0x0a, 0x09, 0x48, + 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xea, 0x05, 0x0a, 0x09, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73, @@ -1071,79 +1088,84 @@ var file_io_prometheus_client_metrics_proto_rawDesc = []byte{ 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, - 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, - 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, - 0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 
0x6e, 0x74, - 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, - 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, - 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, - 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, - 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, - 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, - 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, - 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, - 0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, + 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x09, 0x65, 0x78, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, + 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75, + 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a, + 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x14, 0x63, + 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, + 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75, + 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, 0x72, 0x42, + 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, + 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, + 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16, + 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, + 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91, + 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, + 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 
0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, - 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, - 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, - 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, - 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, - 0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, + 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65, + 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, + 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, + 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, + 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 
0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, - 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, - 0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, - 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, - 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, - 0x79, 0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, - 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, - 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, - 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, - 0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, - 0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, - 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, - 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a, 0x62, 0x0a, 0x0a, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, 0x55, - 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45, 0x10, - 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02, 0x12, 0x0b, - 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x48, - 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x47, 0x41, - 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x05, 0x42, - 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, - 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, 0x63, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f, 0x3b, 0x69, - 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, + 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 
0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, + 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, + 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73, + 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, + 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, + 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, + 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c, + 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f, + 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x2a, 0x62, 0x0a, + 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, + 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, + 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02, + 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, + 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, + 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, + 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, + 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f, + 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, } var ( @@ -1185,22 +1207,23 @@ var file_io_prometheus_client_metrics_proto_depIdxs = []int32{ 13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> google.protobuf.Timestamp 9, // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan 9, // 7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan - 10, // 8: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar - 1, // 9: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair - 13, // 10: 
io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp - 1, // 11: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair - 2, // 12: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge - 3, // 13: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter - 5, // 14: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary - 6, // 15: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped - 7, // 16: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram - 0, // 17: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType - 11, // 18: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric - 19, // [19:19] is the sub-list for method output_type - 19, // [19:19] is the sub-list for method input_type - 19, // [19:19] is the sub-list for extension type_name - 19, // [19:19] is the sub-list for extension extendee - 0, // [0:19] is the sub-list for field type_name + 10, // 8: io.prometheus.client.Histogram.exemplars:type_name -> io.prometheus.client.Exemplar + 10, // 9: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar + 1, // 10: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair + 13, // 11: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp + 1, // 12: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair + 2, // 13: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge + 3, // 14: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter + 5, // 15: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary + 6, // 16: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped + 7, // 17: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram + 0, // 18: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType + 11, // 19: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric + 20, // [20:20] is the sub-list for method output_type + 20, // [20:20] is the sub-list for method input_type + 20, // [20:20] is the sub-list for extension type_name + 20, // [20:20] is the sub-list for extension extendee + 0, // [0:20] is the sub-list for field type_name } func init() { file_io_prometheus_client_metrics_proto_init() } diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index 8fd806184..7f6cbe7d2 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -139,7 +139,13 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { // interface is kept for backwards compatibility. // In cases where the Format does not allow for UTF-8 names, the global // NameEscapingScheme will be applied. -func NewEncoder(w io.Writer, format Format) Encoder { +// +// NewEncoder can be called with additional options to customize the OpenMetrics text output. +// For example: +// NewEncoder(w, FmtOpenMetrics_1_0_0, WithCreatedLines()) +// +// Extra options are ignored for all other formats. 
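The client_model bump above adds two generated fields: Histogram.Exemplars (repeated, tag 16, exemplars for native histograms that must carry a timestamp) and MetricFamily.Unit (optional, tag 5), each with a nil-safe getter. A minimal consumer-side sketch, assuming only the vendored client_model API shown in this diff; the metric family literal is hypothetical sample data, and proto.String is the usual helper from google.golang.org/protobuf, which the vendored code already depends on:

package main

import (
    "fmt"

    dto "github.com/prometheus/client_model/go"
    "google.golang.org/protobuf/proto"
)

func main() {
    mf := &dto.MetricFamily{
        Name: proto.String("http_request_duration_seconds"), // hypothetical
        Unit: proto.String("seconds"),                        // new optional field (tag 5)
        Type: dto.MetricType_HISTOGRAM.Enum(),
    }
    // GetUnit returns "" when Unit is nil, so callers can use it unconditionally.
    fmt.Println(mf.GetUnit())

    h := &dto.Histogram{}
    // GetExemplars exposes the new repeated field (tag 16); for native
    // histograms each exemplar is expected to carry a timestamp.
    for _, e := range h.GetExemplars() {
        fmt.Println(e.GetTimestamp().AsTime())
    }
}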
+func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder { escapingScheme := format.ToEscapingScheme() switch format.FormatType() { @@ -178,7 +184,7 @@ func NewEncoder(w io.Writer, format Format) Encoder { case TypeOpenMetrics: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme)) + _, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme), options...) return err }, close: func() error { diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index 6fc9555e3..051b38cd1 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -15,6 +15,7 @@ package expfmt import ( + "fmt" "strings" "github.com/prometheus/common/model" @@ -63,7 +64,7 @@ const ( type FormatType int const ( - TypeUnknown = iota + TypeUnknown FormatType = iota TypeProtoCompact TypeProtoDelim TypeProtoText @@ -73,7 +74,8 @@ const ( // NewFormat generates a new Format from the type provided. Mostly used for // tests, most Formats should be generated as part of content negotiation in -// encode.go. +// encode.go. If a type has more than one version, the latest version will be +// returned. func NewFormat(t FormatType) Format { switch t { case TypeProtoCompact: @@ -91,13 +93,21 @@ func NewFormat(t FormatType) Format { } } +// NewOpenMetricsFormat generates a new OpenMetrics format matching the +// specified version number. +func NewOpenMetricsFormat(version string) (Format, error) { + if version == OpenMetricsVersion_0_0_1 { + return fmtOpenMetrics_0_0_1, nil + } + if version == OpenMetricsVersion_1_0_0 { + return fmtOpenMetrics_1_0_0, nil + } + return fmtUnknown, fmt.Errorf("unknown open metrics version string") +} + // FormatType deduces an overall FormatType for the given format. func (f Format) FormatType() FormatType { toks := strings.Split(string(f), ";") - if len(toks) < 2 { - return TypeUnknown - } - params := make(map[string]string) for i, t := range toks { if i == 0 { diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 5622578ed..353c5e93f 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -22,11 +22,47 @@ import ( "strconv" "strings" + "google.golang.org/protobuf/types/known/timestamppb" + "github.com/prometheus/common/model" dto "github.com/prometheus/client_model/go" ) +type encoderOption struct { + withCreatedLines bool + withUnit bool +} + +type EncoderOption func(*encoderOption) + +// WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder +// to include _created lines (See +// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#counter-1). +// Created timestamps can improve the accuracy of series reset detection, but +// come with a bandwidth cost. +// +// At the time of writing, created timestamp ingestion is still experimental in +// Prometheus and need to be enabled with the feature-flag +// `--feature-flag=created-timestamp-zero-ingestion`, and breaking changes are +// still possible. Therefore, it is recommended to use this feature with caution. 
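Taken together, the expfmt changes above make NewEncoder variadic and add NewOpenMetricsFormat for resolving a concrete OpenMetrics version. A minimal sketch of the intended call pattern, assuming only the vendored expfmt and client_model APIs in this diff; the metric family, its name, and its values are hypothetical, and proto.String / proto.Float64 / timestamppb.Now are standard helpers from the protobuf module already vendored here:

package main

import (
    "bytes"
    "fmt"
    "log"

    dto "github.com/prometheus/client_model/go"
    "github.com/prometheus/common/expfmt"
    "google.golang.org/protobuf/proto"
    "google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
    // Resolve OpenMetrics 1.0.0 explicitly instead of via content negotiation;
    // NewOpenMetricsFormat is new in this bump.
    format, err := expfmt.NewOpenMetricsFormat(expfmt.OpenMetricsVersion_1_0_0)
    if err != nil {
        log.Fatal(err)
    }

    var buf bytes.Buffer
    // The extra options only affect OpenMetrics output; other formats ignore them.
    enc := expfmt.NewEncoder(&buf, format, expfmt.WithCreatedLines(), expfmt.WithUnit())

    mf := &dto.MetricFamily{
        Name: proto.String("http_requests_total"), // hypothetical
        Type: dto.MetricType_COUNTER.Enum(),
        Unit: proto.String("requests"),
        Metric: []*dto.Metric{{
            Counter: &dto.Counter{
                Value:            proto.Float64(42),
                CreatedTimestamp: timestamppb.Now(),
            },
        }},
    }
    if err := enc.Encode(mf); err != nil {
        log.Fatal(err)
    }
    // OpenMetrics output should be terminated with "# EOF"; the returned
    // encoder also implements expfmt.Closer for that purpose.
    if closer, ok := enc.(expfmt.Closer); ok {
        _ = closer.Close()
    }
    fmt.Print(buf.String())
}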
+func WithCreatedLines() EncoderOption { + return func(t *encoderOption) { + t.withCreatedLines = true + } +} + +// WithUnit is an EncoderOption enabling a set unit to be written to the output +// and to be added to the metric name, if it's not there already, as a suffix. +// Without opting in this way, the unit will not be added to the metric name and, +// on top of that, the unit will not be passed onto the output, even if it +// were declared in the *dto.MetricFamily struct, i.e. even if in.Unit !=nil. +func WithUnit() EncoderOption { + return func(t *encoderOption) { + t.withUnit = true + } +} + // MetricFamilyToOpenMetrics converts a MetricFamily proto message into the // OpenMetrics text format and writes the resulting lines to 'out'. It returns // the number of bytes written and any error encountered. The output will have @@ -59,20 +95,34 @@ import ( // Prometheus to OpenMetrics or vice versa: // // - Counters are expected to have the `_total` suffix in their metric name. In -// the output, the suffix will be truncated from the `# TYPE` and `# HELP` -// line. A counter with a missing `_total` suffix is not an error. However, +// the output, the suffix will be truncated from the `# TYPE`, `# HELP` and `# UNIT` +// lines. A counter with a missing `_total` suffix is not an error. However, // its type will be set to `unknown` in that case to avoid invalid OpenMetrics // output. // -// - No support for the following (optional) features: `# UNIT` line, `_created` -// line, info type, stateset type, gaugehistogram type. +// - According to the OM specs, the `# UNIT` line is optional, but if populated, +// the unit has to be present in the metric name as its suffix: +// (see https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#unit). +// However, in order to accommodate any potential scenario where such a change in the +// metric name is not desirable, the users are here given the choice of either explicitly +// opt in, in case they wish for the unit to be included in the output AND in the metric name +// as a suffix (see the description of the WithUnit function above), +// or not to opt in, in case they don't want for any of that to happen. +// +// - No support for the following (optional) features: info type, +// stateset type, gaugehistogram type. // // - The size of exemplar labels is not checked (i.e. it's possible to create // exemplars that are larger than allowed by the OpenMetrics specification). // // - The value of Counters is not checked. (OpenMetrics doesn't allow counters // with a `NaN` value.) 
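A short usage sketch of the unit handling described above, assuming only the vendored expfmt API in this diff; the family name, unit, and value are hypothetical, and the behavior noted in the comments follows from the rules stated in the doc comment rather than from captured output:

package main

import (
    "log"
    "os"

    dto "github.com/prometheus/client_model/go"
    "github.com/prometheus/common/expfmt"
    "google.golang.org/protobuf/proto"
)

func main() {
    mf := &dto.MetricFamily{
        Name: proto.String("request_duration_total"), // hypothetical counter
        Type: dto.MetricType_COUNTER.Enum(),
        Unit: proto.String("seconds"),
        Metric: []*dto.Metric{{
            Counter: &dto.Counter{Value: proto.Float64(1.5)},
        }},
    }

    // Without WithUnit, no "# UNIT" line is written and the "# TYPE" line uses
    // "request_duration" (the "_total" suffix is trimmed there as before).
    if _, err := expfmt.MetricFamilyToOpenMetrics(os.Stdout, mf); err != nil {
        log.Fatal(err)
    }

    // With WithUnit, the unit is appended to the metric name when missing, so
    // the family is rendered as "request_duration_seconds" together with a
    // "# UNIT ... seconds" line, while samples keep their "_total" suffix.
    if _, err := expfmt.MetricFamilyToOpenMetrics(os.Stdout, mf, expfmt.WithUnit()); err != nil {
        log.Fatal(err)
    }
}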
-func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) { +func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...EncoderOption) (written int, err error) { + toOM := encoderOption{} + for _, option := range options { + option(&toOM) + } + name := in.GetName() if name == "" { return 0, fmt.Errorf("MetricFamily has no name: %s", in) @@ -95,12 +145,15 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int } var ( - n int - metricType = in.GetType() - shortName = name + n int + metricType = in.GetType() + compliantName = name ) - if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") { - shortName = name[:len(name)-6] + if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") { + compliantName = name[:len(name)-6] + } + if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, fmt.Sprintf("_%s", *in.Unit)) { + compliantName = compliantName + fmt.Sprintf("_%s", *in.Unit) } // Comments, first HELP, then TYPE. @@ -110,7 +163,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int if err != nil { return } - n, err = writeName(w, shortName) + n, err = writeName(w, compliantName) written += n if err != nil { return @@ -136,7 +189,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int if err != nil { return } - n, err = writeName(w, shortName) + n, err = writeName(w, compliantName) written += n if err != nil { return @@ -163,55 +216,89 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int if err != nil { return } + if toOM.withUnit && in.Unit != nil { + n, err = w.WriteString("# UNIT ") + written += n + if err != nil { + return + } + n, err = writeName(w, compliantName) + written += n + if err != nil { + return + } + + err = w.WriteByte(' ') + written++ + if err != nil { + return + } + n, err = writeEscapedString(w, *in.Unit, true) + written += n + if err != nil { + return + } + err = w.WriteByte('\n') + written++ + if err != nil { + return + } + } + + var createdTsBytesWritten int // Finally the samples, one line for each. + if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") { + compliantName = compliantName + "_total" + } for _, metric := range in.Metric { switch metricType { case dto.MetricType_COUNTER: if metric.Counter == nil { return written, fmt.Errorf( - "expected counter in metric %s %s", name, metric, + "expected counter in metric %s %s", compliantName, metric, ) } - // Note that we have ensured above that either the name - // ends on `_total` or that the rendered type is - // `unknown`. Therefore, no `_total` must be added here. 
n, err = writeOpenMetricsSample( - w, name, "", metric, "", 0, + w, compliantName, "", metric, "", 0, metric.Counter.GetValue(), 0, false, metric.Counter.Exemplar, ) + if toOM.withCreatedLines && metric.Counter.CreatedTimestamp != nil { + createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "_total", metric, "", 0, metric.Counter.GetCreatedTimestamp()) + n += createdTsBytesWritten + } case dto.MetricType_GAUGE: if metric.Gauge == nil { return written, fmt.Errorf( - "expected gauge in metric %s %s", name, metric, + "expected gauge in metric %s %s", compliantName, metric, ) } n, err = writeOpenMetricsSample( - w, name, "", metric, "", 0, + w, compliantName, "", metric, "", 0, metric.Gauge.GetValue(), 0, false, nil, ) case dto.MetricType_UNTYPED: if metric.Untyped == nil { return written, fmt.Errorf( - "expected untyped in metric %s %s", name, metric, + "expected untyped in metric %s %s", compliantName, metric, ) } n, err = writeOpenMetricsSample( - w, name, "", metric, "", 0, + w, compliantName, "", metric, "", 0, metric.Untyped.GetValue(), 0, false, nil, ) case dto.MetricType_SUMMARY: if metric.Summary == nil { return written, fmt.Errorf( - "expected summary in metric %s %s", name, metric, + "expected summary in metric %s %s", compliantName, metric, ) } for _, q := range metric.Summary.Quantile { n, err = writeOpenMetricsSample( - w, name, "", metric, + w, compliantName, "", metric, model.QuantileLabel, q.GetQuantile(), q.GetValue(), 0, false, nil, @@ -222,7 +309,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int } } n, err = writeOpenMetricsSample( - w, name, "_sum", metric, "", 0, + w, compliantName, "_sum", metric, "", 0, metric.Summary.GetSampleSum(), 0, false, nil, ) @@ -231,20 +318,24 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int return } n, err = writeOpenMetricsSample( - w, name, "_count", metric, "", 0, + w, compliantName, "_count", metric, "", 0, 0, metric.Summary.GetSampleCount(), true, nil, ) + if toOM.withCreatedLines && metric.Summary.CreatedTimestamp != nil { + createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Summary.GetCreatedTimestamp()) + n += createdTsBytesWritten + } case dto.MetricType_HISTOGRAM: if metric.Histogram == nil { return written, fmt.Errorf( - "expected histogram in metric %s %s", name, metric, + "expected histogram in metric %s %s", compliantName, metric, ) } infSeen := false for _, b := range metric.Histogram.Bucket { n, err = writeOpenMetricsSample( - w, name, "_bucket", metric, + w, compliantName, "_bucket", metric, model.BucketLabel, b.GetUpperBound(), 0, b.GetCumulativeCount(), true, b.Exemplar, @@ -259,7 +350,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int } if !infSeen { n, err = writeOpenMetricsSample( - w, name, "_bucket", metric, + w, compliantName, "_bucket", metric, model.BucketLabel, math.Inf(+1), 0, metric.Histogram.GetSampleCount(), true, nil, @@ -270,7 +361,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int } } n, err = writeOpenMetricsSample( - w, name, "_sum", metric, "", 0, + w, compliantName, "_sum", metric, "", 0, metric.Histogram.GetSampleSum(), 0, false, nil, ) @@ -279,13 +370,17 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int return } n, err = writeOpenMetricsSample( - w, name, "_count", metric, "", 0, + w, compliantName, "_count", metric, "", 0, 0, metric.Histogram.GetSampleCount(), true, 
nil, ) + if toOM.withCreatedLines && metric.Histogram.CreatedTimestamp != nil { + createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Histogram.GetCreatedTimestamp()) + n += createdTsBytesWritten + } default: return written, fmt.Errorf( - "unexpected type in metric %s %s", name, metric, + "unexpected type in metric %s %s", compliantName, metric, ) } written += n @@ -350,7 +445,7 @@ func writeOpenMetricsSample( return written, err } } - if exemplar != nil { + if exemplar != nil && len(exemplar.Label) > 0 { n, err = writeExemplar(w, exemplar) written += n if err != nil { @@ -473,6 +568,49 @@ func writeOpenMetricsNameAndLabelPairs( return written, nil } +// writeOpenMetricsCreated writes the created timestamp for a single time series +// following OpenMetrics text format to w, given the metric name, the metric proto +// message itself, optionally a suffix to be removed, e.g. '_total' for counters, +// an additional label name with a float64 value (use empty string as label name if +// not required) and the timestamp that represents the created timestamp. +// The function returns the number of bytes written and any error encountered. +func writeOpenMetricsCreated(w enhancedWriter, + name, suffixToTrim string, metric *dto.Metric, + additionalLabelName string, additionalLabelValue float64, + createdTimestamp *timestamppb.Timestamp, +) (int, error) { + written := 0 + n, err := writeOpenMetricsNameAndLabelPairs( + w, strings.TrimSuffix(name, suffixToTrim)+"_created", metric.Label, additionalLabelName, additionalLabelValue, + ) + written += n + if err != nil { + return written, err + } + + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + + // TODO(beorn7): Format this directly from components of ts to + // avoid overflow/underflow and precision issues of the float + // conversion. + n, err = writeOpenMetricsFloat(w, float64(createdTimestamp.AsTime().UnixNano())/1e9) + written += n + if err != nil { + return written, err + } + + err = w.WriteByte('\n') + written++ + if err != nil { + return written, err + } + return written, nil +} + // writeExemplar writes the provided exemplar in OpenMetrics format to w. The // function returns the number of bytes written and any error encountered. func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go index 6eda08a73..ec738e624 100644 --- a/vendor/github.com/prometheus/common/model/labelset.go +++ b/vendor/github.com/prometheus/common/model/labelset.go @@ -14,10 +14,12 @@ package model import ( + "bytes" "encoding/json" "fmt" + "slices" "sort" - "strings" + "strconv" ) // A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet @@ -129,14 +131,27 @@ func (l LabelSet) Merge(other LabelSet) LabelSet { return result } +// String will look like `{foo="bar", more="less"}`. Names are sorted alphabetically. func (l LabelSet) String() string { - lstrs := make([]string, 0, len(l)) - for l, v := range l { - lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) + var lna [32]LabelName // On stack to avoid memory allocation for sorting names. + labelNames := lna[:0] + for name := range l { + labelNames = append(labelNames, name) } - - sort.Strings(lstrs) - return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) + slices.Sort(labelNames) + var bytea [1024]byte // On stack to avoid memory allocation while building the output. 
+ b := bytes.NewBuffer(bytea[:0]) + b.WriteByte('{') + for i, name := range labelNames { + if i > 0 { + b.WriteString(", ") + } + b.WriteString(string(name)) + b.WriteByte('=') + b.Write(strconv.AppendQuote(b.AvailableBuffer(), string(l[name]))) + } + b.WriteByte('}') + return b.String() } // Fingerprint returns the LabelSet's fingerprint. diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index 0bd29b3a3..eb865e5a5 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -204,6 +204,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF out := &dto.MetricFamily{ Help: v.Help, Type: v.Type, + Unit: v.Unit, } // If the name is nil, copy as-is, don't try to escape. diff --git a/vendor/github.com/rivo/uniseg/README.md b/vendor/github.com/rivo/uniseg/README.md index 25e934687..a8191b815 100644 --- a/vendor/github.com/rivo/uniseg/README.md +++ b/vendor/github.com/rivo/uniseg/README.md @@ -3,7 +3,7 @@ [![Go Reference](https://pkg.go.dev/badge/github.com/rivo/uniseg.svg)](https://pkg.go.dev/github.com/rivo/uniseg) [![Go Report](https://img.shields.io/badge/go%20report-A%2B-brightgreen.svg)](https://goreportcard.com/report/github.com/rivo/uniseg) -This Go package implements Unicode Text Segmentation according to [Unicode Standard Annex #29](https://unicode.org/reports/tr29/), Unicode Line Breaking according to [Unicode Standard Annex #14](https://unicode.org/reports/tr14/) (Unicode version 14.0.0), and monospace font string width calculation similar to [wcwidth](https://man7.org/linux/man-pages/man3/wcwidth.3.html). +This Go package implements Unicode Text Segmentation according to [Unicode Standard Annex #29](https://unicode.org/reports/tr29/), Unicode Line Breaking according to [Unicode Standard Annex #14](https://unicode.org/reports/tr14/) (Unicode version 15.0.0), and monospace font string width calculation similar to [wcwidth](https://man7.org/linux/man-pages/man3/wcwidth.3.html). ## Background @@ -73,7 +73,7 @@ for gr.Next() { ### Using the [`Step`](https://pkg.go.dev/github.com/rivo/uniseg#Step) or [`StepString`](https://pkg.go.dev/github.com/rivo/uniseg#StepString) Function -This is orders of magnitude faster than the `Graphemes` class, but it requires the handling of states and boundaries: +This avoids allocating a new `Graphemes` object but it requires the handling of states and boundaries: ```go str := "🇩🇪🏳️‍🌈" @@ -88,29 +88,7 @@ for len(str) > 0 { ### Advanced Examples -Breaking into grapheme clusters and evaluating line breaks: - -```go -str := "First line.\nSecond line." -state := -1 -var ( - c string - boundaries int -) -for len(str) > 0 { - c, str, boundaries, state = uniseg.StepString(str, state) - fmt.Print(c) - if boundaries&uniseg.MaskLine == uniseg.LineCanBreak { - fmt.Print("|") - } else if boundaries&uniseg.MaskLine == uniseg.LineMustBreak { - fmt.Print("‖") - } -} -// First |line. -// ‖Second |line.‖ -``` - -If you're only interested in word segmentation, use [`FirstWord`](https://pkg.go.dev/github.com/rivo/uniseg#FirstWord) or [`FirstWordInString`](https://pkg.go.dev/github.com/rivo/uniseg#FirstWordInString): +The [`Graphemes`](https://pkg.go.dev/github.com/rivo/uniseg#Graphemes) class offers the most convenient way to access all functionality of this package. But in some cases, it may be better to use the specialized functions directly. 
For example, if you're only interested in word segmentation, use [`FirstWord`](https://pkg.go.dev/github.com/rivo/uniseg#FirstWord) or [`FirstWordInString`](https://pkg.go.dev/github.com/rivo/uniseg#FirstWordInString): ```go str := "Hello, world!" @@ -133,6 +111,8 @@ Similarly, use - [`FirstSentence`](https://pkg.go.dev/github.com/rivo/uniseg#FirstSentence) or [`FirstSentenceInString`](https://pkg.go.dev/github.com/rivo/uniseg#FirstSentenceInString) for sentence segmentation only, and - [`FirstLineSegment`](https://pkg.go.dev/github.com/rivo/uniseg#FirstLineSegment) or [`FirstLineSegmentInString`](https://pkg.go.dev/github.com/rivo/uniseg#FirstLineSegmentInString) for line breaking / word wrapping (although using [`Step`](https://pkg.go.dev/github.com/rivo/uniseg#Step) or [`StepString`](https://pkg.go.dev/github.com/rivo/uniseg#StepString) is preferred as it will observe grapheme cluster boundaries). +If you're only interested in the width of characters, use [`FirstGraphemeCluster`](https://pkg.go.dev/github.com/rivo/uniseg#FirstGraphemeCluster) or [`FirstGraphemeClusterInString`](https://pkg.go.dev/github.com/rivo/uniseg#FirstGraphemeClusterInString). It is much faster than using [`Step`](https://pkg.go.dev/github.com/rivo/uniseg#Step), [`StepString`](https://pkg.go.dev/github.com/rivo/uniseg#StepString), or the [`Graphemes`](https://pkg.go.dev/github.com/rivo/uniseg#Graphemes) class because it does not include the logic for word / sentence / line boundaries. + Finally, if you need to reverse a string while preserving grapheme clusters, use [`ReverseString`](https://pkg.go.dev/github.com/rivo/uniseg#ReverseString): ```go diff --git a/vendor/github.com/rivo/uniseg/eastasianwidth.go b/vendor/github.com/rivo/uniseg/eastasianwidth.go index 661934ac2..5fc54d991 100644 --- a/vendor/github.com/rivo/uniseg/eastasianwidth.go +++ b/vendor/github.com/rivo/uniseg/eastasianwidth.go @@ -1,13 +1,13 @@ -package uniseg - // Code generated via go generate from gen_properties.go. DO NOT EDIT. +package uniseg + // eastAsianWidth are taken from -// https://www.unicode.org/Public/14.0.0/ucd/EastAsianWidth.txt +// https://www.unicode.org/Public/15.0.0/ucd/EastAsianWidth.txt // and -// https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt +// https://unicode.org/Public/15.0.0/ucd/emoji/emoji-data.txt // ("Extended_Pictographic" only) -// on September 10, 2022. See https://www.unicode.org/license.html for the Unicode +// on September 5, 2023. See https://www.unicode.org/license.html for the Unicode // license agreement. var eastAsianWidth = [][3]int{ {0x0000, 0x001F, prN}, // Cc [32] .. 
@@ -504,6 +504,7 @@ var eastAsianWidth = [][3]int{ {0x0CE2, 0x0CE3, prN}, // Mn [2] KANNADA VOWEL SIGN VOCALIC L..KANNADA VOWEL SIGN VOCALIC LL {0x0CE6, 0x0CEF, prN}, // Nd [10] KANNADA DIGIT ZERO..KANNADA DIGIT NINE {0x0CF1, 0x0CF2, prN}, // Lo [2] KANNADA SIGN JIHVAMULIYA..KANNADA SIGN UPADHMANIYA + {0x0CF3, 0x0CF3, prN}, // Mc KANNADA SIGN COMBINING ANUSVARA ABOVE RIGHT {0x0D00, 0x0D01, prN}, // Mn [2] MALAYALAM SIGN COMBINING ANUSVARA ABOVE..MALAYALAM SIGN CANDRABINDU {0x0D02, 0x0D03, prN}, // Mc [2] MALAYALAM SIGN ANUSVARA..MALAYALAM SIGN VISARGA {0x0D04, 0x0D0C, prN}, // Lo [9] MALAYALAM LETTER VEDIC ANUSVARA..MALAYALAM LETTER VOCALIC L @@ -565,7 +566,7 @@ var eastAsianWidth = [][3]int{ {0x0EBD, 0x0EBD, prN}, // Lo LAO SEMIVOWEL SIGN NYO {0x0EC0, 0x0EC4, prN}, // Lo [5] LAO VOWEL SIGN E..LAO VOWEL SIGN AI {0x0EC6, 0x0EC6, prN}, // Lm LAO KO LA - {0x0EC8, 0x0ECD, prN}, // Mn [6] LAO TONE MAI EK..LAO NIGGAHITA + {0x0EC8, 0x0ECE, prN}, // Mn [7] LAO TONE MAI EK..LAO YAMAKKAN {0x0ED0, 0x0ED9, prN}, // Nd [10] LAO DIGIT ZERO..LAO DIGIT NINE {0x0EDC, 0x0EDF, prN}, // Lo [4] LAO HO NO..LAO LETTER KHMU NYO {0x0F00, 0x0F00, prN}, // Lo TIBETAN SYLLABLE OM @@ -1916,6 +1917,7 @@ var eastAsianWidth = [][3]int{ {0x10EAB, 0x10EAC, prN}, // Mn [2] YEZIDI COMBINING HAMZA MARK..YEZIDI COMBINING MADDA MARK {0x10EAD, 0x10EAD, prN}, // Pd YEZIDI HYPHENATION MARK {0x10EB0, 0x10EB1, prN}, // Lo [2] YEZIDI LETTER LAM WITH DOT ABOVE..YEZIDI LETTER YOT WITH CIRCUMFLEX ABOVE + {0x10EFD, 0x10EFF, prN}, // Mn [3] ARABIC SMALL LOW WORD SAKTA..ARABIC SMALL LOW WORD MADDA {0x10F00, 0x10F1C, prN}, // Lo [29] OLD SOGDIAN LETTER ALEPH..OLD SOGDIAN LETTER FINAL TAW WITH VERTICAL TAIL {0x10F1D, 0x10F26, prN}, // No [10] OLD SOGDIAN NUMBER ONE..OLD SOGDIAN FRACTION ONE HALF {0x10F27, 0x10F27, prN}, // Lo OLD SOGDIAN LIGATURE AYIN-DALETH @@ -1998,6 +2000,8 @@ var eastAsianWidth = [][3]int{ {0x11236, 0x11237, prN}, // Mn [2] KHOJKI SIGN NUKTA..KHOJKI SIGN SHADDA {0x11238, 0x1123D, prN}, // Po [6] KHOJKI DANDA..KHOJKI ABBREVIATION SIGN {0x1123E, 0x1123E, prN}, // Mn KHOJKI SIGN SUKUN + {0x1123F, 0x11240, prN}, // Lo [2] KHOJKI LETTER QA..KHOJKI LETTER SHORT I + {0x11241, 0x11241, prN}, // Mn KHOJKI VOWEL SIGN VOCALIC R {0x11280, 0x11286, prN}, // Lo [7] MULTANI LETTER A..MULTANI LETTER GA {0x11288, 0x11288, prN}, // Lo MULTANI LETTER GHA {0x1128A, 0x1128D, prN}, // Lo [4] MULTANI LETTER CA..MULTANI LETTER JJA @@ -2160,6 +2164,7 @@ var eastAsianWidth = [][3]int{ {0x11A9E, 0x11AA2, prN}, // Po [5] SOYOMBO HEAD MARK WITH MOON AND SUN AND TRIPLE FLAME..SOYOMBO TERMINAL MARK-2 {0x11AB0, 0x11ABF, prN}, // Lo [16] CANADIAN SYLLABICS NATTILIK HI..CANADIAN SYLLABICS SPA {0x11AC0, 0x11AF8, prN}, // Lo [57] PAU CIN HAU LETTER PA..PAU CIN HAU GLOTTAL STOP FINAL + {0x11B00, 0x11B09, prN}, // Po [10] DEVANAGARI HEAD MARK..DEVANAGARI SIGN MINDU {0x11C00, 0x11C08, prN}, // Lo [9] BHAIKSUKI LETTER A..BHAIKSUKI LETTER VOCALIC L {0x11C0A, 0x11C2E, prN}, // Lo [37] BHAIKSUKI LETTER E..BHAIKSUKI LETTER HA {0x11C2F, 0x11C2F, prN}, // Mc BHAIKSUKI VOWEL SIGN AA @@ -2205,6 +2210,19 @@ var eastAsianWidth = [][3]int{ {0x11EF3, 0x11EF4, prN}, // Mn [2] MAKASAR VOWEL SIGN I..MAKASAR VOWEL SIGN U {0x11EF5, 0x11EF6, prN}, // Mc [2] MAKASAR VOWEL SIGN E..MAKASAR VOWEL SIGN O {0x11EF7, 0x11EF8, prN}, // Po [2] MAKASAR PASSIMBANG..MAKASAR END OF SECTION + {0x11F00, 0x11F01, prN}, // Mn [2] KAWI SIGN CANDRABINDU..KAWI SIGN ANUSVARA + {0x11F02, 0x11F02, prN}, // Lo KAWI SIGN REPHA + {0x11F03, 0x11F03, prN}, // Mc KAWI SIGN VISARGA + {0x11F04, 0x11F10, prN}, 
// Lo [13] KAWI LETTER A..KAWI LETTER O + {0x11F12, 0x11F33, prN}, // Lo [34] KAWI LETTER KA..KAWI LETTER JNYA + {0x11F34, 0x11F35, prN}, // Mc [2] KAWI VOWEL SIGN AA..KAWI VOWEL SIGN ALTERNATE AA + {0x11F36, 0x11F3A, prN}, // Mn [5] KAWI VOWEL SIGN I..KAWI VOWEL SIGN VOCALIC R + {0x11F3E, 0x11F3F, prN}, // Mc [2] KAWI VOWEL SIGN E..KAWI VOWEL SIGN AI + {0x11F40, 0x11F40, prN}, // Mn KAWI VOWEL SIGN EU + {0x11F41, 0x11F41, prN}, // Mc KAWI SIGN KILLER + {0x11F42, 0x11F42, prN}, // Mn KAWI CONJOINER + {0x11F43, 0x11F4F, prN}, // Po [13] KAWI DANDA..KAWI PUNCTUATION CLOSING SPIRAL + {0x11F50, 0x11F59, prN}, // Nd [10] KAWI DIGIT ZERO..KAWI DIGIT NINE {0x11FB0, 0x11FB0, prN}, // Lo LISU LETTER YHA {0x11FC0, 0x11FD4, prN}, // No [21] TAMIL FRACTION ONE THREE-HUNDRED-AND-TWENTIETH..TAMIL FRACTION DOWNSCALING FACTOR KIIZH {0x11FD5, 0x11FDC, prN}, // So [8] TAMIL SIGN NEL..TAMIL SIGN MUKKURUNI @@ -2217,8 +2235,11 @@ var eastAsianWidth = [][3]int{ {0x12480, 0x12543, prN}, // Lo [196] CUNEIFORM SIGN AB TIMES NUN TENU..CUNEIFORM SIGN ZU5 TIMES THREE DISH TENU {0x12F90, 0x12FF0, prN}, // Lo [97] CYPRO-MINOAN SIGN CM001..CYPRO-MINOAN SIGN CM114 {0x12FF1, 0x12FF2, prN}, // Po [2] CYPRO-MINOAN SIGN CM301..CYPRO-MINOAN SIGN CM302 - {0x13000, 0x1342E, prN}, // Lo [1071] EGYPTIAN HIEROGLYPH A001..EGYPTIAN HIEROGLYPH AA032 - {0x13430, 0x13438, prN}, // Cf [9] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END SEGMENT + {0x13000, 0x1342F, prN}, // Lo [1072] EGYPTIAN HIEROGLYPH A001..EGYPTIAN HIEROGLYPH V011D + {0x13430, 0x1343F, prN}, // Cf [16] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END WALLED ENCLOSURE + {0x13440, 0x13440, prN}, // Mn EGYPTIAN HIEROGLYPH MIRROR HORIZONTALLY + {0x13441, 0x13446, prN}, // Lo [6] EGYPTIAN HIEROGLYPH FULL BLANK..EGYPTIAN HIEROGLYPH WIDE LOST SIGN + {0x13447, 0x13455, prN}, // Mn [15] EGYPTIAN HIEROGLYPH MODIFIER DAMAGED AT TOP START..EGYPTIAN HIEROGLYPH MODIFIER DAMAGED {0x14400, 0x14646, prN}, // Lo [583] ANATOLIAN HIEROGLYPH A001..ANATOLIAN HIEROGLYPH A530 {0x16800, 0x16A38, prN}, // Lo [569] BAMUM LETTER PHASE-A NGKUE MFON..BAMUM LETTER PHASE-F VUEQ {0x16A40, 0x16A5E, prN}, // Lo [31] MRO LETTER TA..MRO LETTER TEK @@ -2263,7 +2284,9 @@ var eastAsianWidth = [][3]int{ {0x1AFFD, 0x1AFFE, prW}, // Lm [2] KATAKANA LETTER MINNAN NASALIZED TONE-7..KATAKANA LETTER MINNAN NASALIZED TONE-8 {0x1B000, 0x1B0FF, prW}, // Lo [256] KATAKANA LETTER ARCHAIC E..HENTAIGANA LETTER RE-2 {0x1B100, 0x1B122, prW}, // Lo [35] HENTAIGANA LETTER RE-3..KATAKANA LETTER ARCHAIC WU + {0x1B132, 0x1B132, prW}, // Lo HIRAGANA LETTER SMALL KO {0x1B150, 0x1B152, prW}, // Lo [3] HIRAGANA LETTER SMALL WI..HIRAGANA LETTER SMALL WO + {0x1B155, 0x1B155, prW}, // Lo KATAKANA LETTER SMALL KO {0x1B164, 0x1B167, prW}, // Lo [4] KATAKANA LETTER SMALL WI..KATAKANA LETTER SMALL N {0x1B170, 0x1B2FB, prW}, // Lo [396] NUSHU CHARACTER-1B170..NUSHU CHARACTER-1B2FB {0x1BC00, 0x1BC6A, prN}, // Lo [107] DUPLOYAN LETTER H..DUPLOYAN LETTER VOCALIC M @@ -2294,6 +2317,7 @@ var eastAsianWidth = [][3]int{ {0x1D200, 0x1D241, prN}, // So [66] GREEK VOCAL NOTATION SYMBOL-1..GREEK INSTRUMENTAL NOTATION SYMBOL-54 {0x1D242, 0x1D244, prN}, // Mn [3] COMBINING GREEK MUSICAL TRISEME..COMBINING GREEK MUSICAL PENTASEME {0x1D245, 0x1D245, prN}, // So GREEK MUSICAL LEIMMA + {0x1D2C0, 0x1D2D3, prN}, // No [20] KAKTOVIK NUMERAL ZERO..KAKTOVIK NUMERAL NINETEEN {0x1D2E0, 0x1D2F3, prN}, // No [20] MAYAN NUMERAL ZERO..MAYAN NUMERAL NINETEEN {0x1D300, 0x1D356, prN}, // So [87] MONOGRAM FOR EARTH..TETRAGRAM FOR FOSTERING 
{0x1D360, 0x1D378, prN}, // No [25] COUNTING ROD UNIT DIGIT ONE..TALLY MARK FIVE @@ -2353,11 +2377,14 @@ var eastAsianWidth = [][3]int{ {0x1DF00, 0x1DF09, prN}, // Ll [10] LATIN SMALL LETTER FENG DIGRAPH WITH TRILL..LATIN SMALL LETTER T WITH HOOK AND RETROFLEX HOOK {0x1DF0A, 0x1DF0A, prN}, // Lo LATIN LETTER RETROFLEX CLICK WITH RETROFLEX HOOK {0x1DF0B, 0x1DF1E, prN}, // Ll [20] LATIN SMALL LETTER ESH WITH DOUBLE BAR..LATIN SMALL LETTER S WITH CURL + {0x1DF25, 0x1DF2A, prN}, // Ll [6] LATIN SMALL LETTER D WITH MID-HEIGHT LEFT HOOK..LATIN SMALL LETTER T WITH MID-HEIGHT LEFT HOOK {0x1E000, 0x1E006, prN}, // Mn [7] COMBINING GLAGOLITIC LETTER AZU..COMBINING GLAGOLITIC LETTER ZHIVETE {0x1E008, 0x1E018, prN}, // Mn [17] COMBINING GLAGOLITIC LETTER ZEMLJA..COMBINING GLAGOLITIC LETTER HERU {0x1E01B, 0x1E021, prN}, // Mn [7] COMBINING GLAGOLITIC LETTER SHTA..COMBINING GLAGOLITIC LETTER YATI {0x1E023, 0x1E024, prN}, // Mn [2] COMBINING GLAGOLITIC LETTER YU..COMBINING GLAGOLITIC LETTER SMALL YUS {0x1E026, 0x1E02A, prN}, // Mn [5] COMBINING GLAGOLITIC LETTER YO..COMBINING GLAGOLITIC LETTER FITA + {0x1E030, 0x1E06D, prN}, // Lm [62] MODIFIER LETTER CYRILLIC SMALL A..MODIFIER LETTER CYRILLIC SMALL STRAIGHT U WITH STROKE + {0x1E08F, 0x1E08F, prN}, // Mn COMBINING CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I {0x1E100, 0x1E12C, prN}, // Lo [45] NYIAKENG PUACHUE HMONG LETTER MA..NYIAKENG PUACHUE HMONG LETTER W {0x1E130, 0x1E136, prN}, // Mn [7] NYIAKENG PUACHUE HMONG TONE-B..NYIAKENG PUACHUE HMONG TONE-D {0x1E137, 0x1E13D, prN}, // Lm [7] NYIAKENG PUACHUE HMONG SIGN FOR PERSON..NYIAKENG PUACHUE HMONG SYLLABLE LENGTHENER @@ -2370,6 +2397,10 @@ var eastAsianWidth = [][3]int{ {0x1E2EC, 0x1E2EF, prN}, // Mn [4] WANCHO TONE TUP..WANCHO TONE KOINI {0x1E2F0, 0x1E2F9, prN}, // Nd [10] WANCHO DIGIT ZERO..WANCHO DIGIT NINE {0x1E2FF, 0x1E2FF, prN}, // Sc WANCHO NGUN SIGN + {0x1E4D0, 0x1E4EA, prN}, // Lo [27] NAG MUNDARI LETTER O..NAG MUNDARI LETTER ELL + {0x1E4EB, 0x1E4EB, prN}, // Lm NAG MUNDARI SIGN OJOD + {0x1E4EC, 0x1E4EF, prN}, // Mn [4] NAG MUNDARI SIGN MUHOR..NAG MUNDARI SIGN SUTUH + {0x1E4F0, 0x1E4F9, prN}, // Nd [10] NAG MUNDARI DIGIT ZERO..NAG MUNDARI DIGIT NINE {0x1E7E0, 0x1E7E6, prN}, // Lo [7] ETHIOPIC SYLLABLE HHYA..ETHIOPIC SYLLABLE HHYO {0x1E7E8, 0x1E7EB, prN}, // Lo [4] ETHIOPIC SYLLABLE GURAGE HHWA..ETHIOPIC SYLLABLE HHWE {0x1E7ED, 0x1E7EE, prN}, // Lo [2] ETHIOPIC SYLLABLE GURAGE MWI..ETHIOPIC SYLLABLE GURAGE MWEE @@ -2498,13 +2529,14 @@ var eastAsianWidth = [][3]int{ {0x1F6D0, 0x1F6D2, prW}, // So [3] PLACE OF WORSHIP..SHOPPING TROLLEY {0x1F6D3, 0x1F6D4, prN}, // So [2] STUPA..PAGODA {0x1F6D5, 0x1F6D7, prW}, // So [3] HINDU TEMPLE..ELEVATOR - {0x1F6DD, 0x1F6DF, prW}, // So [3] PLAYGROUND SLIDE..RING BUOY + {0x1F6DC, 0x1F6DF, prW}, // So [4] WIRELESS..RING BUOY {0x1F6E0, 0x1F6EA, prN}, // So [11] HAMMER AND WRENCH..NORTHEAST-POINTING AIRPLANE {0x1F6EB, 0x1F6EC, prW}, // So [2] AIRPLANE DEPARTURE..AIRPLANE ARRIVING {0x1F6F0, 0x1F6F3, prN}, // So [4] SATELLITE..PASSENGER SHIP {0x1F6F4, 0x1F6FC, prW}, // So [9] SCOOTER..ROLLER SKATE - {0x1F700, 0x1F773, prN}, // So [116] ALCHEMICAL SYMBOL FOR QUINTESSENCE..ALCHEMICAL SYMBOL FOR HALF OUNCE - {0x1F780, 0x1F7D8, prN}, // So [89] BLACK LEFT-POINTING ISOSCELES RIGHT TRIANGLE..NEGATIVE CIRCLED SQUARE + {0x1F700, 0x1F776, prN}, // So [119] ALCHEMICAL SYMBOL FOR QUINTESSENCE..LUNAR ECLIPSE + {0x1F77B, 0x1F77F, prN}, // So [5] HAUMEA..ORCUS + {0x1F780, 0x1F7D9, prN}, // So [90] BLACK LEFT-POINTING ISOSCELES RIGHT TRIANGLE..NINE POINTED WHITE STAR {0x1F7E0, 
0x1F7EB, prW}, // So [12] LARGE ORANGE CIRCLE..LARGE BROWN SQUARE {0x1F7F0, 0x1F7F0, prW}, // So HEAVY EQUALS SIGN {0x1F800, 0x1F80B, prN}, // So [12] LEFTWARDS ARROW WITH SMALL TRIANGLE ARROWHEAD..DOWNWARDS ARROW WITH LARGE TRIANGLE ARROWHEAD @@ -2521,22 +2553,20 @@ var eastAsianWidth = [][3]int{ {0x1F947, 0x1F9FF, prW}, // So [185] FIRST PLACE MEDAL..NAZAR AMULET {0x1FA00, 0x1FA53, prN}, // So [84] NEUTRAL CHESS KING..BLACK CHESS KNIGHT-BISHOP {0x1FA60, 0x1FA6D, prN}, // So [14] XIANGQI RED GENERAL..XIANGQI BLACK SOLDIER - {0x1FA70, 0x1FA74, prW}, // So [5] BALLET SHOES..THONG SANDAL - {0x1FA78, 0x1FA7C, prW}, // So [5] DROP OF BLOOD..CRUTCH - {0x1FA80, 0x1FA86, prW}, // So [7] YO-YO..NESTING DOLLS - {0x1FA90, 0x1FAAC, prW}, // So [29] RINGED PLANET..HAMSA - {0x1FAB0, 0x1FABA, prW}, // So [11] FLY..NEST WITH EGGS - {0x1FAC0, 0x1FAC5, prW}, // So [6] ANATOMICAL HEART..PERSON WITH CROWN - {0x1FAD0, 0x1FAD9, prW}, // So [10] BLUEBERRIES..JAR - {0x1FAE0, 0x1FAE7, prW}, // So [8] MELTING FACE..BUBBLES - {0x1FAF0, 0x1FAF6, prW}, // So [7] HAND WITH INDEX FINGER AND THUMB CROSSED..HEART HANDS + {0x1FA70, 0x1FA7C, prW}, // So [13] BALLET SHOES..CRUTCH + {0x1FA80, 0x1FA88, prW}, // So [9] YO-YO..FLUTE + {0x1FA90, 0x1FABD, prW}, // So [46] RINGED PLANET..WING + {0x1FABF, 0x1FAC5, prW}, // So [7] GOOSE..PERSON WITH CROWN + {0x1FACE, 0x1FADB, prW}, // So [14] MOOSE..PEA POD + {0x1FAE0, 0x1FAE8, prW}, // So [9] MELTING FACE..SHAKING FACE + {0x1FAF0, 0x1FAF8, prW}, // So [9] HAND WITH INDEX FINGER AND THUMB CROSSED..RIGHTWARDS PUSHING HAND {0x1FB00, 0x1FB92, prN}, // So [147] BLOCK SEXTANT-1..UPPER HALF INVERSE MEDIUM SHADE AND LOWER HALF BLOCK {0x1FB94, 0x1FBCA, prN}, // So [55] LEFT HALF INVERSE MEDIUM SHADE AND RIGHT HALF BLOCK..WHITE UP-POINTING CHEVRON {0x1FBF0, 0x1FBF9, prN}, // Nd [10] SEGMENTED DIGIT ZERO..SEGMENTED DIGIT NINE {0x20000, 0x2A6DF, prW}, // Lo [42720] CJK UNIFIED IDEOGRAPH-20000..CJK UNIFIED IDEOGRAPH-2A6DF {0x2A6E0, 0x2A6FF, prW}, // Cn [32] .. - {0x2A700, 0x2B738, prW}, // Lo [4153] CJK UNIFIED IDEOGRAPH-2A700..CJK UNIFIED IDEOGRAPH-2B738 - {0x2B739, 0x2B73F, prW}, // Cn [7] .. + {0x2A700, 0x2B739, prW}, // Lo [4154] CJK UNIFIED IDEOGRAPH-2A700..CJK UNIFIED IDEOGRAPH-2B739 + {0x2B73A, 0x2B73F, prW}, // Cn [6] .. {0x2B740, 0x2B81D, prW}, // Lo [222] CJK UNIFIED IDEOGRAPH-2B740..CJK UNIFIED IDEOGRAPH-2B81D {0x2B81E, 0x2B81F, prW}, // Cn [2] .. {0x2B820, 0x2CEA1, prW}, // Lo [5762] CJK UNIFIED IDEOGRAPH-2B820..CJK UNIFIED IDEOGRAPH-2CEA1 @@ -2547,7 +2577,9 @@ var eastAsianWidth = [][3]int{ {0x2FA1E, 0x2FA1F, prW}, // Cn [2] .. {0x2FA20, 0x2FFFD, prW}, // Cn [1502] .. {0x30000, 0x3134A, prW}, // Lo [4939] CJK UNIFIED IDEOGRAPH-30000..CJK UNIFIED IDEOGRAPH-3134A - {0x3134B, 0x3FFFD, prW}, // Cn [60595] .. + {0x3134B, 0x3134F, prW}, // Cn [5] .. + {0x31350, 0x323AF, prW}, // Lo [4192] CJK UNIFIED IDEOGRAPH-31350..CJK UNIFIED IDEOGRAPH-323AF + {0x323B0, 0x3FFFD, prW}, // Cn [56398] .. {0xE0001, 0xE0001, prN}, // Cf LANGUAGE TAG {0xE0020, 0xE007F, prN}, // Cf [96] TAG SPACE..CANCEL TAG {0xE0100, 0xE01EF, prA}, // Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256 diff --git a/vendor/github.com/rivo/uniseg/emojipresentation.go b/vendor/github.com/rivo/uniseg/emojipresentation.go index fd0f7451a..9b5f499c4 100644 --- a/vendor/github.com/rivo/uniseg/emojipresentation.go +++ b/vendor/github.com/rivo/uniseg/emojipresentation.go @@ -1,13 +1,13 @@ -package uniseg - // Code generated via go generate from gen_properties.go. DO NOT EDIT. 
+package uniseg + // emojiPresentation are taken from // // and -// https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt +// https://unicode.org/Public/15.0.0/ucd/emoji/emoji-data.txt // ("Extended_Pictographic" only) -// on September 10, 2022. See https://www.unicode.org/license.html for the Unicode +// on September 5, 2023. See https://www.unicode.org/license.html for the Unicode // license agreement. var emojiPresentation = [][3]int{ {0x231A, 0x231B, prEmojiPresentation}, // E0.6 [2] (⌚..⌛) watch..hourglass done @@ -211,6 +211,7 @@ var emojiPresentation = [][3]int{ {0x1F6D1, 0x1F6D2, prEmojiPresentation}, // E3.0 [2] (🛑..🛒) stop sign..shopping cart {0x1F6D5, 0x1F6D5, prEmojiPresentation}, // E12.0 [1] (🛕) hindu temple {0x1F6D6, 0x1F6D7, prEmojiPresentation}, // E13.0 [2] (🛖..🛗) hut..elevator + {0x1F6DC, 0x1F6DC, prEmojiPresentation}, // E15.0 [1] (🛜) wireless {0x1F6DD, 0x1F6DF, prEmojiPresentation}, // E14.0 [3] (🛝..🛟) playground slide..ring buoy {0x1F6EB, 0x1F6EC, prEmojiPresentation}, // E1.0 [2] (🛫..🛬) airplane departure..airplane arrival {0x1F6F4, 0x1F6F6, prEmojiPresentation}, // E3.0 [3] (🛴..🛶) kick scooter..canoe @@ -267,19 +268,28 @@ var emojiPresentation = [][3]int{ {0x1F9E7, 0x1F9FF, prEmojiPresentation}, // E11.0 [25] (🧧..🧿) red envelope..nazar amulet {0x1FA70, 0x1FA73, prEmojiPresentation}, // E12.0 [4] (🩰..🩳) ballet shoes..shorts {0x1FA74, 0x1FA74, prEmojiPresentation}, // E13.0 [1] (🩴) thong sandal + {0x1FA75, 0x1FA77, prEmojiPresentation}, // E15.0 [3] (🩵..🩷) light blue heart..pink heart {0x1FA78, 0x1FA7A, prEmojiPresentation}, // E12.0 [3] (🩸..🩺) drop of blood..stethoscope {0x1FA7B, 0x1FA7C, prEmojiPresentation}, // E14.0 [2] (🩻..🩼) x-ray..crutch {0x1FA80, 0x1FA82, prEmojiPresentation}, // E12.0 [3] (🪀..🪂) yo-yo..parachute {0x1FA83, 0x1FA86, prEmojiPresentation}, // E13.0 [4] (🪃..🪆) boomerang..nesting dolls + {0x1FA87, 0x1FA88, prEmojiPresentation}, // E15.0 [2] (🪇..🪈) maracas..flute {0x1FA90, 0x1FA95, prEmojiPresentation}, // E12.0 [6] (🪐..🪕) ringed planet..banjo {0x1FA96, 0x1FAA8, prEmojiPresentation}, // E13.0 [19] (🪖..🪨) military helmet..rock {0x1FAA9, 0x1FAAC, prEmojiPresentation}, // E14.0 [4] (🪩..🪬) mirror ball..hamsa + {0x1FAAD, 0x1FAAF, prEmojiPresentation}, // E15.0 [3] (🪭..🪯) folding hand fan..khanda {0x1FAB0, 0x1FAB6, prEmojiPresentation}, // E13.0 [7] (🪰..🪶) fly..feather {0x1FAB7, 0x1FABA, prEmojiPresentation}, // E14.0 [4] (🪷..🪺) lotus..nest with eggs + {0x1FABB, 0x1FABD, prEmojiPresentation}, // E15.0 [3] (🪻..🪽) hyacinth..wing + {0x1FABF, 0x1FABF, prEmojiPresentation}, // E15.0 [1] (🪿) goose {0x1FAC0, 0x1FAC2, prEmojiPresentation}, // E13.0 [3] (🫀..🫂) anatomical heart..people hugging {0x1FAC3, 0x1FAC5, prEmojiPresentation}, // E14.0 [3] (🫃..🫅) pregnant man..person with crown + {0x1FACE, 0x1FACF, prEmojiPresentation}, // E15.0 [2] (🫎..🫏) moose..donkey {0x1FAD0, 0x1FAD6, prEmojiPresentation}, // E13.0 [7] (🫐..🫖) blueberries..teapot {0x1FAD7, 0x1FAD9, prEmojiPresentation}, // E14.0 [3] (🫗..🫙) pouring liquid..jar + {0x1FADA, 0x1FADB, prEmojiPresentation}, // E15.0 [2] (🫚..🫛) ginger root..pea pod {0x1FAE0, 0x1FAE7, prEmojiPresentation}, // E14.0 [8] (🫠..🫧) melting face..bubbles + {0x1FAE8, 0x1FAE8, prEmojiPresentation}, // E15.0 [1] (🫨) shaking face {0x1FAF0, 0x1FAF6, prEmojiPresentation}, // E14.0 [7] (🫰..🫶) hand with index finger and thumb crossed..heart hands + {0x1FAF7, 0x1FAF8, prEmojiPresentation}, // E15.0 [2] (🫷..🫸) leftwards pushing hand..rightwards pushing hand } diff --git a/vendor/github.com/rivo/uniseg/gen_breaktest.go 
b/vendor/github.com/rivo/uniseg/gen_breaktest.go index e613c4cd0..6bfbeb5e7 100644 --- a/vendor/github.com/rivo/uniseg/gen_breaktest.go +++ b/vendor/github.com/rivo/uniseg/gen_breaktest.go @@ -32,7 +32,7 @@ import ( // We want to test against a specific version rather than the latest. When the // package is upgraded to a new version, change these to generate new tests. const ( - testCaseURL = `https://www.unicode.org/Public/14.0.0/ucd/auxiliary/%s.txt` + testCaseURL = `https://www.unicode.org/Public/15.0.0/ucd/auxiliary/%s.txt` ) func main() { @@ -76,9 +76,9 @@ func parse(url string) ([]byte, error) { buf := new(bytes.Buffer) buf.Grow(120 << 10) - buf.WriteString(`package uniseg + buf.WriteString(`// Code generated via go generate from gen_breaktest.go. DO NOT EDIT. -// Code generated via go generate from gen_breaktest.go. DO NOT EDIT. +package uniseg // ` + os.Args[3] + ` are Grapheme testcases taken from // ` + url + ` @@ -136,7 +136,9 @@ var ( // // E.g. for the input b="÷ 0020 × 0308 ÷ 1F1E6 ÷" // it will append -// "\u0020\u0308\U0001F1E6" +// +// "\u0020\u0308\U0001F1E6" +// // and "[][]rune{{0x0020,0x0308},{0x1F1E6},}" // to orig and exp respectively. // diff --git a/vendor/github.com/rivo/uniseg/gen_properties.go b/vendor/github.com/rivo/uniseg/gen_properties.go index 999d5efdd..8992d2c5f 100644 --- a/vendor/github.com/rivo/uniseg/gen_properties.go +++ b/vendor/github.com/rivo/uniseg/gen_properties.go @@ -41,8 +41,8 @@ import ( // We want to test against a specific version rather than the latest. When the // package is upgraded to a new version, change these to generate new tests. const ( - propertyURL = `https://www.unicode.org/Public/14.0.0/ucd/%s.txt` - emojiURL = `https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt` + propertyURL = `https://www.unicode.org/Public/15.0.0/ucd/%s.txt` + emojiURL = `https://unicode.org/Public/15.0.0/ucd/emoji/emoji-data.txt` ) // The regular expression for a line containing a code point range property. @@ -178,6 +178,11 @@ func parse(propertyURL, emojiProperty string, includeGeneralCategory bool) (stri } } + // Avoid overflow during binary search. + if len(properties) >= 1<<31 { + return "", errors.New("too many properties") + } + // Sort properties. sort.Slice(properties, func(i, j int) bool { left, _ := strconv.ParseUint(properties[i][0], 16, 64) @@ -200,9 +205,9 @@ func parse(propertyURL, emojiProperty string, includeGeneralCategory bool) (stri // ` + emojiURL + ` // ("Extended_Pictographic" only)` } - buf.WriteString(`package uniseg + buf.WriteString(`// Code generated via go generate from gen_properties.go. DO NOT EDIT. -// Code generated via go generate from gen_properties.go. DO NOT EDIT. +package uniseg // ` + os.Args[3] + ` are taken from // ` + propertyURL + emojiComment + ` diff --git a/vendor/github.com/rivo/uniseg/grapheme.go b/vendor/github.com/rivo/uniseg/grapheme.go index 0086fc1b2..b12403d43 100644 --- a/vendor/github.com/rivo/uniseg/grapheme.go +++ b/vendor/github.com/rivo/uniseg/grapheme.go @@ -13,9 +13,10 @@ import "unicode/utf8" // well as boundary information and character width is available via the various // methods (see examples below). // -// Using this class to iterate over a string is convenient but it is much slower -// than using this package's [Step] or [StepString] functions or any of the -// other specialized functions starting with "First". +// This class basically wraps the [StepString] parser and provides a convenient +// interface to it. 
If you are only interested in some parts of this package's +// functionality, using the specialized functions starting with "First" is +// almost always faster. type Graphemes struct { // The original string. original string @@ -222,7 +223,7 @@ func FirstGraphemeCluster(b []byte, state int) (cluster, rest []byte, width, new if len(b) <= length { // If we're already past the end, there is nothing else to parse. var prop int if state < 0 { - prop = property(graphemeCodePoints, r) + prop = propertyGraphemes(r) } else { prop = state >> shiftGraphemePropState } @@ -252,16 +253,14 @@ func FirstGraphemeCluster(b []byte, state int) (cluster, rest []byte, width, new return b[:length], b[length:], width, state | (prop << shiftGraphemePropState) } - if r == vs16 { - width = 2 - } else if firstProp != prExtendedPictographic && firstProp != prRegionalIndicator && firstProp != prL { - width += runeWidth(r, prop) - } else if firstProp == prExtendedPictographic { + if firstProp == prExtendedPictographic { if r == vs15 { width = 1 - } else { + } else if r == vs16 { width = 2 } + } else if firstProp != prRegionalIndicator && firstProp != prL { + width += runeWidth(r, prop) } length += l @@ -284,7 +283,7 @@ func FirstGraphemeClusterInString(str string, state int) (cluster, rest string, if len(str) <= length { // If we're already past the end, there is nothing else to parse. var prop int if state < 0 { - prop = property(graphemeCodePoints, r) + prop = propertyGraphemes(r) } else { prop = state >> shiftGraphemePropState } @@ -314,16 +313,14 @@ func FirstGraphemeClusterInString(str string, state int) (cluster, rest string, return str[:length], str[length:], width, state | (prop << shiftGraphemePropState) } - if r == vs16 { - width = 2 - } else if firstProp != prExtendedPictographic && firstProp != prRegionalIndicator && firstProp != prL { - width += runeWidth(r, prop) - } else if firstProp == prExtendedPictographic { + if firstProp == prExtendedPictographic { if r == vs15 { width = 1 - } else { + } else if r == vs16 { width = 2 } + } else if firstProp != prRegionalIndicator && firstProp != prL { + width += runeWidth(r, prop) } length += l diff --git a/vendor/github.com/rivo/uniseg/graphemeproperties.go b/vendor/github.com/rivo/uniseg/graphemeproperties.go index a87d140bf..0aff4a619 100644 --- a/vendor/github.com/rivo/uniseg/graphemeproperties.go +++ b/vendor/github.com/rivo/uniseg/graphemeproperties.go @@ -1,13 +1,13 @@ -package uniseg - // Code generated via go generate from gen_properties.go. DO NOT EDIT. +package uniseg + // graphemeCodePoints are taken from -// https://www.unicode.org/Public/14.0.0/ucd/auxiliary/GraphemeBreakProperty.txt +// https://www.unicode.org/Public/15.0.0/ucd/auxiliary/GraphemeBreakProperty.txt // and -// https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt +// https://unicode.org/Public/15.0.0/ucd/emoji/emoji-data.txt // ("Extended_Pictographic" only) -// on September 10, 2022. See https://www.unicode.org/license.html for the Unicode +// on September 5, 2023. See https://www.unicode.org/license.html for the Unicode // license agreement. var graphemeCodePoints = [][3]int{ {0x0000, 0x0009, prControl}, // Cc [10] .. 
@@ -143,6 +143,7 @@ var graphemeCodePoints = [][3]int{ {0x0CCC, 0x0CCD, prExtend}, // Mn [2] KANNADA VOWEL SIGN AU..KANNADA SIGN VIRAMA {0x0CD5, 0x0CD6, prExtend}, // Mc [2] KANNADA LENGTH MARK..KANNADA AI LENGTH MARK {0x0CE2, 0x0CE3, prExtend}, // Mn [2] KANNADA VOWEL SIGN VOCALIC L..KANNADA VOWEL SIGN VOCALIC LL + {0x0CF3, 0x0CF3, prSpacingMark}, // Mc KANNADA SIGN COMBINING ANUSVARA ABOVE RIGHT {0x0D00, 0x0D01, prExtend}, // Mn [2] MALAYALAM SIGN COMBINING ANUSVARA ABOVE..MALAYALAM SIGN CANDRABINDU {0x0D02, 0x0D03, prSpacingMark}, // Mc [2] MALAYALAM SIGN ANUSVARA..MALAYALAM SIGN VISARGA {0x0D3B, 0x0D3C, prExtend}, // Mn [2] MALAYALAM SIGN VERTICAL BAR VIRAMA..MALAYALAM SIGN CIRCULAR VIRAMA @@ -172,7 +173,7 @@ var graphemeCodePoints = [][3]int{ {0x0EB1, 0x0EB1, prExtend}, // Mn LAO VOWEL SIGN MAI KAN {0x0EB3, 0x0EB3, prSpacingMark}, // Lo LAO VOWEL SIGN AM {0x0EB4, 0x0EBC, prExtend}, // Mn [9] LAO VOWEL SIGN I..LAO SEMIVOWEL SIGN LO - {0x0EC8, 0x0ECD, prExtend}, // Mn [6] LAO TONE MAI EK..LAO NIGGAHITA + {0x0EC8, 0x0ECE, prExtend}, // Mn [7] LAO TONE MAI EK..LAO YAMAKKAN {0x0F18, 0x0F19, prExtend}, // Mn [2] TIBETAN ASTROLOGICAL SIGN -KHYUD PA..TIBETAN ASTROLOGICAL SIGN SDONG TSHUGS {0x0F35, 0x0F35, prExtend}, // Mn TIBETAN MARK NGAS BZUNG NYI ZLA {0x0F37, 0x0F37, prExtend}, // Mn TIBETAN MARK NGAS BZUNG SGOR RTAGS @@ -1336,6 +1337,7 @@ var graphemeCodePoints = [][3]int{ {0x10AE5, 0x10AE6, prExtend}, // Mn [2] MANICHAEAN ABBREVIATION MARK ABOVE..MANICHAEAN ABBREVIATION MARK BELOW {0x10D24, 0x10D27, prExtend}, // Mn [4] HANIFI ROHINGYA SIGN HARBAHAY..HANIFI ROHINGYA SIGN TASSI {0x10EAB, 0x10EAC, prExtend}, // Mn [2] YEZIDI COMBINING HAMZA MARK..YEZIDI COMBINING MADDA MARK + {0x10EFD, 0x10EFF, prExtend}, // Mn [3] ARABIC SMALL LOW WORD SAKTA..ARABIC SMALL LOW WORD MADDA {0x10F46, 0x10F50, prExtend}, // Mn [11] SOGDIAN COMBINING DOT BELOW..SOGDIAN COMBINING STROKE BELOW {0x10F82, 0x10F85, prExtend}, // Mn [4] OLD UYGHUR COMBINING DOT ABOVE..OLD UYGHUR COMBINING TWO DOTS BELOW {0x11000, 0x11000, prSpacingMark}, // Mc BRAHMI SIGN CANDRABINDU @@ -1375,6 +1377,7 @@ var graphemeCodePoints = [][3]int{ {0x11235, 0x11235, prSpacingMark}, // Mc KHOJKI SIGN VIRAMA {0x11236, 0x11237, prExtend}, // Mn [2] KHOJKI SIGN NUKTA..KHOJKI SIGN SHADDA {0x1123E, 0x1123E, prExtend}, // Mn KHOJKI SIGN SUKUN + {0x11241, 0x11241, prExtend}, // Mn KHOJKI VOWEL SIGN VOCALIC R {0x112DF, 0x112DF, prExtend}, // Mn KHUDAWADI SIGN ANUSVARA {0x112E0, 0x112E2, prSpacingMark}, // Mc [3] KHUDAWADI VOWEL SIGN AA..KHUDAWADI VOWEL SIGN II {0x112E3, 0x112EA, prExtend}, // Mn [8] KHUDAWADI VOWEL SIGN U..KHUDAWADI SIGN VIRAMA @@ -1494,7 +1497,18 @@ var graphemeCodePoints = [][3]int{ {0x11D97, 0x11D97, prExtend}, // Mn GUNJALA GONDI VIRAMA {0x11EF3, 0x11EF4, prExtend}, // Mn [2] MAKASAR VOWEL SIGN I..MAKASAR VOWEL SIGN U {0x11EF5, 0x11EF6, prSpacingMark}, // Mc [2] MAKASAR VOWEL SIGN E..MAKASAR VOWEL SIGN O - {0x13430, 0x13438, prControl}, // Cf [9] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END SEGMENT + {0x11F00, 0x11F01, prExtend}, // Mn [2] KAWI SIGN CANDRABINDU..KAWI SIGN ANUSVARA + {0x11F02, 0x11F02, prPrepend}, // Lo KAWI SIGN REPHA + {0x11F03, 0x11F03, prSpacingMark}, // Mc KAWI SIGN VISARGA + {0x11F34, 0x11F35, prSpacingMark}, // Mc [2] KAWI VOWEL SIGN AA..KAWI VOWEL SIGN ALTERNATE AA + {0x11F36, 0x11F3A, prExtend}, // Mn [5] KAWI VOWEL SIGN I..KAWI VOWEL SIGN VOCALIC R + {0x11F3E, 0x11F3F, prSpacingMark}, // Mc [2] KAWI VOWEL SIGN E..KAWI VOWEL SIGN AI + {0x11F40, 0x11F40, prExtend}, // Mn KAWI VOWEL SIGN EU 
+ {0x11F41, 0x11F41, prSpacingMark}, // Mc KAWI SIGN KILLER + {0x11F42, 0x11F42, prExtend}, // Mn KAWI CONJOINER + {0x13430, 0x1343F, prControl}, // Cf [16] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END WALLED ENCLOSURE + {0x13440, 0x13440, prExtend}, // Mn EGYPTIAN HIEROGLYPH MIRROR HORIZONTALLY + {0x13447, 0x13455, prExtend}, // Mn [15] EGYPTIAN HIEROGLYPH MODIFIER DAMAGED AT TOP START..EGYPTIAN HIEROGLYPH MODIFIER DAMAGED {0x16AF0, 0x16AF4, prExtend}, // Mn [5] BASSA VAH COMBINING HIGH TONE..BASSA VAH COMBINING HIGH-LOW TONE {0x16B30, 0x16B36, prExtend}, // Mn [7] PAHAWH HMONG MARK CIM TUB..PAHAWH HMONG MARK CIM TAUM {0x16F4F, 0x16F4F, prExtend}, // Mn MIAO SIGN CONSONANT MODIFIER BAR @@ -1527,9 +1541,11 @@ var graphemeCodePoints = [][3]int{ {0x1E01B, 0x1E021, prExtend}, // Mn [7] COMBINING GLAGOLITIC LETTER SHTA..COMBINING GLAGOLITIC LETTER YATI {0x1E023, 0x1E024, prExtend}, // Mn [2] COMBINING GLAGOLITIC LETTER YU..COMBINING GLAGOLITIC LETTER SMALL YUS {0x1E026, 0x1E02A, prExtend}, // Mn [5] COMBINING GLAGOLITIC LETTER YO..COMBINING GLAGOLITIC LETTER FITA + {0x1E08F, 0x1E08F, prExtend}, // Mn COMBINING CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I {0x1E130, 0x1E136, prExtend}, // Mn [7] NYIAKENG PUACHUE HMONG TONE-B..NYIAKENG PUACHUE HMONG TONE-D {0x1E2AE, 0x1E2AE, prExtend}, // Mn TOTO SIGN RISING TONE {0x1E2EC, 0x1E2EF, prExtend}, // Mn [4] WANCHO TONE TUP..WANCHO TONE KOINI + {0x1E4EC, 0x1E4EF, prExtend}, // Mn [4] NAG MUNDARI SIGN MUHOR..NAG MUNDARI SIGN SUTUH {0x1E8D0, 0x1E8D6, prExtend}, // Mn [7] MENDE KIKAKUI COMBINING NUMBER TEENS..MENDE KIKAKUI COMBINING NUMBER MILLIONS {0x1E944, 0x1E94A, prExtend}, // Mn [7] ADLAM ALIF LENGTHENER..ADLAM NUKTA {0x1F000, 0x1F003, prExtendedPictographic}, // E0.0 [4] (🀀..🀃) MAHJONG TILE EAST WIND..MAHJONG TILE NORTH WIND @@ -1780,7 +1796,8 @@ var graphemeCodePoints = [][3]int{ {0x1F6D3, 0x1F6D4, prExtendedPictographic}, // E0.0 [2] (🛓..🛔) STUPA..PAGODA {0x1F6D5, 0x1F6D5, prExtendedPictographic}, // E12.0 [1] (🛕) hindu temple {0x1F6D6, 0x1F6D7, prExtendedPictographic}, // E13.0 [2] (🛖..🛗) hut..elevator - {0x1F6D8, 0x1F6DC, prExtendedPictographic}, // E0.0 [5] (🛘..🛜) .. + {0x1F6D8, 0x1F6DB, prExtendedPictographic}, // E0.0 [4] (🛘..🛛) .. + {0x1F6DC, 0x1F6DC, prExtendedPictographic}, // E15.0 [1] (🛜) wireless {0x1F6DD, 0x1F6DF, prExtendedPictographic}, // E14.0 [3] (🛝..🛟) playground slide..ring buoy {0x1F6E0, 0x1F6E5, prExtendedPictographic}, // E0.7 [6] (🛠️..🛥️) hammer and wrench..motor boat {0x1F6E6, 0x1F6E8, prExtendedPictographic}, // E0.0 [3] (🛦..🛨) UP-POINTING MILITARY AIRPLANE..UP-POINTING SMALL AIRPLANE @@ -1797,7 +1814,7 @@ var graphemeCodePoints = [][3]int{ {0x1F6FA, 0x1F6FA, prExtendedPictographic}, // E12.0 [1] (🛺) auto rickshaw {0x1F6FB, 0x1F6FC, prExtendedPictographic}, // E13.0 [2] (🛻..🛼) pickup truck..roller skate {0x1F6FD, 0x1F6FF, prExtendedPictographic}, // E0.0 [3] (🛽..🛿) .. - {0x1F774, 0x1F77F, prExtendedPictographic}, // E0.0 [12] (🝴..🝿) .. + {0x1F774, 0x1F77F, prExtendedPictographic}, // E0.0 [12] (🝴..🝿) LOT OF FORTUNE..ORCUS {0x1F7D5, 0x1F7DF, prExtendedPictographic}, // E0.0 [11] (🟕..🟟) CIRCLED TRIANGLE.. {0x1F7E0, 0x1F7EB, prExtendedPictographic}, // E12.0 [12] (🟠..🟫) orange circle..brown square {0x1F7EC, 0x1F7EF, prExtendedPictographic}, // E0.0 [4] (🟬..🟯) .. @@ -1856,30 +1873,37 @@ var graphemeCodePoints = [][3]int{ {0x1FA00, 0x1FA6F, prExtendedPictographic}, // E0.0 [112] (🨀..🩯) NEUTRAL CHESS KING.. 
{0x1FA70, 0x1FA73, prExtendedPictographic}, // E12.0 [4] (🩰..🩳) ballet shoes..shorts {0x1FA74, 0x1FA74, prExtendedPictographic}, // E13.0 [1] (🩴) thong sandal - {0x1FA75, 0x1FA77, prExtendedPictographic}, // E0.0 [3] (🩵..🩷) .. + {0x1FA75, 0x1FA77, prExtendedPictographic}, // E15.0 [3] (🩵..🩷) light blue heart..pink heart {0x1FA78, 0x1FA7A, prExtendedPictographic}, // E12.0 [3] (🩸..🩺) drop of blood..stethoscope {0x1FA7B, 0x1FA7C, prExtendedPictographic}, // E14.0 [2] (🩻..🩼) x-ray..crutch {0x1FA7D, 0x1FA7F, prExtendedPictographic}, // E0.0 [3] (🩽..🩿) .. {0x1FA80, 0x1FA82, prExtendedPictographic}, // E12.0 [3] (🪀..🪂) yo-yo..parachute {0x1FA83, 0x1FA86, prExtendedPictographic}, // E13.0 [4] (🪃..🪆) boomerang..nesting dolls - {0x1FA87, 0x1FA8F, prExtendedPictographic}, // E0.0 [9] (🪇..🪏) .. + {0x1FA87, 0x1FA88, prExtendedPictographic}, // E15.0 [2] (🪇..🪈) maracas..flute + {0x1FA89, 0x1FA8F, prExtendedPictographic}, // E0.0 [7] (🪉..🪏) .. {0x1FA90, 0x1FA95, prExtendedPictographic}, // E12.0 [6] (🪐..🪕) ringed planet..banjo {0x1FA96, 0x1FAA8, prExtendedPictographic}, // E13.0 [19] (🪖..🪨) military helmet..rock {0x1FAA9, 0x1FAAC, prExtendedPictographic}, // E14.0 [4] (🪩..🪬) mirror ball..hamsa - {0x1FAAD, 0x1FAAF, prExtendedPictographic}, // E0.0 [3] (🪭..🪯) .. + {0x1FAAD, 0x1FAAF, prExtendedPictographic}, // E15.0 [3] (🪭..🪯) folding hand fan..khanda {0x1FAB0, 0x1FAB6, prExtendedPictographic}, // E13.0 [7] (🪰..🪶) fly..feather {0x1FAB7, 0x1FABA, prExtendedPictographic}, // E14.0 [4] (🪷..🪺) lotus..nest with eggs - {0x1FABB, 0x1FABF, prExtendedPictographic}, // E0.0 [5] (🪻..🪿) .. + {0x1FABB, 0x1FABD, prExtendedPictographic}, // E15.0 [3] (🪻..🪽) hyacinth..wing + {0x1FABE, 0x1FABE, prExtendedPictographic}, // E0.0 [1] (🪾) + {0x1FABF, 0x1FABF, prExtendedPictographic}, // E15.0 [1] (🪿) goose {0x1FAC0, 0x1FAC2, prExtendedPictographic}, // E13.0 [3] (🫀..🫂) anatomical heart..people hugging {0x1FAC3, 0x1FAC5, prExtendedPictographic}, // E14.0 [3] (🫃..🫅) pregnant man..person with crown - {0x1FAC6, 0x1FACF, prExtendedPictographic}, // E0.0 [10] (🫆..🫏) .. + {0x1FAC6, 0x1FACD, prExtendedPictographic}, // E0.0 [8] (🫆..🫍) .. + {0x1FACE, 0x1FACF, prExtendedPictographic}, // E15.0 [2] (🫎..🫏) moose..donkey {0x1FAD0, 0x1FAD6, prExtendedPictographic}, // E13.0 [7] (🫐..🫖) blueberries..teapot {0x1FAD7, 0x1FAD9, prExtendedPictographic}, // E14.0 [3] (🫗..🫙) pouring liquid..jar - {0x1FADA, 0x1FADF, prExtendedPictographic}, // E0.0 [6] (🫚..🫟) .. + {0x1FADA, 0x1FADB, prExtendedPictographic}, // E15.0 [2] (🫚..🫛) ginger root..pea pod + {0x1FADC, 0x1FADF, prExtendedPictographic}, // E0.0 [4] (🫜..🫟) .. {0x1FAE0, 0x1FAE7, prExtendedPictographic}, // E14.0 [8] (🫠..🫧) melting face..bubbles - {0x1FAE8, 0x1FAEF, prExtendedPictographic}, // E0.0 [8] (🫨..🫯) .. + {0x1FAE8, 0x1FAE8, prExtendedPictographic}, // E15.0 [1] (🫨) shaking face + {0x1FAE9, 0x1FAEF, prExtendedPictographic}, // E0.0 [7] (🫩..🫯) .. {0x1FAF0, 0x1FAF6, prExtendedPictographic}, // E14.0 [7] (🫰..🫶) hand with index finger and thumb crossed..heart hands - {0x1FAF7, 0x1FAFF, prExtendedPictographic}, // E0.0 [9] (🫷..🫿) .. + {0x1FAF7, 0x1FAF8, prExtendedPictographic}, // E15.0 [2] (🫷..🫸) leftwards pushing hand..rightwards pushing hand + {0x1FAF9, 0x1FAFF, prExtendedPictographic}, // E0.0 [7] (🫹..🫿) .. {0x1FC00, 0x1FFFD, prExtendedPictographic}, // E0.0[1022] (🰀..🿽) .. 
{0xE0000, 0xE0000, prControl}, // Cn {0xE0001, 0xE0001, prControl}, // Cf LANGUAGE TAG diff --git a/vendor/github.com/rivo/uniseg/graphemerules.go b/vendor/github.com/rivo/uniseg/graphemerules.go index 9f46b575b..5d399d29c 100644 --- a/vendor/github.com/rivo/uniseg/graphemerules.go +++ b/vendor/github.com/rivo/uniseg/graphemerules.go @@ -21,11 +21,12 @@ const ( grBoundary ) -// The grapheme cluster parser's state transitions. Maps (state, property) to -// (new state, breaking instruction, rule number). The breaking instruction -// always refers to the boundary between the last and next code point. +// grTransitions implements the grapheme cluster parser's state transitions. +// Maps state and property to a new state, a breaking instruction, and rule +// number. The breaking instruction always refers to the boundary between the +// last and next code point. Returns negative values if no transition is found. // -// This map is queried as follows: +// This function is used as follows: // // 1. Find specific state + specific property. Stop if found. // 2. Find specific state + any property. @@ -36,59 +37,96 @@ const ( // are equal. Stop. // 6. Assume grAny and grBoundary. // -// Unicode version 14.0.0. -var grTransitions = map[[2]int][3]int{ +// Unicode version 15.0.0. +func grTransitions(state, prop int) (newState int, newProp int, boundary int) { + // It turns out that using a big switch statement is much faster than using + // a map. + + switch uint64(state) | uint64(prop)<<32 { // GB5 - {grAny, prCR}: {grCR, grBoundary, 50}, - {grAny, prLF}: {grControlLF, grBoundary, 50}, - {grAny, prControl}: {grControlLF, grBoundary, 50}, + case grAny | prCR<<32: + return grCR, grBoundary, 50 + case grAny | prLF<<32: + return grControlLF, grBoundary, 50 + case grAny | prControl<<32: + return grControlLF, grBoundary, 50 // GB4 - {grCR, prAny}: {grAny, grBoundary, 40}, - {grControlLF, prAny}: {grAny, grBoundary, 40}, + case grCR | prAny<<32: + return grAny, grBoundary, 40 + case grControlLF | prAny<<32: + return grAny, grBoundary, 40 - // GB3. - {grCR, prLF}: {grControlLF, grNoBoundary, 30}, + // GB3 + case grCR | prLF<<32: + return grControlLF, grNoBoundary, 30 - // GB6. - {grAny, prL}: {grL, grBoundary, 9990}, - {grL, prL}: {grL, grNoBoundary, 60}, - {grL, prV}: {grLVV, grNoBoundary, 60}, - {grL, prLV}: {grLVV, grNoBoundary, 60}, - {grL, prLVT}: {grLVTT, grNoBoundary, 60}, + // GB6 + case grAny | prL<<32: + return grL, grBoundary, 9990 + case grL | prL<<32: + return grL, grNoBoundary, 60 + case grL | prV<<32: + return grLVV, grNoBoundary, 60 + case grL | prLV<<32: + return grLVV, grNoBoundary, 60 + case grL | prLVT<<32: + return grLVTT, grNoBoundary, 60 - // GB7. - {grAny, prLV}: {grLVV, grBoundary, 9990}, - {grAny, prV}: {grLVV, grBoundary, 9990}, - {grLVV, prV}: {grLVV, grNoBoundary, 70}, - {grLVV, prT}: {grLVTT, grNoBoundary, 70}, + // GB7 + case grAny | prLV<<32: + return grLVV, grBoundary, 9990 + case grAny | prV<<32: + return grLVV, grBoundary, 9990 + case grLVV | prV<<32: + return grLVV, grNoBoundary, 70 + case grLVV | prT<<32: + return grLVTT, grNoBoundary, 70 - // GB8. - {grAny, prLVT}: {grLVTT, grBoundary, 9990}, - {grAny, prT}: {grLVTT, grBoundary, 9990}, - {grLVTT, prT}: {grLVTT, grNoBoundary, 80}, + // GB8 + case grAny | prLVT<<32: + return grLVTT, grBoundary, 9990 + case grAny | prT<<32: + return grLVTT, grBoundary, 9990 + case grLVTT | prT<<32: + return grLVTT, grNoBoundary, 80 - // GB9. 
- {grAny, prExtend}: {grAny, grNoBoundary, 90}, - {grAny, prZWJ}: {grAny, grNoBoundary, 90}, + // GB9 + case grAny | prExtend<<32: + return grAny, grNoBoundary, 90 + case grAny | prZWJ<<32: + return grAny, grNoBoundary, 90 - // GB9a. - {grAny, prSpacingMark}: {grAny, grNoBoundary, 91}, + // GB9a + case grAny | prSpacingMark<<32: + return grAny, grNoBoundary, 91 - // GB9b. - {grAny, prPrepend}: {grPrepend, grBoundary, 9990}, - {grPrepend, prAny}: {grAny, grNoBoundary, 92}, + // GB9b + case grAny | prPrepend<<32: + return grPrepend, grBoundary, 9990 + case grPrepend | prAny<<32: + return grAny, grNoBoundary, 92 - // GB11. - {grAny, prExtendedPictographic}: {grExtendedPictographic, grBoundary, 9990}, - {grExtendedPictographic, prExtend}: {grExtendedPictographic, grNoBoundary, 110}, - {grExtendedPictographic, prZWJ}: {grExtendedPictographicZWJ, grNoBoundary, 110}, - {grExtendedPictographicZWJ, prExtendedPictographic}: {grExtendedPictographic, grNoBoundary, 110}, + // GB11 + case grAny | prExtendedPictographic<<32: + return grExtendedPictographic, grBoundary, 9990 + case grExtendedPictographic | prExtend<<32: + return grExtendedPictographic, grNoBoundary, 110 + case grExtendedPictographic | prZWJ<<32: + return grExtendedPictographicZWJ, grNoBoundary, 110 + case grExtendedPictographicZWJ | prExtendedPictographic<<32: + return grExtendedPictographic, grNoBoundary, 110 - // GB12 / GB13. - {grAny, prRegionalIndicator}: {grRIOdd, grBoundary, 9990}, - {grRIOdd, prRegionalIndicator}: {grRIEven, grNoBoundary, 120}, - {grRIEven, prRegionalIndicator}: {grRIOdd, grBoundary, 120}, + // GB12 / GB13 + case grAny | prRegionalIndicator<<32: + return grRIOdd, grBoundary, 9990 + case grRIOdd | prRegionalIndicator<<32: + return grRIEven, grNoBoundary, 120 + case grRIEven | prRegionalIndicator<<32: + return grRIOdd, grBoundary, 120 + default: + return -1, -1, -1 + } } // transitionGraphemeState determines the new state of the grapheme cluster @@ -97,40 +135,40 @@ var grTransitions = map[[2]int][3]int{ // table) and whether a cluster boundary was detected. func transitionGraphemeState(state int, r rune) (newState, prop int, boundary bool) { // Determine the property of the next character. - prop = property(graphemeCodePoints, r) + prop = propertyGraphemes(r) // Find the applicable transition. - transition, ok := grTransitions[[2]int{state, prop}] - if ok { + nextState, nextProp, _ := grTransitions(state, prop) + if nextState >= 0 { // We have a specific transition. We'll use it. - return transition[0], prop, transition[1] == grBoundary + return nextState, prop, nextProp == grBoundary } // No specific transition found. Try the less specific ones. - transAnyProp, okAnyProp := grTransitions[[2]int{state, prAny}] - transAnyState, okAnyState := grTransitions[[2]int{grAny, prop}] - if okAnyProp && okAnyState { + anyPropState, anyPropProp, anyPropRule := grTransitions(state, prAny) + anyStateState, anyStateProp, anyStateRule := grTransitions(grAny, prop) + if anyPropState >= 0 && anyStateState >= 0 { // Both apply. We'll use a mix (see comments for grTransitions). - newState = transAnyState[0] - boundary = transAnyState[1] == grBoundary - if transAnyProp[2] < transAnyState[2] { - boundary = transAnyProp[1] == grBoundary + newState = anyStateState + boundary = anyStateProp == grBoundary + if anyPropRule < anyStateRule { + boundary = anyPropProp == grBoundary } return } - if okAnyProp { + if anyPropState >= 0 { // We only have a specific state. 
- return transAnyProp[0], prop, transAnyProp[1] == grBoundary + return anyPropState, prop, anyPropProp == grBoundary // This branch will probably never be reached because okAnyState will // always be true given the current transition map. But we keep it here // for future modifications to the transition map where this may not be // true anymore. } - if okAnyState { + if anyStateState >= 0 { // We only have a specific property. - return transAnyState[0], prop, transAnyState[1] == grBoundary + return anyStateState, prop, anyStateProp == grBoundary } // No known transition. GB999: Any ÷ Any. diff --git a/vendor/github.com/rivo/uniseg/line.go b/vendor/github.com/rivo/uniseg/line.go index 87f28503f..7a46318d9 100644 --- a/vendor/github.com/rivo/uniseg/line.go +++ b/vendor/github.com/rivo/uniseg/line.go @@ -80,7 +80,7 @@ func FirstLineSegment(b []byte, state int) (segment, rest []byte, mustBreak bool } } -// FirstLineSegmentInString is like FirstLineSegment() but its input and outputs +// FirstLineSegmentInString is like [FirstLineSegment] but its input and outputs // are strings. func FirstLineSegmentInString(str string, state int) (segment, rest string, mustBreak bool, newState int) { // An empty byte slice returns nothing. @@ -122,13 +122,13 @@ func FirstLineSegmentInString(str string, state int) (segment, rest string, must // [UAX #14]: https://www.unicode.org/reports/tr14/#Algorithm func HasTrailingLineBreak(b []byte) bool { r, _ := utf8.DecodeLastRune(b) - property, _ := propertyWithGenCat(lineBreakCodePoints, r) - return property == lbBK || property == lbCR || property == lbLF || property == lbNL + property, _ := propertyLineBreak(r) + return property == prBK || property == prCR || property == prLF || property == prNL } // HasTrailingLineBreakInString is like [HasTrailingLineBreak] but for a string. func HasTrailingLineBreakInString(str string) bool { r, _ := utf8.DecodeLastRuneInString(str) - property, _ := propertyWithGenCat(lineBreakCodePoints, r) - return property == lbBK || property == lbCR || property == lbLF || property == lbNL + property, _ := propertyLineBreak(r) + return property == prBK || property == prCR || property == prLF || property == prNL } diff --git a/vendor/github.com/rivo/uniseg/lineproperties.go b/vendor/github.com/rivo/uniseg/lineproperties.go index 32169306e..ac7fac4c0 100644 --- a/vendor/github.com/rivo/uniseg/lineproperties.go +++ b/vendor/github.com/rivo/uniseg/lineproperties.go @@ -1,13 +1,13 @@ -package uniseg - // Code generated via go generate from gen_properties.go. DO NOT EDIT. +package uniseg + // lineBreakCodePoints are taken from -// https://www.unicode.org/Public/14.0.0/ucd/LineBreak.txt +// https://www.unicode.org/Public/15.0.0/ucd/LineBreak.txt // and -// https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt +// https://unicode.org/Public/15.0.0/ucd/emoji/emoji-data.txt // ("Extended_Pictographic" only) -// on September 10, 2022. See https://www.unicode.org/license.html for the Unicode +// on September 5, 2023. See https://www.unicode.org/license.html for the Unicode // license agreement. var lineBreakCodePoints = [][4]int{ {0x0000, 0x0008, prCM, gcCc}, // [9] .. 
@@ -439,6 +439,7 @@ var lineBreakCodePoints = [][4]int{ {0x0CE2, 0x0CE3, prCM, gcMn}, // [2] KANNADA VOWEL SIGN VOCALIC L..KANNADA VOWEL SIGN VOCALIC LL {0x0CE6, 0x0CEF, prNU, gcNd}, // [10] KANNADA DIGIT ZERO..KANNADA DIGIT NINE {0x0CF1, 0x0CF2, prAL, gcLo}, // [2] KANNADA SIGN JIHVAMULIYA..KANNADA SIGN UPADHMANIYA + {0x0CF3, 0x0CF3, prCM, gcMc}, // KANNADA SIGN COMBINING ANUSVARA ABOVE RIGHT {0x0D00, 0x0D01, prCM, gcMn}, // [2] MALAYALAM SIGN COMBINING ANUSVARA ABOVE..MALAYALAM SIGN CANDRABINDU {0x0D02, 0x0D03, prCM, gcMc}, // [2] MALAYALAM SIGN ANUSVARA..MALAYALAM SIGN VISARGA {0x0D04, 0x0D0C, prAL, gcLo}, // [9] MALAYALAM LETTER VEDIC ANUSVARA..MALAYALAM LETTER VOCALIC L @@ -500,7 +501,7 @@ var lineBreakCodePoints = [][4]int{ {0x0EBD, 0x0EBD, prSA, gcLo}, // LAO SEMIVOWEL SIGN NYO {0x0EC0, 0x0EC4, prSA, gcLo}, // [5] LAO VOWEL SIGN E..LAO VOWEL SIGN AI {0x0EC6, 0x0EC6, prSA, gcLm}, // LAO KO LA - {0x0EC8, 0x0ECD, prSA, gcMn}, // [6] LAO TONE MAI EK..LAO NIGGAHITA + {0x0EC8, 0x0ECE, prSA, gcMn}, // [7] LAO TONE MAI EK..LAO YAMAKKAN {0x0ED0, 0x0ED9, prNU, gcNd}, // [10] LAO DIGIT ZERO..LAO DIGIT NINE {0x0EDC, 0x0EDF, prSA, gcLo}, // [4] LAO HO NO..LAO LETTER KHMU NYO {0x0F00, 0x0F00, prAL, gcLo}, // TIBETAN SYLLABLE OM @@ -813,7 +814,11 @@ var lineBreakCodePoints = [][4]int{ {0x1D79, 0x1D7F, prAL, gcLl}, // [7] LATIN SMALL LETTER INSULAR G..LATIN SMALL LETTER UPSILON WITH STROKE {0x1D80, 0x1D9A, prAL, gcLl}, // [27] LATIN SMALL LETTER B WITH PALATAL HOOK..LATIN SMALL LETTER EZH WITH RETROFLEX HOOK {0x1D9B, 0x1DBF, prAL, gcLm}, // [37] MODIFIER LETTER SMALL TURNED ALPHA..MODIFIER LETTER SMALL THETA - {0x1DC0, 0x1DFF, prCM, gcMn}, // [64] COMBINING DOTTED GRAVE ACCENT..COMBINING RIGHT ARROWHEAD AND DOWN ARROWHEAD BELOW + {0x1DC0, 0x1DCC, prCM, gcMn}, // [13] COMBINING DOTTED GRAVE ACCENT..COMBINING MACRON-BREVE + {0x1DCD, 0x1DCD, prGL, gcMn}, // COMBINING DOUBLE CIRCUMFLEX ABOVE + {0x1DCE, 0x1DFB, prCM, gcMn}, // [46] COMBINING OGONEK ABOVE..COMBINING DELETION MARK + {0x1DFC, 0x1DFC, prGL, gcMn}, // COMBINING DOUBLE INVERTED BREVE BELOW + {0x1DFD, 0x1DFF, prCM, gcMn}, // [3] COMBINING ALMOST EQUAL TO BELOW..COMBINING RIGHT ARROWHEAD AND DOWN ARROWHEAD BELOW {0x1E00, 0x1EFF, prAL, gcLC}, // [256] LATIN CAPITAL LETTER A WITH RING BELOW..LATIN SMALL LETTER Y WITH LOOP {0x1F00, 0x1F15, prAL, gcLC}, // [22] GREEK SMALL LETTER ALPHA WITH PSILI..GREEK SMALL LETTER EPSILON WITH DASIA AND OXIA {0x1F18, 0x1F1D, prAL, gcLu}, // [6] GREEK CAPITAL LETTER EPSILON WITH PSILI..GREEK CAPITAL LETTER EPSILON WITH DASIA AND OXIA @@ -889,7 +894,7 @@ var lineBreakCodePoints = [][4]int{ {0x2054, 0x2054, prAL, gcPc}, // INVERTED UNDERTIE {0x2055, 0x2055, prAL, gcPo}, // FLOWER PUNCTUATION MARK {0x2056, 0x2056, prBA, gcPo}, // THREE DOT PUNCTUATION - {0x2057, 0x2057, prAL, gcPo}, // QUADRUPLE PRIME + {0x2057, 0x2057, prPO, gcPo}, // QUADRUPLE PRIME {0x2058, 0x205B, prBA, gcPo}, // [4] FOUR DOT PUNCTUATION..FOUR DOT MARK {0x205C, 0x205C, prAL, gcPo}, // DOTTED CROSS {0x205D, 0x205E, prBA, gcPo}, // [2] TRICOLON..VERTICAL FOUR DOTS @@ -2751,6 +2756,7 @@ var lineBreakCodePoints = [][4]int{ {0x10EAB, 0x10EAC, prCM, gcMn}, // [2] YEZIDI COMBINING HAMZA MARK..YEZIDI COMBINING MADDA MARK {0x10EAD, 0x10EAD, prBA, gcPd}, // YEZIDI HYPHENATION MARK {0x10EB0, 0x10EB1, prAL, gcLo}, // [2] YEZIDI LETTER LAM WITH DOT ABOVE..YEZIDI LETTER YOT WITH CIRCUMFLEX ABOVE + {0x10EFD, 0x10EFF, prCM, gcMn}, // [3] ARABIC SMALL LOW WORD SAKTA..ARABIC SMALL LOW WORD MADDA {0x10F00, 0x10F1C, prAL, gcLo}, // [29] OLD SOGDIAN LETTER ALEPH..OLD 
SOGDIAN LETTER FINAL TAW WITH VERTICAL TAIL {0x10F1D, 0x10F26, prAL, gcNo}, // [10] OLD SOGDIAN NUMBER ONE..OLD SOGDIAN FRACTION ONE HALF {0x10F27, 0x10F27, prAL, gcLo}, // OLD SOGDIAN LIGATURE AYIN-DALETH @@ -2840,6 +2846,8 @@ var lineBreakCodePoints = [][4]int{ {0x1123B, 0x1123C, prBA, gcPo}, // [2] KHOJKI SECTION MARK..KHOJKI DOUBLE SECTION MARK {0x1123D, 0x1123D, prAL, gcPo}, // KHOJKI ABBREVIATION SIGN {0x1123E, 0x1123E, prCM, gcMn}, // KHOJKI SIGN SUKUN + {0x1123F, 0x11240, prAL, gcLo}, // [2] KHOJKI LETTER QA..KHOJKI LETTER SHORT I + {0x11241, 0x11241, prCM, gcMn}, // KHOJKI VOWEL SIGN VOCALIC R {0x11280, 0x11286, prAL, gcLo}, // [7] MULTANI LETTER A..MULTANI LETTER GA {0x11288, 0x11288, prAL, gcLo}, // MULTANI LETTER GHA {0x1128A, 0x1128D, prAL, gcLo}, // [4] MULTANI LETTER CA..MULTANI LETTER JJA @@ -3013,6 +3021,7 @@ var lineBreakCodePoints = [][4]int{ {0x11AA1, 0x11AA2, prBA, gcPo}, // [2] SOYOMBO TERMINAL MARK-1..SOYOMBO TERMINAL MARK-2 {0x11AB0, 0x11ABF, prAL, gcLo}, // [16] CANADIAN SYLLABICS NATTILIK HI..CANADIAN SYLLABICS SPA {0x11AC0, 0x11AF8, prAL, gcLo}, // [57] PAU CIN HAU LETTER PA..PAU CIN HAU GLOTTAL STOP FINAL + {0x11B00, 0x11B09, prBB, gcPo}, // [10] DEVANAGARI HEAD MARK..DEVANAGARI SIGN MINDU {0x11C00, 0x11C08, prAL, gcLo}, // [9] BHAIKSUKI LETTER A..BHAIKSUKI LETTER VOCALIC L {0x11C0A, 0x11C2E, prAL, gcLo}, // [37] BHAIKSUKI LETTER E..BHAIKSUKI LETTER HA {0x11C2F, 0x11C2F, prCM, gcMc}, // BHAIKSUKI VOWEL SIGN AA @@ -3059,6 +3068,20 @@ var lineBreakCodePoints = [][4]int{ {0x11EF3, 0x11EF4, prCM, gcMn}, // [2] MAKASAR VOWEL SIGN I..MAKASAR VOWEL SIGN U {0x11EF5, 0x11EF6, prCM, gcMc}, // [2] MAKASAR VOWEL SIGN E..MAKASAR VOWEL SIGN O {0x11EF7, 0x11EF8, prAL, gcPo}, // [2] MAKASAR PASSIMBANG..MAKASAR END OF SECTION + {0x11F00, 0x11F01, prCM, gcMn}, // [2] KAWI SIGN CANDRABINDU..KAWI SIGN ANUSVARA + {0x11F02, 0x11F02, prAL, gcLo}, // KAWI SIGN REPHA + {0x11F03, 0x11F03, prCM, gcMc}, // KAWI SIGN VISARGA + {0x11F04, 0x11F10, prAL, gcLo}, // [13] KAWI LETTER A..KAWI LETTER O + {0x11F12, 0x11F33, prAL, gcLo}, // [34] KAWI LETTER KA..KAWI LETTER JNYA + {0x11F34, 0x11F35, prCM, gcMc}, // [2] KAWI VOWEL SIGN AA..KAWI VOWEL SIGN ALTERNATE AA + {0x11F36, 0x11F3A, prCM, gcMn}, // [5] KAWI VOWEL SIGN I..KAWI VOWEL SIGN VOCALIC R + {0x11F3E, 0x11F3F, prCM, gcMc}, // [2] KAWI VOWEL SIGN E..KAWI VOWEL SIGN AI + {0x11F40, 0x11F40, prCM, gcMn}, // KAWI VOWEL SIGN EU + {0x11F41, 0x11F41, prCM, gcMc}, // KAWI SIGN KILLER + {0x11F42, 0x11F42, prCM, gcMn}, // KAWI CONJOINER + {0x11F43, 0x11F44, prBA, gcPo}, // [2] KAWI DANDA..KAWI DOUBLE DANDA + {0x11F45, 0x11F4F, prID, gcPo}, // [11] KAWI PUNCTUATION SECTION MARKER..KAWI PUNCTUATION CLOSING SPIRAL + {0x11F50, 0x11F59, prNU, gcNd}, // [10] KAWI DIGIT ZERO..KAWI DIGIT NINE {0x11FB0, 0x11FB0, prAL, gcLo}, // LISU LETTER YHA {0x11FC0, 0x11FD4, prAL, gcNo}, // [21] TAMIL FRACTION ONE THREE-HUNDRED-AND-TWENTIETH..TAMIL FRACTION DOWNSCALING FACTOR KIIZH {0x11FD5, 0x11FDC, prAL, gcSo}, // [8] TAMIL SIGN NEL..TAMIL SIGN MUKKURUNI @@ -3084,10 +3107,18 @@ var lineBreakCodePoints = [][4]int{ {0x1328A, 0x13378, prAL, gcLo}, // [239] EGYPTIAN HIEROGLYPH O037..EGYPTIAN HIEROGLYPH V011 {0x13379, 0x13379, prOP, gcLo}, // EGYPTIAN HIEROGLYPH V011A {0x1337A, 0x1337B, prCL, gcLo}, // [2] EGYPTIAN HIEROGLYPH V011B..EGYPTIAN HIEROGLYPH V011C - {0x1337C, 0x1342E, prAL, gcLo}, // [179] EGYPTIAN HIEROGLYPH V012..EGYPTIAN HIEROGLYPH AA032 + {0x1337C, 0x1342F, prAL, gcLo}, // [180] EGYPTIAN HIEROGLYPH V012..EGYPTIAN HIEROGLYPH V011D {0x13430, 0x13436, prGL, 
gcCf}, // [7] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH OVERLAY MIDDLE {0x13437, 0x13437, prOP, gcCf}, // EGYPTIAN HIEROGLYPH BEGIN SEGMENT {0x13438, 0x13438, prCL, gcCf}, // EGYPTIAN HIEROGLYPH END SEGMENT + {0x13439, 0x1343B, prGL, gcCf}, // [3] EGYPTIAN HIEROGLYPH INSERT AT MIDDLE..EGYPTIAN HIEROGLYPH INSERT AT BOTTOM + {0x1343C, 0x1343C, prOP, gcCf}, // EGYPTIAN HIEROGLYPH BEGIN ENCLOSURE + {0x1343D, 0x1343D, prCL, gcCf}, // EGYPTIAN HIEROGLYPH END ENCLOSURE + {0x1343E, 0x1343E, prOP, gcCf}, // EGYPTIAN HIEROGLYPH BEGIN WALLED ENCLOSURE + {0x1343F, 0x1343F, prCL, gcCf}, // EGYPTIAN HIEROGLYPH END WALLED ENCLOSURE + {0x13440, 0x13440, prCM, gcMn}, // EGYPTIAN HIEROGLYPH MIRROR HORIZONTALLY + {0x13441, 0x13446, prAL, gcLo}, // [6] EGYPTIAN HIEROGLYPH FULL BLANK..EGYPTIAN HIEROGLYPH WIDE LOST SIGN + {0x13447, 0x13455, prCM, gcMn}, // [15] EGYPTIAN HIEROGLYPH MODIFIER DAMAGED AT TOP START..EGYPTIAN HIEROGLYPH MODIFIER DAMAGED {0x14400, 0x145CD, prAL, gcLo}, // [462] ANATOLIAN HIEROGLYPH A001..ANATOLIAN HIEROGLYPH A409 {0x145CE, 0x145CE, prOP, gcLo}, // ANATOLIAN HIEROGLYPH A410 BEGIN LOGOGRAM MARK {0x145CF, 0x145CF, prCL, gcLo}, // ANATOLIAN HIEROGLYPH A410A END LOGOGRAM MARK @@ -3137,7 +3168,9 @@ var lineBreakCodePoints = [][4]int{ {0x1AFFD, 0x1AFFE, prAL, gcLm}, // [2] KATAKANA LETTER MINNAN NASALIZED TONE-7..KATAKANA LETTER MINNAN NASALIZED TONE-8 {0x1B000, 0x1B0FF, prID, gcLo}, // [256] KATAKANA LETTER ARCHAIC E..HENTAIGANA LETTER RE-2 {0x1B100, 0x1B122, prID, gcLo}, // [35] HENTAIGANA LETTER RE-3..KATAKANA LETTER ARCHAIC WU + {0x1B132, 0x1B132, prCJ, gcLo}, // HIRAGANA LETTER SMALL KO {0x1B150, 0x1B152, prCJ, gcLo}, // [3] HIRAGANA LETTER SMALL WI..HIRAGANA LETTER SMALL WO + {0x1B155, 0x1B155, prCJ, gcLo}, // KATAKANA LETTER SMALL KO {0x1B164, 0x1B167, prCJ, gcLo}, // [4] KATAKANA LETTER SMALL WI..KATAKANA LETTER SMALL N {0x1B170, 0x1B2FB, prID, gcLo}, // [396] NUSHU CHARACTER-1B170..NUSHU CHARACTER-1B2FB {0x1BC00, 0x1BC6A, prAL, gcLo}, // [107] DUPLOYAN LETTER H..DUPLOYAN LETTER VOCALIC M @@ -3168,6 +3201,7 @@ var lineBreakCodePoints = [][4]int{ {0x1D200, 0x1D241, prAL, gcSo}, // [66] GREEK VOCAL NOTATION SYMBOL-1..GREEK INSTRUMENTAL NOTATION SYMBOL-54 {0x1D242, 0x1D244, prCM, gcMn}, // [3] COMBINING GREEK MUSICAL TRISEME..COMBINING GREEK MUSICAL PENTASEME {0x1D245, 0x1D245, prAL, gcSo}, // GREEK MUSICAL LEIMMA + {0x1D2C0, 0x1D2D3, prAL, gcNo}, // [20] KAKTOVIK NUMERAL ZERO..KAKTOVIK NUMERAL NINETEEN {0x1D2E0, 0x1D2F3, prAL, gcNo}, // [20] MAYAN NUMERAL ZERO..MAYAN NUMERAL NINETEEN {0x1D300, 0x1D356, prAL, gcSo}, // [87] MONOGRAM FOR EARTH..TETRAGRAM FOR FOSTERING {0x1D360, 0x1D378, prAL, gcNo}, // [25] COUNTING ROD UNIT DIGIT ONE..TALLY MARK FIVE @@ -3228,11 +3262,14 @@ var lineBreakCodePoints = [][4]int{ {0x1DF00, 0x1DF09, prAL, gcLl}, // [10] LATIN SMALL LETTER FENG DIGRAPH WITH TRILL..LATIN SMALL LETTER T WITH HOOK AND RETROFLEX HOOK {0x1DF0A, 0x1DF0A, prAL, gcLo}, // LATIN LETTER RETROFLEX CLICK WITH RETROFLEX HOOK {0x1DF0B, 0x1DF1E, prAL, gcLl}, // [20] LATIN SMALL LETTER ESH WITH DOUBLE BAR..LATIN SMALL LETTER S WITH CURL + {0x1DF25, 0x1DF2A, prAL, gcLl}, // [6] LATIN SMALL LETTER D WITH MID-HEIGHT LEFT HOOK..LATIN SMALL LETTER T WITH MID-HEIGHT LEFT HOOK {0x1E000, 0x1E006, prCM, gcMn}, // [7] COMBINING GLAGOLITIC LETTER AZU..COMBINING GLAGOLITIC LETTER ZHIVETE {0x1E008, 0x1E018, prCM, gcMn}, // [17] COMBINING GLAGOLITIC LETTER ZEMLJA..COMBINING GLAGOLITIC LETTER HERU {0x1E01B, 0x1E021, prCM, gcMn}, // [7] COMBINING GLAGOLITIC LETTER SHTA..COMBINING GLAGOLITIC 
LETTER YATI {0x1E023, 0x1E024, prCM, gcMn}, // [2] COMBINING GLAGOLITIC LETTER YU..COMBINING GLAGOLITIC LETTER SMALL YUS {0x1E026, 0x1E02A, prCM, gcMn}, // [5] COMBINING GLAGOLITIC LETTER YO..COMBINING GLAGOLITIC LETTER FITA + {0x1E030, 0x1E06D, prAL, gcLm}, // [62] MODIFIER LETTER CYRILLIC SMALL A..MODIFIER LETTER CYRILLIC SMALL STRAIGHT U WITH STROKE + {0x1E08F, 0x1E08F, prCM, gcMn}, // COMBINING CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I {0x1E100, 0x1E12C, prAL, gcLo}, // [45] NYIAKENG PUACHUE HMONG LETTER MA..NYIAKENG PUACHUE HMONG LETTER W {0x1E130, 0x1E136, prCM, gcMn}, // [7] NYIAKENG PUACHUE HMONG TONE-B..NYIAKENG PUACHUE HMONG TONE-D {0x1E137, 0x1E13D, prAL, gcLm}, // [7] NYIAKENG PUACHUE HMONG SIGN FOR PERSON..NYIAKENG PUACHUE HMONG SYLLABLE LENGTHENER @@ -3245,6 +3282,10 @@ var lineBreakCodePoints = [][4]int{ {0x1E2EC, 0x1E2EF, prCM, gcMn}, // [4] WANCHO TONE TUP..WANCHO TONE KOINI {0x1E2F0, 0x1E2F9, prNU, gcNd}, // [10] WANCHO DIGIT ZERO..WANCHO DIGIT NINE {0x1E2FF, 0x1E2FF, prPR, gcSc}, // WANCHO NGUN SIGN + {0x1E4D0, 0x1E4EA, prAL, gcLo}, // [27] NAG MUNDARI LETTER O..NAG MUNDARI LETTER ELL + {0x1E4EB, 0x1E4EB, prAL, gcLm}, // NAG MUNDARI SIGN OJOD + {0x1E4EC, 0x1E4EF, prCM, gcMn}, // [4] NAG MUNDARI SIGN MUHOR..NAG MUNDARI SIGN SUTUH + {0x1E4F0, 0x1E4F9, prNU, gcNd}, // [10] NAG MUNDARI DIGIT ZERO..NAG MUNDARI DIGIT NINE {0x1E7E0, 0x1E7E6, prAL, gcLo}, // [7] ETHIOPIC SYLLABLE HHYA..ETHIOPIC SYLLABLE HHYO {0x1E7E8, 0x1E7EB, prAL, gcLo}, // [4] ETHIOPIC SYLLABLE GURAGE HHWA..ETHIOPIC SYLLABLE HHWE {0x1E7ED, 0x1E7EE, prAL, gcLo}, // [2] ETHIOPIC SYLLABLE GURAGE MWI..ETHIOPIC SYLLABLE GURAGE MWEE @@ -3412,16 +3453,18 @@ var lineBreakCodePoints = [][4]int{ {0x1F6C1, 0x1F6CB, prID, gcSo}, // [11] BATHTUB..COUCH AND LAMP {0x1F6CC, 0x1F6CC, prEB, gcSo}, // SLEEPING ACCOMMODATION {0x1F6CD, 0x1F6D7, prID, gcSo}, // [11] SHOPPING BAGS..ELEVATOR - {0x1F6D8, 0x1F6DC, prID, gcCn}, // [5] .. - {0x1F6DD, 0x1F6EC, prID, gcSo}, // [16] PLAYGROUND SLIDE..AIRPLANE ARRIVING + {0x1F6D8, 0x1F6DB, prID, gcCn}, // [4] .. + {0x1F6DC, 0x1F6EC, prID, gcSo}, // [17] WIRELESS..AIRPLANE ARRIVING {0x1F6ED, 0x1F6EF, prID, gcCn}, // [3] .. {0x1F6F0, 0x1F6FC, prID, gcSo}, // [13] SATELLITE..ROLLER SKATE {0x1F6FD, 0x1F6FF, prID, gcCn}, // [3] .. {0x1F700, 0x1F773, prAL, gcSo}, // [116] ALCHEMICAL SYMBOL FOR QUINTESSENCE..ALCHEMICAL SYMBOL FOR HALF OUNCE - {0x1F774, 0x1F77F, prID, gcCn}, // [12] .. + {0x1F774, 0x1F776, prID, gcSo}, // [3] LOT OF FORTUNE..LUNAR ECLIPSE + {0x1F777, 0x1F77A, prID, gcCn}, // [4] .. + {0x1F77B, 0x1F77F, prID, gcSo}, // [5] HAUMEA..ORCUS {0x1F780, 0x1F7D4, prAL, gcSo}, // [85] BLACK LEFT-POINTING ISOSCELES RIGHT TRIANGLE..HEAVY TWELVE POINTED PINWHEEL STAR - {0x1F7D5, 0x1F7D8, prID, gcSo}, // [4] CIRCLED TRIANGLE..NEGATIVE CIRCLED SQUARE - {0x1F7D9, 0x1F7DF, prID, gcCn}, // [7] .. + {0x1F7D5, 0x1F7D9, prID, gcSo}, // [5] CIRCLED TRIANGLE..NINE POINTED WHITE STAR + {0x1F7DA, 0x1F7DF, prID, gcCn}, // [6] .. {0x1F7E0, 0x1F7EB, prID, gcSo}, // [12] LARGE ORANGE CIRCLE..LARGE BROWN SQUARE {0x1F7EC, 0x1F7EF, prID, gcCn}, // [4] .. {0x1F7F0, 0x1F7F0, prID, gcSo}, // HEAVY EQUALS SIGN @@ -3467,33 +3510,29 @@ var lineBreakCodePoints = [][4]int{ {0x1FA54, 0x1FA5F, prID, gcCn}, // [12] .. {0x1FA60, 0x1FA6D, prID, gcSo}, // [14] XIANGQI RED GENERAL..XIANGQI BLACK SOLDIER {0x1FA6E, 0x1FA6F, prID, gcCn}, // [2] .. - {0x1FA70, 0x1FA74, prID, gcSo}, // [5] BALLET SHOES..THONG SANDAL - {0x1FA75, 0x1FA77, prID, gcCn}, // [3] .. 
- {0x1FA78, 0x1FA7C, prID, gcSo}, // [5] DROP OF BLOOD..CRUTCH + {0x1FA70, 0x1FA7C, prID, gcSo}, // [13] BALLET SHOES..CRUTCH {0x1FA7D, 0x1FA7F, prID, gcCn}, // [3] .. - {0x1FA80, 0x1FA86, prID, gcSo}, // [7] YO-YO..NESTING DOLLS - {0x1FA87, 0x1FA8F, prID, gcCn}, // [9] .. - {0x1FA90, 0x1FAAC, prID, gcSo}, // [29] RINGED PLANET..HAMSA - {0x1FAAD, 0x1FAAF, prID, gcCn}, // [3] .. - {0x1FAB0, 0x1FABA, prID, gcSo}, // [11] FLY..NEST WITH EGGS - {0x1FABB, 0x1FABF, prID, gcCn}, // [5] .. - {0x1FAC0, 0x1FAC2, prID, gcSo}, // [3] ANATOMICAL HEART..PEOPLE HUGGING + {0x1FA80, 0x1FA88, prID, gcSo}, // [9] YO-YO..FLUTE + {0x1FA89, 0x1FA8F, prID, gcCn}, // [7] .. + {0x1FA90, 0x1FABD, prID, gcSo}, // [46] RINGED PLANET..WING + {0x1FABE, 0x1FABE, prID, gcCn}, // + {0x1FABF, 0x1FAC2, prID, gcSo}, // [4] GOOSE..PEOPLE HUGGING {0x1FAC3, 0x1FAC5, prEB, gcSo}, // [3] PREGNANT MAN..PERSON WITH CROWN - {0x1FAC6, 0x1FACF, prID, gcCn}, // [10] .. - {0x1FAD0, 0x1FAD9, prID, gcSo}, // [10] BLUEBERRIES..JAR - {0x1FADA, 0x1FADF, prID, gcCn}, // [6] .. - {0x1FAE0, 0x1FAE7, prID, gcSo}, // [8] MELTING FACE..BUBBLES - {0x1FAE8, 0x1FAEF, prID, gcCn}, // [8] .. - {0x1FAF0, 0x1FAF6, prEB, gcSo}, // [7] HAND WITH INDEX FINGER AND THUMB CROSSED..HEART HANDS - {0x1FAF7, 0x1FAFF, prID, gcCn}, // [9] .. + {0x1FAC6, 0x1FACD, prID, gcCn}, // [8] .. + {0x1FACE, 0x1FADB, prID, gcSo}, // [14] MOOSE..PEA POD + {0x1FADC, 0x1FADF, prID, gcCn}, // [4] .. + {0x1FAE0, 0x1FAE8, prID, gcSo}, // [9] MELTING FACE..SHAKING FACE + {0x1FAE9, 0x1FAEF, prID, gcCn}, // [7] .. + {0x1FAF0, 0x1FAF8, prEB, gcSo}, // [9] HAND WITH INDEX FINGER AND THUMB CROSSED..RIGHTWARDS PUSHING HAND + {0x1FAF9, 0x1FAFF, prID, gcCn}, // [7] .. {0x1FB00, 0x1FB92, prAL, gcSo}, // [147] BLOCK SEXTANT-1..UPPER HALF INVERSE MEDIUM SHADE AND LOWER HALF BLOCK {0x1FB94, 0x1FBCA, prAL, gcSo}, // [55] LEFT HALF INVERSE MEDIUM SHADE AND RIGHT HALF BLOCK..WHITE UP-POINTING CHEVRON {0x1FBF0, 0x1FBF9, prNU, gcNd}, // [10] SEGMENTED DIGIT ZERO..SEGMENTED DIGIT NINE {0x1FC00, 0x1FFFD, prID, gcCn}, // [1022] .. {0x20000, 0x2A6DF, prID, gcLo}, // [42720] CJK UNIFIED IDEOGRAPH-20000..CJK UNIFIED IDEOGRAPH-2A6DF {0x2A6E0, 0x2A6FF, prID, gcCn}, // [32] .. - {0x2A700, 0x2B738, prID, gcLo}, // [4153] CJK UNIFIED IDEOGRAPH-2A700..CJK UNIFIED IDEOGRAPH-2B738 - {0x2B739, 0x2B73F, prID, gcCn}, // [7] .. + {0x2A700, 0x2B739, prID, gcLo}, // [4154] CJK UNIFIED IDEOGRAPH-2A700..CJK UNIFIED IDEOGRAPH-2B739 + {0x2B73A, 0x2B73F, prID, gcCn}, // [6] .. {0x2B740, 0x2B81D, prID, gcLo}, // [222] CJK UNIFIED IDEOGRAPH-2B740..CJK UNIFIED IDEOGRAPH-2B81D {0x2B81E, 0x2B81F, prID, gcCn}, // [2] .. {0x2B820, 0x2CEA1, prID, gcLo}, // [5762] CJK UNIFIED IDEOGRAPH-2B820..CJK UNIFIED IDEOGRAPH-2CEA1 @@ -3504,7 +3543,9 @@ var lineBreakCodePoints = [][4]int{ {0x2FA1E, 0x2FA1F, prID, gcCn}, // [2] .. {0x2FA20, 0x2FFFD, prID, gcCn}, // [1502] .. {0x30000, 0x3134A, prID, gcLo}, // [4939] CJK UNIFIED IDEOGRAPH-30000..CJK UNIFIED IDEOGRAPH-3134A - {0x3134B, 0x3FFFD, prID, gcCn}, // [60595] .. + {0x3134B, 0x3134F, prID, gcCn}, // [5] .. + {0x31350, 0x323AF, prID, gcLo}, // [4192] CJK UNIFIED IDEOGRAPH-31350..CJK UNIFIED IDEOGRAPH-323AF + {0x323B0, 0x3FFFD, prID, gcCn}, // [56398] .. 
{0xE0001, 0xE0001, prCM, gcCf}, // LANGUAGE TAG {0xE0020, 0xE007F, prCM, gcCf}, // [96] TAG SPACE..CANCEL TAG {0xE0100, 0xE01EF, prCM, gcMn}, // [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256 diff --git a/vendor/github.com/rivo/uniseg/linerules.go b/vendor/github.com/rivo/uniseg/linerules.go index d2ad51680..7708ae0fb 100644 --- a/vendor/github.com/rivo/uniseg/linerules.go +++ b/vendor/github.com/rivo/uniseg/linerules.go @@ -64,222 +64,381 @@ const ( LineMustBreak // You must break the line here. ) -// The line break parser's state transitions. It's anologous to grTransitions, -// see comments there for details. Unicode version 14.0.0. -var lbTransitions = map[[2]int][3]int{ +// lbTransitions implements the line break parser's state transitions. It's +// anologous to [grTransitions], see comments there for details. +// +// Unicode version 15.0.0. +func lbTransitions(state, prop int) (newState, lineBreak, rule int) { + switch uint64(state) | uint64(prop)<<32 { // LB4. - {lbAny, prBK}: {lbBK, LineCanBreak, 310}, - {lbBK, prAny}: {lbAny, LineMustBreak, 40}, + case lbBK | prAny<<32: + return lbAny, LineMustBreak, 40 // LB5. - {lbAny, prCR}: {lbCR, LineCanBreak, 310}, - {lbAny, prLF}: {lbLF, LineCanBreak, 310}, - {lbAny, prNL}: {lbNL, LineCanBreak, 310}, - {lbCR, prLF}: {lbLF, LineDontBreak, 50}, - {lbCR, prAny}: {lbAny, LineMustBreak, 50}, - {lbLF, prAny}: {lbAny, LineMustBreak, 50}, - {lbNL, prAny}: {lbAny, LineMustBreak, 50}, + case lbCR | prLF<<32: + return lbLF, LineDontBreak, 50 + case lbCR | prAny<<32: + return lbAny, LineMustBreak, 50 + case lbLF | prAny<<32: + return lbAny, LineMustBreak, 50 + case lbNL | prAny<<32: + return lbAny, LineMustBreak, 50 // LB6. - {lbAny, prBK}: {lbBK, LineDontBreak, 60}, - {lbAny, prCR}: {lbCR, LineDontBreak, 60}, - {lbAny, prLF}: {lbLF, LineDontBreak, 60}, - {lbAny, prNL}: {lbNL, LineDontBreak, 60}, + case lbAny | prBK<<32: + return lbBK, LineDontBreak, 60 + case lbAny | prCR<<32: + return lbCR, LineDontBreak, 60 + case lbAny | prLF<<32: + return lbLF, LineDontBreak, 60 + case lbAny | prNL<<32: + return lbNL, LineDontBreak, 60 // LB7. - {lbAny, prSP}: {lbSP, LineDontBreak, 70}, - {lbAny, prZW}: {lbZW, LineDontBreak, 70}, + case lbAny | prSP<<32: + return lbSP, LineDontBreak, 70 + case lbAny | prZW<<32: + return lbZW, LineDontBreak, 70 // LB8. - {lbZW, prSP}: {lbZW, LineDontBreak, 70}, - {lbZW, prAny}: {lbAny, LineCanBreak, 80}, + case lbZW | prSP<<32: + return lbZW, LineDontBreak, 70 + case lbZW | prAny<<32: + return lbAny, LineCanBreak, 80 // LB11. - {lbAny, prWJ}: {lbWJ, LineDontBreak, 110}, - {lbWJ, prAny}: {lbAny, LineDontBreak, 110}, + case lbAny | prWJ<<32: + return lbWJ, LineDontBreak, 110 + case lbWJ | prAny<<32: + return lbAny, LineDontBreak, 110 // LB12. - {lbAny, prGL}: {lbGL, LineCanBreak, 310}, - {lbGL, prAny}: {lbAny, LineDontBreak, 120}, + case lbAny | prGL<<32: + return lbGL, LineCanBreak, 310 + case lbGL | prAny<<32: + return lbAny, LineDontBreak, 120 // LB13 (simple transitions). - {lbAny, prCL}: {lbCL, LineCanBreak, 310}, - {lbAny, prCP}: {lbCP, LineCanBreak, 310}, - {lbAny, prEX}: {lbEX, LineDontBreak, 130}, - {lbAny, prIS}: {lbIS, LineCanBreak, 310}, - {lbAny, prSY}: {lbSY, LineCanBreak, 310}, + case lbAny | prCL<<32: + return lbCL, LineCanBreak, 310 + case lbAny | prCP<<32: + return lbCP, LineCanBreak, 310 + case lbAny | prEX<<32: + return lbEX, LineDontBreak, 130 + case lbAny | prIS<<32: + return lbIS, LineCanBreak, 310 + case lbAny | prSY<<32: + return lbSY, LineCanBreak, 310 // LB14. 
- {lbAny, prOP}: {lbOP, LineCanBreak, 310}, - {lbOP, prSP}: {lbOP, LineDontBreak, 70}, - {lbOP, prAny}: {lbAny, LineDontBreak, 140}, + case lbAny | prOP<<32: + return lbOP, LineCanBreak, 310 + case lbOP | prSP<<32: + return lbOP, LineDontBreak, 70 + case lbOP | prAny<<32: + return lbAny, LineDontBreak, 140 // LB15. - {lbQU, prSP}: {lbQUSP, LineDontBreak, 70}, - {lbQU, prOP}: {lbOP, LineDontBreak, 150}, - {lbQUSP, prOP}: {lbOP, LineDontBreak, 150}, + case lbQU | prSP<<32: + return lbQUSP, LineDontBreak, 70 + case lbQU | prOP<<32: + return lbOP, LineDontBreak, 150 + case lbQUSP | prOP<<32: + return lbOP, LineDontBreak, 150 // LB16. - {lbCL, prSP}: {lbCLCPSP, LineDontBreak, 70}, - {lbNUCL, prSP}: {lbCLCPSP, LineDontBreak, 70}, - {lbCP, prSP}: {lbCLCPSP, LineDontBreak, 70}, - {lbNUCP, prSP}: {lbCLCPSP, LineDontBreak, 70}, - {lbCL, prNS}: {lbNS, LineDontBreak, 160}, - {lbNUCL, prNS}: {lbNS, LineDontBreak, 160}, - {lbCP, prNS}: {lbNS, LineDontBreak, 160}, - {lbNUCP, prNS}: {lbNS, LineDontBreak, 160}, - {lbCLCPSP, prNS}: {lbNS, LineDontBreak, 160}, + case lbCL | prSP<<32: + return lbCLCPSP, LineDontBreak, 70 + case lbNUCL | prSP<<32: + return lbCLCPSP, LineDontBreak, 70 + case lbCP | prSP<<32: + return lbCLCPSP, LineDontBreak, 70 + case lbNUCP | prSP<<32: + return lbCLCPSP, LineDontBreak, 70 + case lbCL | prNS<<32: + return lbNS, LineDontBreak, 160 + case lbNUCL | prNS<<32: + return lbNS, LineDontBreak, 160 + case lbCP | prNS<<32: + return lbNS, LineDontBreak, 160 + case lbNUCP | prNS<<32: + return lbNS, LineDontBreak, 160 + case lbCLCPSP | prNS<<32: + return lbNS, LineDontBreak, 160 // LB17. - {lbAny, prB2}: {lbB2, LineCanBreak, 310}, - {lbB2, prSP}: {lbB2SP, LineDontBreak, 70}, - {lbB2, prB2}: {lbB2, LineDontBreak, 170}, - {lbB2SP, prB2}: {lbB2, LineDontBreak, 170}, + case lbAny | prB2<<32: + return lbB2, LineCanBreak, 310 + case lbB2 | prSP<<32: + return lbB2SP, LineDontBreak, 70 + case lbB2 | prB2<<32: + return lbB2, LineDontBreak, 170 + case lbB2SP | prB2<<32: + return lbB2, LineDontBreak, 170 // LB18. - {lbSP, prAny}: {lbAny, LineCanBreak, 180}, - {lbQUSP, prAny}: {lbAny, LineCanBreak, 180}, - {lbCLCPSP, prAny}: {lbAny, LineCanBreak, 180}, - {lbB2SP, prAny}: {lbAny, LineCanBreak, 180}, + case lbSP | prAny<<32: + return lbAny, LineCanBreak, 180 + case lbQUSP | prAny<<32: + return lbAny, LineCanBreak, 180 + case lbCLCPSP | prAny<<32: + return lbAny, LineCanBreak, 180 + case lbB2SP | prAny<<32: + return lbAny, LineCanBreak, 180 // LB19. - {lbAny, prQU}: {lbQU, LineDontBreak, 190}, - {lbQU, prAny}: {lbAny, LineDontBreak, 190}, + case lbAny | prQU<<32: + return lbQU, LineDontBreak, 190 + case lbQU | prAny<<32: + return lbAny, LineDontBreak, 190 // LB20. - {lbAny, prCB}: {lbCB, LineCanBreak, 200}, - {lbCB, prAny}: {lbAny, LineCanBreak, 200}, + case lbAny | prCB<<32: + return lbCB, LineCanBreak, 200 + case lbCB | prAny<<32: + return lbAny, LineCanBreak, 200 // LB21. - {lbAny, prBA}: {lbBA, LineDontBreak, 210}, - {lbAny, prHY}: {lbHY, LineDontBreak, 210}, - {lbAny, prNS}: {lbNS, LineDontBreak, 210}, - {lbAny, prBB}: {lbBB, LineCanBreak, 310}, - {lbBB, prAny}: {lbAny, LineDontBreak, 210}, + case lbAny | prBA<<32: + return lbBA, LineDontBreak, 210 + case lbAny | prHY<<32: + return lbHY, LineDontBreak, 210 + case lbAny | prNS<<32: + return lbNS, LineDontBreak, 210 + case lbAny | prBB<<32: + return lbBB, LineCanBreak, 310 + case lbBB | prAny<<32: + return lbAny, LineDontBreak, 210 // LB21a. 
- {lbAny, prHL}: {lbHL, LineCanBreak, 310}, - {lbHL, prHY}: {lbLB21a, LineDontBreak, 210}, - {lbHL, prBA}: {lbLB21a, LineDontBreak, 210}, - {lbLB21a, prAny}: {lbAny, LineDontBreak, 211}, + case lbAny | prHL<<32: + return lbHL, LineCanBreak, 310 + case lbHL | prHY<<32: + return lbLB21a, LineDontBreak, 210 + case lbHL | prBA<<32: + return lbLB21a, LineDontBreak, 210 + case lbLB21a | prAny<<32: + return lbAny, LineDontBreak, 211 // LB21b. - {lbSY, prHL}: {lbHL, LineDontBreak, 212}, - {lbNUSY, prHL}: {lbHL, LineDontBreak, 212}, + case lbSY | prHL<<32: + return lbHL, LineDontBreak, 212 + case lbNUSY | prHL<<32: + return lbHL, LineDontBreak, 212 // LB22. - {lbAny, prIN}: {lbAny, LineDontBreak, 220}, + case lbAny | prIN<<32: + return lbAny, LineDontBreak, 220 // LB23. - {lbAny, prAL}: {lbAL, LineCanBreak, 310}, - {lbAny, prNU}: {lbNU, LineCanBreak, 310}, - {lbAL, prNU}: {lbNU, LineDontBreak, 230}, - {lbHL, prNU}: {lbNU, LineDontBreak, 230}, - {lbNU, prAL}: {lbAL, LineDontBreak, 230}, - {lbNU, prHL}: {lbHL, LineDontBreak, 230}, - {lbNUNU, prAL}: {lbAL, LineDontBreak, 230}, - {lbNUNU, prHL}: {lbHL, LineDontBreak, 230}, + case lbAny | prAL<<32: + return lbAL, LineCanBreak, 310 + case lbAny | prNU<<32: + return lbNU, LineCanBreak, 310 + case lbAL | prNU<<32: + return lbNU, LineDontBreak, 230 + case lbHL | prNU<<32: + return lbNU, LineDontBreak, 230 + case lbNU | prAL<<32: + return lbAL, LineDontBreak, 230 + case lbNU | prHL<<32: + return lbHL, LineDontBreak, 230 + case lbNUNU | prAL<<32: + return lbAL, LineDontBreak, 230 + case lbNUNU | prHL<<32: + return lbHL, LineDontBreak, 230 // LB23a. - {lbAny, prPR}: {lbPR, LineCanBreak, 310}, - {lbAny, prID}: {lbIDEM, LineCanBreak, 310}, - {lbAny, prEB}: {lbEB, LineCanBreak, 310}, - {lbAny, prEM}: {lbIDEM, LineCanBreak, 310}, - {lbPR, prID}: {lbIDEM, LineDontBreak, 231}, - {lbPR, prEB}: {lbEB, LineDontBreak, 231}, - {lbPR, prEM}: {lbIDEM, LineDontBreak, 231}, - {lbIDEM, prPO}: {lbPO, LineDontBreak, 231}, - {lbEB, prPO}: {lbPO, LineDontBreak, 231}, + case lbAny | prPR<<32: + return lbPR, LineCanBreak, 310 + case lbAny | prID<<32: + return lbIDEM, LineCanBreak, 310 + case lbAny | prEB<<32: + return lbEB, LineCanBreak, 310 + case lbAny | prEM<<32: + return lbIDEM, LineCanBreak, 310 + case lbPR | prID<<32: + return lbIDEM, LineDontBreak, 231 + case lbPR | prEB<<32: + return lbEB, LineDontBreak, 231 + case lbPR | prEM<<32: + return lbIDEM, LineDontBreak, 231 + case lbIDEM | prPO<<32: + return lbPO, LineDontBreak, 231 + case lbEB | prPO<<32: + return lbPO, LineDontBreak, 231 // LB24. - {lbAny, prPO}: {lbPO, LineCanBreak, 310}, - {lbPR, prAL}: {lbAL, LineDontBreak, 240}, - {lbPR, prHL}: {lbHL, LineDontBreak, 240}, - {lbPO, prAL}: {lbAL, LineDontBreak, 240}, - {lbPO, prHL}: {lbHL, LineDontBreak, 240}, - {lbAL, prPR}: {lbPR, LineDontBreak, 240}, - {lbAL, prPO}: {lbPO, LineDontBreak, 240}, - {lbHL, prPR}: {lbPR, LineDontBreak, 240}, - {lbHL, prPO}: {lbPO, LineDontBreak, 240}, + case lbAny | prPO<<32: + return lbPO, LineCanBreak, 310 + case lbPR | prAL<<32: + return lbAL, LineDontBreak, 240 + case lbPR | prHL<<32: + return lbHL, LineDontBreak, 240 + case lbPO | prAL<<32: + return lbAL, LineDontBreak, 240 + case lbPO | prHL<<32: + return lbHL, LineDontBreak, 240 + case lbAL | prPR<<32: + return lbPR, LineDontBreak, 240 + case lbAL | prPO<<32: + return lbPO, LineDontBreak, 240 + case lbHL | prPR<<32: + return lbPR, LineDontBreak, 240 + case lbHL | prPO<<32: + return lbPO, LineDontBreak, 240 // LB25 (simple transitions). 
- {lbPR, prNU}: {lbNU, LineDontBreak, 250}, - {lbPO, prNU}: {lbNU, LineDontBreak, 250}, - {lbOP, prNU}: {lbNU, LineDontBreak, 250}, - {lbHY, prNU}: {lbNU, LineDontBreak, 250}, - {lbNU, prNU}: {lbNUNU, LineDontBreak, 250}, - {lbNU, prSY}: {lbNUSY, LineDontBreak, 250}, - {lbNU, prIS}: {lbNUIS, LineDontBreak, 250}, - {lbNUNU, prNU}: {lbNUNU, LineDontBreak, 250}, - {lbNUNU, prSY}: {lbNUSY, LineDontBreak, 250}, - {lbNUNU, prIS}: {lbNUIS, LineDontBreak, 250}, - {lbNUSY, prNU}: {lbNUNU, LineDontBreak, 250}, - {lbNUSY, prSY}: {lbNUSY, LineDontBreak, 250}, - {lbNUSY, prIS}: {lbNUIS, LineDontBreak, 250}, - {lbNUIS, prNU}: {lbNUNU, LineDontBreak, 250}, - {lbNUIS, prSY}: {lbNUSY, LineDontBreak, 250}, - {lbNUIS, prIS}: {lbNUIS, LineDontBreak, 250}, - {lbNU, prCL}: {lbNUCL, LineDontBreak, 250}, - {lbNU, prCP}: {lbNUCP, LineDontBreak, 250}, - {lbNUNU, prCL}: {lbNUCL, LineDontBreak, 250}, - {lbNUNU, prCP}: {lbNUCP, LineDontBreak, 250}, - {lbNUSY, prCL}: {lbNUCL, LineDontBreak, 250}, - {lbNUSY, prCP}: {lbNUCP, LineDontBreak, 250}, - {lbNUIS, prCL}: {lbNUCL, LineDontBreak, 250}, - {lbNUIS, prCP}: {lbNUCP, LineDontBreak, 250}, - {lbNU, prPO}: {lbPO, LineDontBreak, 250}, - {lbNUNU, prPO}: {lbPO, LineDontBreak, 250}, - {lbNUSY, prPO}: {lbPO, LineDontBreak, 250}, - {lbNUIS, prPO}: {lbPO, LineDontBreak, 250}, - {lbNUCL, prPO}: {lbPO, LineDontBreak, 250}, - {lbNUCP, prPO}: {lbPO, LineDontBreak, 250}, - {lbNU, prPR}: {lbPR, LineDontBreak, 250}, - {lbNUNU, prPR}: {lbPR, LineDontBreak, 250}, - {lbNUSY, prPR}: {lbPR, LineDontBreak, 250}, - {lbNUIS, prPR}: {lbPR, LineDontBreak, 250}, - {lbNUCL, prPR}: {lbPR, LineDontBreak, 250}, - {lbNUCP, prPR}: {lbPR, LineDontBreak, 250}, + case lbPR | prNU<<32: + return lbNU, LineDontBreak, 250 + case lbPO | prNU<<32: + return lbNU, LineDontBreak, 250 + case lbOP | prNU<<32: + return lbNU, LineDontBreak, 250 + case lbHY | prNU<<32: + return lbNU, LineDontBreak, 250 + case lbNU | prNU<<32: + return lbNUNU, LineDontBreak, 250 + case lbNU | prSY<<32: + return lbNUSY, LineDontBreak, 250 + case lbNU | prIS<<32: + return lbNUIS, LineDontBreak, 250 + case lbNUNU | prNU<<32: + return lbNUNU, LineDontBreak, 250 + case lbNUNU | prSY<<32: + return lbNUSY, LineDontBreak, 250 + case lbNUNU | prIS<<32: + return lbNUIS, LineDontBreak, 250 + case lbNUSY | prNU<<32: + return lbNUNU, LineDontBreak, 250 + case lbNUSY | prSY<<32: + return lbNUSY, LineDontBreak, 250 + case lbNUSY | prIS<<32: + return lbNUIS, LineDontBreak, 250 + case lbNUIS | prNU<<32: + return lbNUNU, LineDontBreak, 250 + case lbNUIS | prSY<<32: + return lbNUSY, LineDontBreak, 250 + case lbNUIS | prIS<<32: + return lbNUIS, LineDontBreak, 250 + case lbNU | prCL<<32: + return lbNUCL, LineDontBreak, 250 + case lbNU | prCP<<32: + return lbNUCP, LineDontBreak, 250 + case lbNUNU | prCL<<32: + return lbNUCL, LineDontBreak, 250 + case lbNUNU | prCP<<32: + return lbNUCP, LineDontBreak, 250 + case lbNUSY | prCL<<32: + return lbNUCL, LineDontBreak, 250 + case lbNUSY | prCP<<32: + return lbNUCP, LineDontBreak, 250 + case lbNUIS | prCL<<32: + return lbNUCL, LineDontBreak, 250 + case lbNUIS | prCP<<32: + return lbNUCP, LineDontBreak, 250 + case lbNU | prPO<<32: + return lbPO, LineDontBreak, 250 + case lbNUNU | prPO<<32: + return lbPO, LineDontBreak, 250 + case lbNUSY | prPO<<32: + return lbPO, LineDontBreak, 250 + case lbNUIS | prPO<<32: + return lbPO, LineDontBreak, 250 + case lbNUCL | prPO<<32: + return lbPO, LineDontBreak, 250 + case lbNUCP | prPO<<32: + return lbPO, LineDontBreak, 250 + case lbNU | prPR<<32: + return lbPR, LineDontBreak, 250 + 
case lbNUNU | prPR<<32: + return lbPR, LineDontBreak, 250 + case lbNUSY | prPR<<32: + return lbPR, LineDontBreak, 250 + case lbNUIS | prPR<<32: + return lbPR, LineDontBreak, 250 + case lbNUCL | prPR<<32: + return lbPR, LineDontBreak, 250 + case lbNUCP | prPR<<32: + return lbPR, LineDontBreak, 250 // LB26. - {lbAny, prJL}: {lbJL, LineCanBreak, 310}, - {lbAny, prJV}: {lbJV, LineCanBreak, 310}, - {lbAny, prJT}: {lbJT, LineCanBreak, 310}, - {lbAny, prH2}: {lbH2, LineCanBreak, 310}, - {lbAny, prH3}: {lbH3, LineCanBreak, 310}, - {lbJL, prJL}: {lbJL, LineDontBreak, 260}, - {lbJL, prJV}: {lbJV, LineDontBreak, 260}, - {lbJL, prH2}: {lbH2, LineDontBreak, 260}, - {lbJL, prH3}: {lbH3, LineDontBreak, 260}, - {lbJV, prJV}: {lbJV, LineDontBreak, 260}, - {lbJV, prJT}: {lbJT, LineDontBreak, 260}, - {lbH2, prJV}: {lbJV, LineDontBreak, 260}, - {lbH2, prJT}: {lbJT, LineDontBreak, 260}, - {lbJT, prJT}: {lbJT, LineDontBreak, 260}, - {lbH3, prJT}: {lbJT, LineDontBreak, 260}, + case lbAny | prJL<<32: + return lbJL, LineCanBreak, 310 + case lbAny | prJV<<32: + return lbJV, LineCanBreak, 310 + case lbAny | prJT<<32: + return lbJT, LineCanBreak, 310 + case lbAny | prH2<<32: + return lbH2, LineCanBreak, 310 + case lbAny | prH3<<32: + return lbH3, LineCanBreak, 310 + case lbJL | prJL<<32: + return lbJL, LineDontBreak, 260 + case lbJL | prJV<<32: + return lbJV, LineDontBreak, 260 + case lbJL | prH2<<32: + return lbH2, LineDontBreak, 260 + case lbJL | prH3<<32: + return lbH3, LineDontBreak, 260 + case lbJV | prJV<<32: + return lbJV, LineDontBreak, 260 + case lbJV | prJT<<32: + return lbJT, LineDontBreak, 260 + case lbH2 | prJV<<32: + return lbJV, LineDontBreak, 260 + case lbH2 | prJT<<32: + return lbJT, LineDontBreak, 260 + case lbJT | prJT<<32: + return lbJT, LineDontBreak, 260 + case lbH3 | prJT<<32: + return lbJT, LineDontBreak, 260 // LB27. - {lbJL, prPO}: {lbPO, LineDontBreak, 270}, - {lbJV, prPO}: {lbPO, LineDontBreak, 270}, - {lbJT, prPO}: {lbPO, LineDontBreak, 270}, - {lbH2, prPO}: {lbPO, LineDontBreak, 270}, - {lbH3, prPO}: {lbPO, LineDontBreak, 270}, - {lbPR, prJL}: {lbJL, LineDontBreak, 270}, - {lbPR, prJV}: {lbJV, LineDontBreak, 270}, - {lbPR, prJT}: {lbJT, LineDontBreak, 270}, - {lbPR, prH2}: {lbH2, LineDontBreak, 270}, - {lbPR, prH3}: {lbH3, LineDontBreak, 270}, + case lbJL | prPO<<32: + return lbPO, LineDontBreak, 270 + case lbJV | prPO<<32: + return lbPO, LineDontBreak, 270 + case lbJT | prPO<<32: + return lbPO, LineDontBreak, 270 + case lbH2 | prPO<<32: + return lbPO, LineDontBreak, 270 + case lbH3 | prPO<<32: + return lbPO, LineDontBreak, 270 + case lbPR | prJL<<32: + return lbJL, LineDontBreak, 270 + case lbPR | prJV<<32: + return lbJV, LineDontBreak, 270 + case lbPR | prJT<<32: + return lbJT, LineDontBreak, 270 + case lbPR | prH2<<32: + return lbH2, LineDontBreak, 270 + case lbPR | prH3<<32: + return lbH3, LineDontBreak, 270 // LB28. - {lbAL, prAL}: {lbAL, LineDontBreak, 280}, - {lbAL, prHL}: {lbHL, LineDontBreak, 280}, - {lbHL, prAL}: {lbAL, LineDontBreak, 280}, - {lbHL, prHL}: {lbHL, LineDontBreak, 280}, + case lbAL | prAL<<32: + return lbAL, LineDontBreak, 280 + case lbAL | prHL<<32: + return lbHL, LineDontBreak, 280 + case lbHL | prAL<<32: + return lbAL, LineDontBreak, 280 + case lbHL | prHL<<32: + return lbHL, LineDontBreak, 280 // LB29. 
- {lbIS, prAL}: {lbAL, LineDontBreak, 290}, - {lbIS, prHL}: {lbHL, LineDontBreak, 290}, - {lbNUIS, prAL}: {lbAL, LineDontBreak, 290}, - {lbNUIS, prHL}: {lbHL, LineDontBreak, 290}, + case lbIS | prAL<<32: + return lbAL, LineDontBreak, 290 + case lbIS | prHL<<32: + return lbHL, LineDontBreak, 290 + case lbNUIS | prAL<<32: + return lbAL, LineDontBreak, 290 + case lbNUIS | prHL<<32: + return lbHL, LineDontBreak, 290 + + default: + return -1, -1, -1 + } } // transitionLineBreakState determines the new state of the line break parser @@ -290,7 +449,7 @@ var lbTransitions = map[[2]int][3]int{ // further lookups. func transitionLineBreakState(state int, r rune, b []byte, str string) (newState int, lineBreak int) { // Determine the property of the next character. - nextProperty, generalCategory := propertyWithGenCat(lineBreakCodePoints, r) + nextProperty, generalCategory := propertyLineBreak(r) // Prepare. var forceNoBreak, isCPeaFWH bool @@ -306,7 +465,7 @@ func transitionLineBreakState(state int, r rune, b []byte, str string) (newState defer func() { // Transition into LB30. if newState == lbCP || newState == lbNUCP { - ea := property(eastAsianWidth, r) + ea := propertyEastAsianWidth(r) if ea != prF && ea != prW && ea != prH { newState |= lbCPeaFWHBit } @@ -352,30 +511,27 @@ func transitionLineBreakState(state int, r rune, b []byte, str string) (newState // Find the applicable transition in the table. var rule int - transition, ok := lbTransitions[[2]int{state, nextProperty}] - if ok { - // We have a specific transition. We'll use it. - newState, lineBreak, rule = transition[0], transition[1], transition[2] - } else { + newState, lineBreak, rule = lbTransitions(state, nextProperty) + if newState < 0 { // No specific transition found. Try the less specific ones. - transAnyProp, okAnyProp := lbTransitions[[2]int{state, prAny}] - transAnyState, okAnyState := lbTransitions[[2]int{lbAny, nextProperty}] - if okAnyProp && okAnyState { + anyPropProp, anyPropLineBreak, anyPropRule := lbTransitions(state, prAny) + anyStateProp, anyStateLineBreak, anyStateRule := lbTransitions(lbAny, nextProperty) + if anyPropProp >= 0 && anyStateProp >= 0 { // Both apply. We'll use a mix (see comments for grTransitions). - newState, lineBreak, rule = transAnyState[0], transAnyState[1], transAnyState[2] - if transAnyProp[2] < transAnyState[2] { - lineBreak, rule = transAnyProp[1], transAnyProp[2] + newState, lineBreak, rule = anyStateProp, anyStateLineBreak, anyStateRule + if anyPropRule < anyStateRule { + lineBreak, rule = anyPropLineBreak, anyPropRule } - } else if okAnyProp { + } else if anyPropProp >= 0 { // We only have a specific state. - newState, lineBreak, rule = transAnyProp[0], transAnyProp[1], transAnyProp[2] + newState, lineBreak, rule = anyPropProp, anyPropLineBreak, anyPropRule // This branch will probably never be reached because okAnyState will // always be true given the current transition map. But we keep it here // for future modifications to the transition map where this may not be // true anymore. - } else if okAnyState { + } else if anyStateProp >= 0 { // We only have a specific property. - newState, lineBreak, rule = transAnyState[0], transAnyState[1], transAnyState[2] + newState, lineBreak, rule = anyStateProp, anyStateLineBreak, anyStateRule } else { // No known transition. LB31: ALL ÷ ALL. 
newState, lineBreak, rule = lbAny, LineCanBreak, 310 @@ -414,7 +570,7 @@ func transitionLineBreakState(state int, r rune, b []byte, str string) (newState r, _ = utf8.DecodeRuneInString(str) } if r != utf8.RuneError { - pr, _ := propertyWithGenCat(lineBreakCodePoints, r) + pr, _ := propertyLineBreak(r) if pr == prNU { return lbNU, LineDontBreak } @@ -424,7 +580,7 @@ func transitionLineBreakState(state int, r rune, b []byte, str string) (newState // LB30 (part one). if rule > 300 { if (state == lbAL || state == lbHL || state == lbNU || state == lbNUNU) && nextProperty == prOP { - ea := property(eastAsianWidth, r) + ea := propertyEastAsianWidth(r) if ea != prF && ea != prW && ea != prH { return lbOP, LineDontBreak } @@ -460,7 +616,7 @@ func transitionLineBreakState(state int, r rune, b []byte, str string) (newState return prAny, LineDontBreak } } - graphemeProperty := property(graphemeCodePoints, r) + graphemeProperty := propertyGraphemes(r) if graphemeProperty == prExtendedPictographic && generalCategory == gcCn { return lbExtPicCn, LineCanBreak } diff --git a/vendor/github.com/rivo/uniseg/properties.go b/vendor/github.com/rivo/uniseg/properties.go index bc3c7bcf3..6290e6810 100644 --- a/vendor/github.com/rivo/uniseg/properties.go +++ b/vendor/github.com/rivo/uniseg/properties.go @@ -160,9 +160,49 @@ func property(dictionary [][3]int, r rune) int { return propertySearch(dictionary, r)[2] } -// propertyWithGenCat returns the Unicode property value and General Category -// (see constants above) of the given code point. -func propertyWithGenCat(dictionary [][4]int, r rune) (property, generalCategory int) { - entry := propertySearch(dictionary, r) +// propertyLineBreak returns the Unicode property value and General Category +// (see constants above) of the given code point, as listed in the line break +// code points table, while fast tracking ASCII digits and letters. +func propertyLineBreak(r rune) (property, generalCategory int) { + if r >= 'a' && r <= 'z' { + return prAL, gcLl + } + if r >= 'A' && r <= 'Z' { + return prAL, gcLu + } + if r >= '0' && r <= '9' { + return prNU, gcNd + } + entry := propertySearch(lineBreakCodePoints, r) return entry[2], entry[3] } + +// propertyGraphemes returns the Unicode grapheme cluster property value of the +// given code point while fast tracking ASCII characters. +func propertyGraphemes(r rune) int { + if r >= 0x20 && r <= 0x7e { + return prAny + } + if r == 0x0a { + return prLF + } + if r == 0x0d { + return prCR + } + if r >= 0 && r <= 0x1f || r == 0x7f { + return prControl + } + return property(graphemeCodePoints, r) +} + +// propertyEastAsianWidth returns the Unicode East Asian Width property value of +// the given code point while fast tracking ASCII characters. +func propertyEastAsianWidth(r rune) int { + if r >= 0x20 && r <= 0x7e { + return prNa + } + if r >= 0 && r <= 0x1f || r == 0x7f { + return prN + } + return property(eastAsianWidth, r) +} diff --git a/vendor/github.com/rivo/uniseg/sentenceproperties.go b/vendor/github.com/rivo/uniseg/sentenceproperties.go index ba0cf2de1..67717ec1f 100644 --- a/vendor/github.com/rivo/uniseg/sentenceproperties.go +++ b/vendor/github.com/rivo/uniseg/sentenceproperties.go @@ -1,13 +1,13 @@ -package uniseg - // Code generated via go generate from gen_properties.go. DO NOT EDIT. 
+package uniseg + // sentenceBreakCodePoints are taken from -// https://www.unicode.org/Public/14.0.0/ucd/auxiliary/SentenceBreakProperty.txt +// https://www.unicode.org/Public/15.0.0/ucd/auxiliary/SentenceBreakProperty.txt // and -// https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt +// https://unicode.org/Public/15.0.0/ucd/emoji/emoji-data.txt // ("Extended_Pictographic" only) -// on September 10, 2022. See https://www.unicode.org/license.html for the Unicode +// on September 5, 2023. See https://www.unicode.org/license.html for the Unicode // license agreement. var sentenceBreakCodePoints = [][3]int{ {0x0009, 0x0009, prSp}, // Cc @@ -843,6 +843,7 @@ var sentenceBreakCodePoints = [][3]int{ {0x0CE2, 0x0CE3, prExtend}, // Mn [2] KANNADA VOWEL SIGN VOCALIC L..KANNADA VOWEL SIGN VOCALIC LL {0x0CE6, 0x0CEF, prNumeric}, // Nd [10] KANNADA DIGIT ZERO..KANNADA DIGIT NINE {0x0CF1, 0x0CF2, prOLetter}, // Lo [2] KANNADA SIGN JIHVAMULIYA..KANNADA SIGN UPADHMANIYA + {0x0CF3, 0x0CF3, prExtend}, // Mc KANNADA SIGN COMBINING ANUSVARA ABOVE RIGHT {0x0D00, 0x0D01, prExtend}, // Mn [2] MALAYALAM SIGN COMBINING ANUSVARA ABOVE..MALAYALAM SIGN CANDRABINDU {0x0D02, 0x0D03, prExtend}, // Mc [2] MALAYALAM SIGN ANUSVARA..MALAYALAM SIGN VISARGA {0x0D04, 0x0D0C, prOLetter}, // Lo [9] MALAYALAM LETTER VEDIC ANUSVARA..MALAYALAM LETTER VOCALIC L @@ -896,7 +897,7 @@ var sentenceBreakCodePoints = [][3]int{ {0x0EBD, 0x0EBD, prOLetter}, // Lo LAO SEMIVOWEL SIGN NYO {0x0EC0, 0x0EC4, prOLetter}, // Lo [5] LAO VOWEL SIGN E..LAO VOWEL SIGN AI {0x0EC6, 0x0EC6, prOLetter}, // Lm LAO KO LA - {0x0EC8, 0x0ECD, prExtend}, // Mn [6] LAO TONE MAI EK..LAO NIGGAHITA + {0x0EC8, 0x0ECE, prExtend}, // Mn [7] LAO TONE MAI EK..LAO YAMAKKAN {0x0ED0, 0x0ED9, prNumeric}, // Nd [10] LAO DIGIT ZERO..LAO DIGIT NINE {0x0EDC, 0x0EDF, prOLetter}, // Lo [4] LAO HO NO..LAO LETTER KHMU NYO {0x0F00, 0x0F00, prOLetter}, // Lo TIBETAN SYLLABLE OM @@ -958,7 +959,7 @@ var sentenceBreakCodePoints = [][3]int{ {0x10C7, 0x10C7, prUpper}, // L& GEORGIAN CAPITAL LETTER YN {0x10CD, 0x10CD, prUpper}, // L& GEORGIAN CAPITAL LETTER AEN {0x10D0, 0x10FA, prOLetter}, // L& [43] GEORGIAN LETTER AN..GEORGIAN LETTER AIN - {0x10FC, 0x10FC, prOLetter}, // Lm MODIFIER LETTER GEORGIAN NAR + {0x10FC, 0x10FC, prLower}, // Lm MODIFIER LETTER GEORGIAN NAR {0x10FD, 0x10FF, prOLetter}, // L& [3] GEORGIAN LETTER AEN..GEORGIAN LETTER LABIAL SIGN {0x1100, 0x1248, prOLetter}, // Lo [329] HANGUL CHOSEONG KIYEOK..ETHIOPIC SYLLABLE QWA {0x124A, 0x124D, prOLetter}, // Lo [4] ETHIOPIC SYLLABLE QWI..ETHIOPIC SYLLABLE QWE @@ -2034,7 +2035,7 @@ var sentenceBreakCodePoints = [][3]int{ {0xA7D7, 0xA7D7, prLower}, // L& LATIN SMALL LETTER MIDDLE SCOTS S {0xA7D8, 0xA7D8, prUpper}, // L& LATIN CAPITAL LETTER SIGMOID S {0xA7D9, 0xA7D9, prLower}, // L& LATIN SMALL LETTER SIGMOID S - {0xA7F2, 0xA7F4, prOLetter}, // Lm [3] MODIFIER LETTER CAPITAL C..MODIFIER LETTER CAPITAL Q + {0xA7F2, 0xA7F4, prLower}, // Lm [3] MODIFIER LETTER CAPITAL C..MODIFIER LETTER CAPITAL Q {0xA7F5, 0xA7F5, prUpper}, // L& LATIN CAPITAL LETTER REVERSED HALF H {0xA7F6, 0xA7F6, prLower}, // L& LATIN SMALL LETTER REVERSED HALF H {0xA7F7, 0xA7F7, prOLetter}, // Lo LATIN EPIGRAPHIC LETTER SIDEWAYS I @@ -2140,7 +2141,7 @@ var sentenceBreakCodePoints = [][3]int{ {0xAB30, 0xAB5A, prLower}, // L& [43] LATIN SMALL LETTER BARRED ALPHA..LATIN SMALL LETTER Y WITH SHORT RIGHT LEG {0xAB5C, 0xAB5F, prLower}, // Lm [4] MODIFIER LETTER SMALL HENG..MODIFIER LETTER SMALL U WITH LEFT HOOK {0xAB60, 0xAB68, prLower}, // L& [9] LATIN SMALL 
LETTER SAKHA YAT..LATIN SMALL LETTER TURNED R WITH MIDDLE TILDE - {0xAB69, 0xAB69, prOLetter}, // Lm MODIFIER LETTER SMALL TURNED W + {0xAB69, 0xAB69, prLower}, // Lm MODIFIER LETTER SMALL TURNED W {0xAB70, 0xABBF, prLower}, // L& [80] CHEROKEE SMALL LETTER A..CHEROKEE SMALL LETTER YA {0xABC0, 0xABE2, prOLetter}, // Lo [35] MEETEI MAYEK LETTER KOK..MEETEI MAYEK LETTER I LONSUM {0xABE3, 0xABE4, prExtend}, // Mc [2] MEETEI MAYEK VOWEL SIGN ONAP..MEETEI MAYEK VOWEL SIGN INAP @@ -2334,6 +2335,7 @@ var sentenceBreakCodePoints = [][3]int{ {0x10E80, 0x10EA9, prOLetter}, // Lo [42] YEZIDI LETTER ELIF..YEZIDI LETTER ET {0x10EAB, 0x10EAC, prExtend}, // Mn [2] YEZIDI COMBINING HAMZA MARK..YEZIDI COMBINING MADDA MARK {0x10EB0, 0x10EB1, prOLetter}, // Lo [2] YEZIDI LETTER LAM WITH DOT ABOVE..YEZIDI LETTER YOT WITH CIRCUMFLEX ABOVE + {0x10EFD, 0x10EFF, prExtend}, // Mn [3] ARABIC SMALL LOW WORD SAKTA..ARABIC SMALL LOW WORD MADDA {0x10F00, 0x10F1C, prOLetter}, // Lo [29] OLD SOGDIAN LETTER ALEPH..OLD SOGDIAN LETTER FINAL TAW WITH VERTICAL TAIL {0x10F27, 0x10F27, prOLetter}, // Lo OLD SOGDIAN LIGATURE AYIN-DALETH {0x10F30, 0x10F45, prOLetter}, // Lo [22] SOGDIAN LETTER ALEPH..SOGDIAN INDEPENDENT SHIN @@ -2408,6 +2410,8 @@ var sentenceBreakCodePoints = [][3]int{ {0x11238, 0x11239, prSTerm}, // Po [2] KHOJKI DANDA..KHOJKI DOUBLE DANDA {0x1123B, 0x1123C, prSTerm}, // Po [2] KHOJKI SECTION MARK..KHOJKI DOUBLE SECTION MARK {0x1123E, 0x1123E, prExtend}, // Mn KHOJKI SIGN SUKUN + {0x1123F, 0x11240, prOLetter}, // Lo [2] KHOJKI LETTER QA..KHOJKI LETTER SHORT I + {0x11241, 0x11241, prExtend}, // Mn KHOJKI VOWEL SIGN VOCALIC R {0x11280, 0x11286, prOLetter}, // Lo [7] MULTANI LETTER A..MULTANI LETTER GA {0x11288, 0x11288, prOLetter}, // Lo MULTANI LETTER GHA {0x1128A, 0x1128D, prOLetter}, // Lo [4] MULTANI LETTER CA..MULTANI LETTER JJA @@ -2603,13 +2607,29 @@ var sentenceBreakCodePoints = [][3]int{ {0x11EF3, 0x11EF4, prExtend}, // Mn [2] MAKASAR VOWEL SIGN I..MAKASAR VOWEL SIGN U {0x11EF5, 0x11EF6, prExtend}, // Mc [2] MAKASAR VOWEL SIGN E..MAKASAR VOWEL SIGN O {0x11EF7, 0x11EF8, prSTerm}, // Po [2] MAKASAR PASSIMBANG..MAKASAR END OF SECTION + {0x11F00, 0x11F01, prExtend}, // Mn [2] KAWI SIGN CANDRABINDU..KAWI SIGN ANUSVARA + {0x11F02, 0x11F02, prOLetter}, // Lo KAWI SIGN REPHA + {0x11F03, 0x11F03, prExtend}, // Mc KAWI SIGN VISARGA + {0x11F04, 0x11F10, prOLetter}, // Lo [13] KAWI LETTER A..KAWI LETTER O + {0x11F12, 0x11F33, prOLetter}, // Lo [34] KAWI LETTER KA..KAWI LETTER JNYA + {0x11F34, 0x11F35, prExtend}, // Mc [2] KAWI VOWEL SIGN AA..KAWI VOWEL SIGN ALTERNATE AA + {0x11F36, 0x11F3A, prExtend}, // Mn [5] KAWI VOWEL SIGN I..KAWI VOWEL SIGN VOCALIC R + {0x11F3E, 0x11F3F, prExtend}, // Mc [2] KAWI VOWEL SIGN E..KAWI VOWEL SIGN AI + {0x11F40, 0x11F40, prExtend}, // Mn KAWI VOWEL SIGN EU + {0x11F41, 0x11F41, prExtend}, // Mc KAWI SIGN KILLER + {0x11F42, 0x11F42, prExtend}, // Mn KAWI CONJOINER + {0x11F43, 0x11F44, prSTerm}, // Po [2] KAWI DANDA..KAWI DOUBLE DANDA + {0x11F50, 0x11F59, prNumeric}, // Nd [10] KAWI DIGIT ZERO..KAWI DIGIT NINE {0x11FB0, 0x11FB0, prOLetter}, // Lo LISU LETTER YHA {0x12000, 0x12399, prOLetter}, // Lo [922] CUNEIFORM SIGN A..CUNEIFORM SIGN U U {0x12400, 0x1246E, prOLetter}, // Nl [111] CUNEIFORM NUMERIC SIGN TWO ASH..CUNEIFORM NUMERIC SIGN NINE U VARIANT FORM {0x12480, 0x12543, prOLetter}, // Lo [196] CUNEIFORM SIGN AB TIMES NUN TENU..CUNEIFORM SIGN ZU5 TIMES THREE DISH TENU {0x12F90, 0x12FF0, prOLetter}, // Lo [97] CYPRO-MINOAN SIGN CM001..CYPRO-MINOAN SIGN CM114 - {0x13000, 0x1342E, 
prOLetter}, // Lo [1071] EGYPTIAN HIEROGLYPH A001..EGYPTIAN HIEROGLYPH AA032 - {0x13430, 0x13438, prFormat}, // Cf [9] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END SEGMENT + {0x13000, 0x1342F, prOLetter}, // Lo [1072] EGYPTIAN HIEROGLYPH A001..EGYPTIAN HIEROGLYPH V011D + {0x13430, 0x1343F, prFormat}, // Cf [16] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END WALLED ENCLOSURE + {0x13440, 0x13440, prExtend}, // Mn EGYPTIAN HIEROGLYPH MIRROR HORIZONTALLY + {0x13441, 0x13446, prOLetter}, // Lo [6] EGYPTIAN HIEROGLYPH FULL BLANK..EGYPTIAN HIEROGLYPH WIDE LOST SIGN + {0x13447, 0x13455, prExtend}, // Mn [15] EGYPTIAN HIEROGLYPH MODIFIER DAMAGED AT TOP START..EGYPTIAN HIEROGLYPH MODIFIER DAMAGED {0x14400, 0x14646, prOLetter}, // Lo [583] ANATOLIAN HIEROGLYPH A001..ANATOLIAN HIEROGLYPH A530 {0x16800, 0x16A38, prOLetter}, // Lo [569] BAMUM LETTER PHASE-A NGKUE MFON..BAMUM LETTER PHASE-F VUEQ {0x16A40, 0x16A5E, prOLetter}, // Lo [31] MRO LETTER TA..MRO LETTER TEK @@ -2648,7 +2668,9 @@ var sentenceBreakCodePoints = [][3]int{ {0x1AFF5, 0x1AFFB, prOLetter}, // Lm [7] KATAKANA LETTER MINNAN TONE-7..KATAKANA LETTER MINNAN NASALIZED TONE-5 {0x1AFFD, 0x1AFFE, prOLetter}, // Lm [2] KATAKANA LETTER MINNAN NASALIZED TONE-7..KATAKANA LETTER MINNAN NASALIZED TONE-8 {0x1B000, 0x1B122, prOLetter}, // Lo [291] KATAKANA LETTER ARCHAIC E..KATAKANA LETTER ARCHAIC WU + {0x1B132, 0x1B132, prOLetter}, // Lo HIRAGANA LETTER SMALL KO {0x1B150, 0x1B152, prOLetter}, // Lo [3] HIRAGANA LETTER SMALL WI..HIRAGANA LETTER SMALL WO + {0x1B155, 0x1B155, prOLetter}, // Lo KATAKANA LETTER SMALL KO {0x1B164, 0x1B167, prOLetter}, // Lo [4] KATAKANA LETTER SMALL WI..KATAKANA LETTER SMALL N {0x1B170, 0x1B2FB, prOLetter}, // Lo [396] NUSHU CHARACTER-1B170..NUSHU CHARACTER-1B2FB {0x1BC00, 0x1BC6A, prOLetter}, // Lo [107] DUPLOYAN LETTER H..DUPLOYAN LETTER VOCALIC M @@ -2738,11 +2760,14 @@ var sentenceBreakCodePoints = [][3]int{ {0x1DF00, 0x1DF09, prLower}, // L& [10] LATIN SMALL LETTER FENG DIGRAPH WITH TRILL..LATIN SMALL LETTER T WITH HOOK AND RETROFLEX HOOK {0x1DF0A, 0x1DF0A, prOLetter}, // Lo LATIN LETTER RETROFLEX CLICK WITH RETROFLEX HOOK {0x1DF0B, 0x1DF1E, prLower}, // L& [20] LATIN SMALL LETTER ESH WITH DOUBLE BAR..LATIN SMALL LETTER S WITH CURL + {0x1DF25, 0x1DF2A, prLower}, // L& [6] LATIN SMALL LETTER D WITH MID-HEIGHT LEFT HOOK..LATIN SMALL LETTER T WITH MID-HEIGHT LEFT HOOK {0x1E000, 0x1E006, prExtend}, // Mn [7] COMBINING GLAGOLITIC LETTER AZU..COMBINING GLAGOLITIC LETTER ZHIVETE {0x1E008, 0x1E018, prExtend}, // Mn [17] COMBINING GLAGOLITIC LETTER ZEMLJA..COMBINING GLAGOLITIC LETTER HERU {0x1E01B, 0x1E021, prExtend}, // Mn [7] COMBINING GLAGOLITIC LETTER SHTA..COMBINING GLAGOLITIC LETTER YATI {0x1E023, 0x1E024, prExtend}, // Mn [2] COMBINING GLAGOLITIC LETTER YU..COMBINING GLAGOLITIC LETTER SMALL YUS {0x1E026, 0x1E02A, prExtend}, // Mn [5] COMBINING GLAGOLITIC LETTER YO..COMBINING GLAGOLITIC LETTER FITA + {0x1E030, 0x1E06D, prLower}, // Lm [62] MODIFIER LETTER CYRILLIC SMALL A..MODIFIER LETTER CYRILLIC SMALL STRAIGHT U WITH STROKE + {0x1E08F, 0x1E08F, prExtend}, // Mn COMBINING CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I {0x1E100, 0x1E12C, prOLetter}, // Lo [45] NYIAKENG PUACHUE HMONG LETTER MA..NYIAKENG PUACHUE HMONG LETTER W {0x1E130, 0x1E136, prExtend}, // Mn [7] NYIAKENG PUACHUE HMONG TONE-B..NYIAKENG PUACHUE HMONG TONE-D {0x1E137, 0x1E13D, prOLetter}, // Lm [7] NYIAKENG PUACHUE HMONG SIGN FOR PERSON..NYIAKENG PUACHUE HMONG SYLLABLE LENGTHENER @@ -2753,6 +2778,10 @@ var 
sentenceBreakCodePoints = [][3]int{ {0x1E2C0, 0x1E2EB, prOLetter}, // Lo [44] WANCHO LETTER AA..WANCHO LETTER YIH {0x1E2EC, 0x1E2EF, prExtend}, // Mn [4] WANCHO TONE TUP..WANCHO TONE KOINI {0x1E2F0, 0x1E2F9, prNumeric}, // Nd [10] WANCHO DIGIT ZERO..WANCHO DIGIT NINE + {0x1E4D0, 0x1E4EA, prOLetter}, // Lo [27] NAG MUNDARI LETTER O..NAG MUNDARI LETTER ELL + {0x1E4EB, 0x1E4EB, prOLetter}, // Lm NAG MUNDARI SIGN OJOD + {0x1E4EC, 0x1E4EF, prExtend}, // Mn [4] NAG MUNDARI SIGN MUHOR..NAG MUNDARI SIGN SUTUH + {0x1E4F0, 0x1E4F9, prNumeric}, // Nd [10] NAG MUNDARI DIGIT ZERO..NAG MUNDARI DIGIT NINE {0x1E7E0, 0x1E7E6, prOLetter}, // Lo [7] ETHIOPIC SYLLABLE HHYA..ETHIOPIC SYLLABLE HHYO {0x1E7E8, 0x1E7EB, prOLetter}, // Lo [4] ETHIOPIC SYLLABLE GURAGE HHWA..ETHIOPIC SYLLABLE HHWE {0x1E7ED, 0x1E7EE, prOLetter}, // Lo [2] ETHIOPIC SYLLABLE GURAGE MWI..ETHIOPIC SYLLABLE GURAGE MWEE @@ -2803,12 +2832,13 @@ var sentenceBreakCodePoints = [][3]int{ {0x1F676, 0x1F678, prClose}, // So [3] SANS-SERIF HEAVY DOUBLE TURNED COMMA QUOTATION MARK ORNAMENT..SANS-SERIF HEAVY LOW DOUBLE COMMA QUOTATION MARK ORNAMENT {0x1FBF0, 0x1FBF9, prNumeric}, // Nd [10] SEGMENTED DIGIT ZERO..SEGMENTED DIGIT NINE {0x20000, 0x2A6DF, prOLetter}, // Lo [42720] CJK UNIFIED IDEOGRAPH-20000..CJK UNIFIED IDEOGRAPH-2A6DF - {0x2A700, 0x2B738, prOLetter}, // Lo [4153] CJK UNIFIED IDEOGRAPH-2A700..CJK UNIFIED IDEOGRAPH-2B738 + {0x2A700, 0x2B739, prOLetter}, // Lo [4154] CJK UNIFIED IDEOGRAPH-2A700..CJK UNIFIED IDEOGRAPH-2B739 {0x2B740, 0x2B81D, prOLetter}, // Lo [222] CJK UNIFIED IDEOGRAPH-2B740..CJK UNIFIED IDEOGRAPH-2B81D {0x2B820, 0x2CEA1, prOLetter}, // Lo [5762] CJK UNIFIED IDEOGRAPH-2B820..CJK UNIFIED IDEOGRAPH-2CEA1 {0x2CEB0, 0x2EBE0, prOLetter}, // Lo [7473] CJK UNIFIED IDEOGRAPH-2CEB0..CJK UNIFIED IDEOGRAPH-2EBE0 {0x2F800, 0x2FA1D, prOLetter}, // Lo [542] CJK COMPATIBILITY IDEOGRAPH-2F800..CJK COMPATIBILITY IDEOGRAPH-2FA1D {0x30000, 0x3134A, prOLetter}, // Lo [4939] CJK UNIFIED IDEOGRAPH-30000..CJK UNIFIED IDEOGRAPH-3134A + {0x31350, 0x323AF, prOLetter}, // Lo [4192] CJK UNIFIED IDEOGRAPH-31350..CJK UNIFIED IDEOGRAPH-323AF {0xE0001, 0xE0001, prFormat}, // Cf LANGUAGE TAG {0xE0020, 0xE007F, prExtend}, // Cf [96] TAG SPACE..CANCEL TAG {0xE0100, 0xE01EF, prExtend}, // Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256 diff --git a/vendor/github.com/rivo/uniseg/sentencerules.go b/vendor/github.com/rivo/uniseg/sentencerules.go index 58c04794e..0b29c7bdb 100644 --- a/vendor/github.com/rivo/uniseg/sentencerules.go +++ b/vendor/github.com/rivo/uniseg/sentencerules.go @@ -18,104 +18,178 @@ const ( sbSB8aSp ) -// The sentence break parser's breaking instructions. -const ( - sbDontBreak = iota - sbBreak -) - -// The sentence break parser's state transitions. It's anologous to -// grTransitions, see comments there for details. Unicode version 14.0.0. -var sbTransitions = map[[2]int][3]int{ +// sbTransitions implements the sentence break parser's state transitions. It's +// anologous to [grTransitions], see comments there for details. +// +// Unicode version 15.0.0. +func sbTransitions(state, prop int) (newState int, sentenceBreak bool, rule int) { + switch uint64(state) | uint64(prop)<<32 { // SB3. - {sbAny, prCR}: {sbCR, sbDontBreak, 9990}, - {sbCR, prLF}: {sbParaSep, sbDontBreak, 30}, + case sbAny | prCR<<32: + return sbCR, false, 9990 + case sbCR | prLF<<32: + return sbParaSep, false, 30 // SB4. 
- {sbAny, prSep}: {sbParaSep, sbDontBreak, 9990}, - {sbAny, prLF}: {sbParaSep, sbDontBreak, 9990}, - {sbParaSep, prAny}: {sbAny, sbBreak, 40}, - {sbCR, prAny}: {sbAny, sbBreak, 40}, + case sbAny | prSep<<32: + return sbParaSep, false, 9990 + case sbAny | prLF<<32: + return sbParaSep, false, 9990 + case sbParaSep | prAny<<32: + return sbAny, true, 40 + case sbCR | prAny<<32: + return sbAny, true, 40 // SB6. - {sbAny, prATerm}: {sbATerm, sbDontBreak, 9990}, - {sbATerm, prNumeric}: {sbAny, sbDontBreak, 60}, - {sbSB7, prNumeric}: {sbAny, sbDontBreak, 60}, // Because ATerm also appears in SB7. + case sbAny | prATerm<<32: + return sbATerm, false, 9990 + case sbATerm | prNumeric<<32: + return sbAny, false, 60 + case sbSB7 | prNumeric<<32: + return sbAny, false, 60 // Because ATerm also appears in SB7. // SB7. - {sbAny, prUpper}: {sbUpper, sbDontBreak, 9990}, - {sbAny, prLower}: {sbLower, sbDontBreak, 9990}, - {sbUpper, prATerm}: {sbSB7, sbDontBreak, 70}, - {sbLower, prATerm}: {sbSB7, sbDontBreak, 70}, - {sbSB7, prUpper}: {sbUpper, sbDontBreak, 70}, + case sbAny | prUpper<<32: + return sbUpper, false, 9990 + case sbAny | prLower<<32: + return sbLower, false, 9990 + case sbUpper | prATerm<<32: + return sbSB7, false, 70 + case sbLower | prATerm<<32: + return sbSB7, false, 70 + case sbSB7 | prUpper<<32: + return sbUpper, false, 70 // SB8a. - {sbAny, prSTerm}: {sbSTerm, sbDontBreak, 9990}, - {sbATerm, prSContinue}: {sbAny, sbDontBreak, 81}, - {sbATerm, prATerm}: {sbATerm, sbDontBreak, 81}, - {sbATerm, prSTerm}: {sbSTerm, sbDontBreak, 81}, - {sbSB7, prSContinue}: {sbAny, sbDontBreak, 81}, - {sbSB7, prATerm}: {sbATerm, sbDontBreak, 81}, - {sbSB7, prSTerm}: {sbSTerm, sbDontBreak, 81}, - {sbSB8Close, prSContinue}: {sbAny, sbDontBreak, 81}, - {sbSB8Close, prATerm}: {sbATerm, sbDontBreak, 81}, - {sbSB8Close, prSTerm}: {sbSTerm, sbDontBreak, 81}, - {sbSB8Sp, prSContinue}: {sbAny, sbDontBreak, 81}, - {sbSB8Sp, prATerm}: {sbATerm, sbDontBreak, 81}, - {sbSB8Sp, prSTerm}: {sbSTerm, sbDontBreak, 81}, - {sbSTerm, prSContinue}: {sbAny, sbDontBreak, 81}, - {sbSTerm, prATerm}: {sbATerm, sbDontBreak, 81}, - {sbSTerm, prSTerm}: {sbSTerm, sbDontBreak, 81}, - {sbSB8aClose, prSContinue}: {sbAny, sbDontBreak, 81}, - {sbSB8aClose, prATerm}: {sbATerm, sbDontBreak, 81}, - {sbSB8aClose, prSTerm}: {sbSTerm, sbDontBreak, 81}, - {sbSB8aSp, prSContinue}: {sbAny, sbDontBreak, 81}, - {sbSB8aSp, prATerm}: {sbATerm, sbDontBreak, 81}, - {sbSB8aSp, prSTerm}: {sbSTerm, sbDontBreak, 81}, + case sbAny | prSTerm<<32: + return sbSTerm, false, 9990 + case sbATerm | prSContinue<<32: + return sbAny, false, 81 + case sbATerm | prATerm<<32: + return sbATerm, false, 81 + case sbATerm | prSTerm<<32: + return sbSTerm, false, 81 + case sbSB7 | prSContinue<<32: + return sbAny, false, 81 + case sbSB7 | prATerm<<32: + return sbATerm, false, 81 + case sbSB7 | prSTerm<<32: + return sbSTerm, false, 81 + case sbSB8Close | prSContinue<<32: + return sbAny, false, 81 + case sbSB8Close | prATerm<<32: + return sbATerm, false, 81 + case sbSB8Close | prSTerm<<32: + return sbSTerm, false, 81 + case sbSB8Sp | prSContinue<<32: + return sbAny, false, 81 + case sbSB8Sp | prATerm<<32: + return sbATerm, false, 81 + case sbSB8Sp | prSTerm<<32: + return sbSTerm, false, 81 + case sbSTerm | prSContinue<<32: + return sbAny, false, 81 + case sbSTerm | prATerm<<32: + return sbATerm, false, 81 + case sbSTerm | prSTerm<<32: + return sbSTerm, false, 81 + case sbSB8aClose | prSContinue<<32: + return sbAny, false, 81 + case sbSB8aClose | prATerm<<32: + return sbATerm, false, 81 + 
case sbSB8aClose | prSTerm<<32: + return sbSTerm, false, 81 + case sbSB8aSp | prSContinue<<32: + return sbAny, false, 81 + case sbSB8aSp | prATerm<<32: + return sbATerm, false, 81 + case sbSB8aSp | prSTerm<<32: + return sbSTerm, false, 81 // SB9. - {sbATerm, prClose}: {sbSB8Close, sbDontBreak, 90}, - {sbSB7, prClose}: {sbSB8Close, sbDontBreak, 90}, - {sbSB8Close, prClose}: {sbSB8Close, sbDontBreak, 90}, - {sbATerm, prSp}: {sbSB8Sp, sbDontBreak, 90}, - {sbSB7, prSp}: {sbSB8Sp, sbDontBreak, 90}, - {sbSB8Close, prSp}: {sbSB8Sp, sbDontBreak, 90}, - {sbSTerm, prClose}: {sbSB8aClose, sbDontBreak, 90}, - {sbSB8aClose, prClose}: {sbSB8aClose, sbDontBreak, 90}, - {sbSTerm, prSp}: {sbSB8aSp, sbDontBreak, 90}, - {sbSB8aClose, prSp}: {sbSB8aSp, sbDontBreak, 90}, - {sbATerm, prSep}: {sbParaSep, sbDontBreak, 90}, - {sbATerm, prCR}: {sbParaSep, sbDontBreak, 90}, - {sbATerm, prLF}: {sbParaSep, sbDontBreak, 90}, - {sbSB7, prSep}: {sbParaSep, sbDontBreak, 90}, - {sbSB7, prCR}: {sbParaSep, sbDontBreak, 90}, - {sbSB7, prLF}: {sbParaSep, sbDontBreak, 90}, - {sbSB8Close, prSep}: {sbParaSep, sbDontBreak, 90}, - {sbSB8Close, prCR}: {sbParaSep, sbDontBreak, 90}, - {sbSB8Close, prLF}: {sbParaSep, sbDontBreak, 90}, - {sbSTerm, prSep}: {sbParaSep, sbDontBreak, 90}, - {sbSTerm, prCR}: {sbParaSep, sbDontBreak, 90}, - {sbSTerm, prLF}: {sbParaSep, sbDontBreak, 90}, - {sbSB8aClose, prSep}: {sbParaSep, sbDontBreak, 90}, - {sbSB8aClose, prCR}: {sbParaSep, sbDontBreak, 90}, - {sbSB8aClose, prLF}: {sbParaSep, sbDontBreak, 90}, + case sbATerm | prClose<<32: + return sbSB8Close, false, 90 + case sbSB7 | prClose<<32: + return sbSB8Close, false, 90 + case sbSB8Close | prClose<<32: + return sbSB8Close, false, 90 + case sbATerm | prSp<<32: + return sbSB8Sp, false, 90 + case sbSB7 | prSp<<32: + return sbSB8Sp, false, 90 + case sbSB8Close | prSp<<32: + return sbSB8Sp, false, 90 + case sbSTerm | prClose<<32: + return sbSB8aClose, false, 90 + case sbSB8aClose | prClose<<32: + return sbSB8aClose, false, 90 + case sbSTerm | prSp<<32: + return sbSB8aSp, false, 90 + case sbSB8aClose | prSp<<32: + return sbSB8aSp, false, 90 + case sbATerm | prSep<<32: + return sbParaSep, false, 90 + case sbATerm | prCR<<32: + return sbParaSep, false, 90 + case sbATerm | prLF<<32: + return sbParaSep, false, 90 + case sbSB7 | prSep<<32: + return sbParaSep, false, 90 + case sbSB7 | prCR<<32: + return sbParaSep, false, 90 + case sbSB7 | prLF<<32: + return sbParaSep, false, 90 + case sbSB8Close | prSep<<32: + return sbParaSep, false, 90 + case sbSB8Close | prCR<<32: + return sbParaSep, false, 90 + case sbSB8Close | prLF<<32: + return sbParaSep, false, 90 + case sbSTerm | prSep<<32: + return sbParaSep, false, 90 + case sbSTerm | prCR<<32: + return sbParaSep, false, 90 + case sbSTerm | prLF<<32: + return sbParaSep, false, 90 + case sbSB8aClose | prSep<<32: + return sbParaSep, false, 90 + case sbSB8aClose | prCR<<32: + return sbParaSep, false, 90 + case sbSB8aClose | prLF<<32: + return sbParaSep, false, 90 // SB10. - {sbSB8Sp, prSp}: {sbSB8Sp, sbDontBreak, 100}, - {sbSB8aSp, prSp}: {sbSB8aSp, sbDontBreak, 100}, - {sbSB8Sp, prSep}: {sbParaSep, sbDontBreak, 100}, - {sbSB8Sp, prCR}: {sbParaSep, sbDontBreak, 100}, - {sbSB8Sp, prLF}: {sbParaSep, sbDontBreak, 100}, + case sbSB8Sp | prSp<<32: + return sbSB8Sp, false, 100 + case sbSB8aSp | prSp<<32: + return sbSB8aSp, false, 100 + case sbSB8Sp | prSep<<32: + return sbParaSep, false, 100 + case sbSB8Sp | prCR<<32: + return sbParaSep, false, 100 + case sbSB8Sp | prLF<<32: + return sbParaSep, false, 100 // SB11. 
- {sbATerm, prAny}: {sbAny, sbBreak, 110}, - {sbSB7, prAny}: {sbAny, sbBreak, 110}, - {sbSB8Close, prAny}: {sbAny, sbBreak, 110}, - {sbSB8Sp, prAny}: {sbAny, sbBreak, 110}, - {sbSTerm, prAny}: {sbAny, sbBreak, 110}, - {sbSB8aClose, prAny}: {sbAny, sbBreak, 110}, - {sbSB8aSp, prAny}: {sbAny, sbBreak, 110}, + case sbATerm | prAny<<32: + return sbAny, true, 110 + case sbSB7 | prAny<<32: + return sbAny, true, 110 + case sbSB8Close | prAny<<32: + return sbAny, true, 110 + case sbSB8Sp | prAny<<32: + return sbAny, true, 110 + case sbSTerm | prAny<<32: + return sbAny, true, 110 + case sbSB8aClose | prAny<<32: + return sbAny, true, 110 + case sbSB8aSp | prAny<<32: + return sbAny, true, 110 // We'll always break after ParaSep due to SB4. + + default: + return -1, false, -1 + } } // transitionSentenceBreakState determines the new state of the sentence break @@ -141,30 +215,27 @@ func transitionSentenceBreakState(state int, r rune, b []byte, str string) (newS // Find the applicable transition in the table. var rule int - transition, ok := sbTransitions[[2]int{state, nextProperty}] - if ok { - // We have a specific transition. We'll use it. - newState, sentenceBreak, rule = transition[0], transition[1] == sbBreak, transition[2] - } else { + newState, sentenceBreak, rule = sbTransitions(state, nextProperty) + if newState < 0 { // No specific transition found. Try the less specific ones. - transAnyProp, okAnyProp := sbTransitions[[2]int{state, prAny}] - transAnyState, okAnyState := sbTransitions[[2]int{sbAny, nextProperty}] - if okAnyProp && okAnyState { + anyPropState, anyPropProp, anyPropRule := sbTransitions(state, prAny) + anyStateState, anyStateProp, anyStateRule := sbTransitions(sbAny, nextProperty) + if anyPropState >= 0 && anyStateState >= 0 { // Both apply. We'll use a mix (see comments for grTransitions). - newState, sentenceBreak, rule = transAnyState[0], transAnyState[1] == sbBreak, transAnyState[2] - if transAnyProp[2] < transAnyState[2] { - sentenceBreak, rule = transAnyProp[1] == sbBreak, transAnyProp[2] + newState, sentenceBreak, rule = anyStateState, anyStateProp, anyStateRule + if anyPropRule < anyStateRule { + sentenceBreak, rule = anyPropProp, anyPropRule } - } else if okAnyProp { + } else if anyPropState >= 0 { // We only have a specific state. - newState, sentenceBreak, rule = transAnyProp[0], transAnyProp[1] == sbBreak, transAnyProp[2] + newState, sentenceBreak, rule = anyPropState, anyPropProp, anyPropRule // This branch will probably never be reached because okAnyState will // always be true given the current transition map. But we keep it here // for future modifications to the transition map where this may not be // true anymore. - } else if okAnyState { + } else if anyStateState >= 0 { // We only have a specific property. - newState, sentenceBreak, rule = transAnyState[0], transAnyState[1] == sbBreak, transAnyState[2] + newState, sentenceBreak, rule = anyStateState, anyStateProp, anyStateRule } else { // No known transition. SB999: Any × Any. newState, sentenceBreak, rule = sbAny, false, 9990 diff --git a/vendor/github.com/rivo/uniseg/step.go b/vendor/github.com/rivo/uniseg/step.go index 6eca4b5dc..9b72c5e59 100644 --- a/vendor/github.com/rivo/uniseg/step.go +++ b/vendor/github.com/rivo/uniseg/step.go @@ -100,7 +100,7 @@ func Step(b []byte, state int) (cluster, rest []byte, boundaries int, newState i if len(b) <= length { // If we're already past the end, there is nothing else to parse. 
var prop int if state < 0 { - prop = property(graphemeCodePoints, r) + prop = propertyGraphemes(r) } else { prop = state >> shiftPropState } @@ -150,16 +150,14 @@ func Step(b []byte, state int) (cluster, rest []byte, boundaries int, newState i return b[:length], b[length:], boundary, graphemeState | (wordState << shiftWordState) | (sentenceState << shiftSentenceState) | (lineState << shiftLineState) | (prop << shiftPropState) } - if r == vs16 { - width = 2 - } else if firstProp != prExtendedPictographic && firstProp != prRegionalIndicator && firstProp != prL { - width += runeWidth(r, prop) - } else if firstProp == prExtendedPictographic { + if firstProp == prExtendedPictographic { if r == vs15 { width = 1 - } else { + } else if r == vs16 { width = 2 } + } else if firstProp != prRegionalIndicator && firstProp != prL { + width += runeWidth(r, prop) } length += l @@ -179,7 +177,7 @@ func StepString(str string, state int) (cluster, rest string, boundaries int, ne // Extract the first rune. r, length := utf8.DecodeRuneInString(str) if len(str) <= length { // If we're already past the end, there is nothing else to parse. - prop := property(graphemeCodePoints, r) + prop := propertyGraphemes(r) return str, "", LineMustBreak | (1 << shiftWord) | (1 << shiftSentence) | (runeWidth(r, prop) << ShiftWidth), grAny | (wbAny << shiftWordState) | (sbAny << shiftSentenceState) | (lbAny << shiftLineState) } @@ -226,16 +224,14 @@ func StepString(str string, state int) (cluster, rest string, boundaries int, ne return str[:length], str[length:], boundary, graphemeState | (wordState << shiftWordState) | (sentenceState << shiftSentenceState) | (lineState << shiftLineState) | (prop << shiftPropState) } - if r == vs16 { - width = 2 - } else if firstProp != prExtendedPictographic && firstProp != prRegionalIndicator && firstProp != prL { - width += runeWidth(r, prop) - } else if firstProp == prExtendedPictographic { + if firstProp == prExtendedPictographic { if r == vs15 { width = 1 - } else { + } else if r == vs16 { width = 2 } + } else if firstProp != prRegionalIndicator && firstProp != prL { + width += runeWidth(r, prop) } length += l diff --git a/vendor/github.com/rivo/uniseg/width.go b/vendor/github.com/rivo/uniseg/width.go index 12a57cc2e..975a9f134 100644 --- a/vendor/github.com/rivo/uniseg/width.go +++ b/vendor/github.com/rivo/uniseg/width.go @@ -1,5 +1,10 @@ package uniseg +// EastAsianAmbiguousWidth specifies the monospace width for East Asian +// characters classified as Ambiguous. The default is 1 but some rare fonts +// render them with a width of 2. +var EastAsianAmbiguousWidth = 1 + // runeWidth returns the monospace width for the given rune. The provided // grapheme property is a value mapped by the [graphemeCodePoints] table. // @@ -33,9 +38,11 @@ func runeWidth(r rune, graphemeProperty int) int { return 4 } - switch property(eastAsianWidth, r) { + switch propertyEastAsianWidth(r) { case prW, prF: return 2 + case prA: + return EastAsianAmbiguousWidth } return 1 diff --git a/vendor/github.com/rivo/uniseg/wordproperties.go b/vendor/github.com/rivo/uniseg/wordproperties.go index 805cc536c..277ca1006 100644 --- a/vendor/github.com/rivo/uniseg/wordproperties.go +++ b/vendor/github.com/rivo/uniseg/wordproperties.go @@ -1,13 +1,13 @@ -package uniseg - // Code generated via go generate from gen_properties.go. DO NOT EDIT. 
+package uniseg + // workBreakCodePoints are taken from -// https://www.unicode.org/Public/14.0.0/ucd/auxiliary/WordBreakProperty.txt +// https://www.unicode.org/Public/15.0.0/ucd/auxiliary/WordBreakProperty.txt // and -// https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt +// https://unicode.org/Public/15.0.0/ucd/emoji/emoji-data.txt // ("Extended_Pictographic" only) -// on September 10, 2022. See https://www.unicode.org/license.html for the Unicode +// on September 5, 2023. See https://www.unicode.org/license.html for the Unicode // license agreement. var workBreakCodePoints = [][3]int{ {0x000A, 0x000A, prLF}, // Cc @@ -318,6 +318,7 @@ var workBreakCodePoints = [][3]int{ {0x0CE2, 0x0CE3, prExtend}, // Mn [2] KANNADA VOWEL SIGN VOCALIC L..KANNADA VOWEL SIGN VOCALIC LL {0x0CE6, 0x0CEF, prNumeric}, // Nd [10] KANNADA DIGIT ZERO..KANNADA DIGIT NINE {0x0CF1, 0x0CF2, prALetter}, // Lo [2] KANNADA SIGN JIHVAMULIYA..KANNADA SIGN UPADHMANIYA + {0x0CF3, 0x0CF3, prExtend}, // Mc KANNADA SIGN COMBINING ANUSVARA ABOVE RIGHT {0x0D00, 0x0D01, prExtend}, // Mn [2] MALAYALAM SIGN COMBINING ANUSVARA ABOVE..MALAYALAM SIGN CANDRABINDU {0x0D02, 0x0D03, prExtend}, // Mc [2] MALAYALAM SIGN ANUSVARA..MALAYALAM SIGN VISARGA {0x0D04, 0x0D0C, prALetter}, // Lo [9] MALAYALAM LETTER VEDIC ANUSVARA..MALAYALAM LETTER VOCALIC L @@ -357,7 +358,7 @@ var workBreakCodePoints = [][3]int{ {0x0E50, 0x0E59, prNumeric}, // Nd [10] THAI DIGIT ZERO..THAI DIGIT NINE {0x0EB1, 0x0EB1, prExtend}, // Mn LAO VOWEL SIGN MAI KAN {0x0EB4, 0x0EBC, prExtend}, // Mn [9] LAO VOWEL SIGN I..LAO SEMIVOWEL SIGN LO - {0x0EC8, 0x0ECD, prExtend}, // Mn [6] LAO TONE MAI EK..LAO NIGGAHITA + {0x0EC8, 0x0ECE, prExtend}, // Mn [7] LAO TONE MAI EK..LAO YAMAKKAN {0x0ED0, 0x0ED9, prNumeric}, // Nd [10] LAO DIGIT ZERO..LAO DIGIT NINE {0x0F00, 0x0F00, prALetter}, // Lo TIBETAN SYLLABLE OM {0x0F18, 0x0F19, prExtend}, // Mn [2] TIBETAN ASTROLOGICAL SIGN -KHYUD PA..TIBETAN ASTROLOGICAL SIGN SDONG TSHUGS @@ -1093,6 +1094,7 @@ var workBreakCodePoints = [][3]int{ {0x10E80, 0x10EA9, prALetter}, // Lo [42] YEZIDI LETTER ELIF..YEZIDI LETTER ET {0x10EAB, 0x10EAC, prExtend}, // Mn [2] YEZIDI COMBINING HAMZA MARK..YEZIDI COMBINING MADDA MARK {0x10EB0, 0x10EB1, prALetter}, // Lo [2] YEZIDI LETTER LAM WITH DOT ABOVE..YEZIDI LETTER YOT WITH CIRCUMFLEX ABOVE + {0x10EFD, 0x10EFF, prExtend}, // Mn [3] ARABIC SMALL LOW WORD SAKTA..ARABIC SMALL LOW WORD MADDA {0x10F00, 0x10F1C, prALetter}, // Lo [29] OLD SOGDIAN LETTER ALEPH..OLD SOGDIAN LETTER FINAL TAW WITH VERTICAL TAIL {0x10F27, 0x10F27, prALetter}, // Lo OLD SOGDIAN LIGATURE AYIN-DALETH {0x10F30, 0x10F45, prALetter}, // Lo [22] SOGDIAN LETTER ALEPH..SOGDIAN INDEPENDENT SHIN @@ -1157,6 +1159,8 @@ var workBreakCodePoints = [][3]int{ {0x11235, 0x11235, prExtend}, // Mc KHOJKI SIGN VIRAMA {0x11236, 0x11237, prExtend}, // Mn [2] KHOJKI SIGN NUKTA..KHOJKI SIGN SHADDA {0x1123E, 0x1123E, prExtend}, // Mn KHOJKI SIGN SUKUN + {0x1123F, 0x11240, prALetter}, // Lo [2] KHOJKI LETTER QA..KHOJKI LETTER SHORT I + {0x11241, 0x11241, prExtend}, // Mn KHOJKI VOWEL SIGN VOCALIC R {0x11280, 0x11286, prALetter}, // Lo [7] MULTANI LETTER A..MULTANI LETTER GA {0x11288, 0x11288, prALetter}, // Lo MULTANI LETTER GHA {0x1128A, 0x1128D, prALetter}, // Lo [4] MULTANI LETTER CA..MULTANI LETTER JJA @@ -1337,13 +1341,28 @@ var workBreakCodePoints = [][3]int{ {0x11EE0, 0x11EF2, prALetter}, // Lo [19] MAKASAR LETTER KA..MAKASAR ANGKA {0x11EF3, 0x11EF4, prExtend}, // Mn [2] MAKASAR VOWEL SIGN I..MAKASAR VOWEL SIGN U {0x11EF5, 0x11EF6, prExtend}, // 
Mc [2] MAKASAR VOWEL SIGN E..MAKASAR VOWEL SIGN O + {0x11F00, 0x11F01, prExtend}, // Mn [2] KAWI SIGN CANDRABINDU..KAWI SIGN ANUSVARA + {0x11F02, 0x11F02, prALetter}, // Lo KAWI SIGN REPHA + {0x11F03, 0x11F03, prExtend}, // Mc KAWI SIGN VISARGA + {0x11F04, 0x11F10, prALetter}, // Lo [13] KAWI LETTER A..KAWI LETTER O + {0x11F12, 0x11F33, prALetter}, // Lo [34] KAWI LETTER KA..KAWI LETTER JNYA + {0x11F34, 0x11F35, prExtend}, // Mc [2] KAWI VOWEL SIGN AA..KAWI VOWEL SIGN ALTERNATE AA + {0x11F36, 0x11F3A, prExtend}, // Mn [5] KAWI VOWEL SIGN I..KAWI VOWEL SIGN VOCALIC R + {0x11F3E, 0x11F3F, prExtend}, // Mc [2] KAWI VOWEL SIGN E..KAWI VOWEL SIGN AI + {0x11F40, 0x11F40, prExtend}, // Mn KAWI VOWEL SIGN EU + {0x11F41, 0x11F41, prExtend}, // Mc KAWI SIGN KILLER + {0x11F42, 0x11F42, prExtend}, // Mn KAWI CONJOINER + {0x11F50, 0x11F59, prNumeric}, // Nd [10] KAWI DIGIT ZERO..KAWI DIGIT NINE {0x11FB0, 0x11FB0, prALetter}, // Lo LISU LETTER YHA {0x12000, 0x12399, prALetter}, // Lo [922] CUNEIFORM SIGN A..CUNEIFORM SIGN U U {0x12400, 0x1246E, prALetter}, // Nl [111] CUNEIFORM NUMERIC SIGN TWO ASH..CUNEIFORM NUMERIC SIGN NINE U VARIANT FORM {0x12480, 0x12543, prALetter}, // Lo [196] CUNEIFORM SIGN AB TIMES NUN TENU..CUNEIFORM SIGN ZU5 TIMES THREE DISH TENU {0x12F90, 0x12FF0, prALetter}, // Lo [97] CYPRO-MINOAN SIGN CM001..CYPRO-MINOAN SIGN CM114 - {0x13000, 0x1342E, prALetter}, // Lo [1071] EGYPTIAN HIEROGLYPH A001..EGYPTIAN HIEROGLYPH AA032 - {0x13430, 0x13438, prFormat}, // Cf [9] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END SEGMENT + {0x13000, 0x1342F, prALetter}, // Lo [1072] EGYPTIAN HIEROGLYPH A001..EGYPTIAN HIEROGLYPH V011D + {0x13430, 0x1343F, prFormat}, // Cf [16] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END WALLED ENCLOSURE + {0x13440, 0x13440, prExtend}, // Mn EGYPTIAN HIEROGLYPH MIRROR HORIZONTALLY + {0x13441, 0x13446, prALetter}, // Lo [6] EGYPTIAN HIEROGLYPH FULL BLANK..EGYPTIAN HIEROGLYPH WIDE LOST SIGN + {0x13447, 0x13455, prExtend}, // Mn [15] EGYPTIAN HIEROGLYPH MODIFIER DAMAGED AT TOP START..EGYPTIAN HIEROGLYPH MODIFIER DAMAGED {0x14400, 0x14646, prALetter}, // Lo [583] ANATOLIAN HIEROGLYPH A001..ANATOLIAN HIEROGLYPH A530 {0x16800, 0x16A38, prALetter}, // Lo [569] BAMUM LETTER PHASE-A NGKUE MFON..BAMUM LETTER PHASE-F VUEQ {0x16A40, 0x16A5E, prALetter}, // Lo [31] MRO LETTER TA..MRO LETTER TEK @@ -1374,6 +1393,7 @@ var workBreakCodePoints = [][3]int{ {0x1AFFD, 0x1AFFE, prKatakana}, // Lm [2] KATAKANA LETTER MINNAN NASALIZED TONE-7..KATAKANA LETTER MINNAN NASALIZED TONE-8 {0x1B000, 0x1B000, prKatakana}, // Lo KATAKANA LETTER ARCHAIC E {0x1B120, 0x1B122, prKatakana}, // Lo [3] KATAKANA LETTER ARCHAIC YI..KATAKANA LETTER ARCHAIC WU + {0x1B155, 0x1B155, prKatakana}, // Lo KATAKANA LETTER SMALL KO {0x1B164, 0x1B167, prKatakana}, // Lo [4] KATAKANA LETTER SMALL WI..KATAKANA LETTER SMALL N {0x1BC00, 0x1BC6A, prALetter}, // Lo [107] DUPLOYAN LETTER H..DUPLOYAN LETTER VOCALIC M {0x1BC70, 0x1BC7C, prALetter}, // Lo [13] DUPLOYAN AFFIX LEFT HORIZONTAL SECANT..DUPLOYAN AFFIX ATTACHED TANGENT HOOK @@ -1431,11 +1451,14 @@ var workBreakCodePoints = [][3]int{ {0x1DF00, 0x1DF09, prALetter}, // L& [10] LATIN SMALL LETTER FENG DIGRAPH WITH TRILL..LATIN SMALL LETTER T WITH HOOK AND RETROFLEX HOOK {0x1DF0A, 0x1DF0A, prALetter}, // Lo LATIN LETTER RETROFLEX CLICK WITH RETROFLEX HOOK {0x1DF0B, 0x1DF1E, prALetter}, // L& [20] LATIN SMALL LETTER ESH WITH DOUBLE BAR..LATIN SMALL LETTER S WITH CURL + {0x1DF25, 0x1DF2A, prALetter}, // L& [6] LATIN SMALL LETTER D WITH MID-HEIGHT 
LEFT HOOK..LATIN SMALL LETTER T WITH MID-HEIGHT LEFT HOOK {0x1E000, 0x1E006, prExtend}, // Mn [7] COMBINING GLAGOLITIC LETTER AZU..COMBINING GLAGOLITIC LETTER ZHIVETE {0x1E008, 0x1E018, prExtend}, // Mn [17] COMBINING GLAGOLITIC LETTER ZEMLJA..COMBINING GLAGOLITIC LETTER HERU {0x1E01B, 0x1E021, prExtend}, // Mn [7] COMBINING GLAGOLITIC LETTER SHTA..COMBINING GLAGOLITIC LETTER YATI {0x1E023, 0x1E024, prExtend}, // Mn [2] COMBINING GLAGOLITIC LETTER YU..COMBINING GLAGOLITIC LETTER SMALL YUS {0x1E026, 0x1E02A, prExtend}, // Mn [5] COMBINING GLAGOLITIC LETTER YO..COMBINING GLAGOLITIC LETTER FITA + {0x1E030, 0x1E06D, prALetter}, // Lm [62] MODIFIER LETTER CYRILLIC SMALL A..MODIFIER LETTER CYRILLIC SMALL STRAIGHT U WITH STROKE + {0x1E08F, 0x1E08F, prExtend}, // Mn COMBINING CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I {0x1E100, 0x1E12C, prALetter}, // Lo [45] NYIAKENG PUACHUE HMONG LETTER MA..NYIAKENG PUACHUE HMONG LETTER W {0x1E130, 0x1E136, prExtend}, // Mn [7] NYIAKENG PUACHUE HMONG TONE-B..NYIAKENG PUACHUE HMONG TONE-D {0x1E137, 0x1E13D, prALetter}, // Lm [7] NYIAKENG PUACHUE HMONG SIGN FOR PERSON..NYIAKENG PUACHUE HMONG SYLLABLE LENGTHENER @@ -1446,6 +1469,10 @@ var workBreakCodePoints = [][3]int{ {0x1E2C0, 0x1E2EB, prALetter}, // Lo [44] WANCHO LETTER AA..WANCHO LETTER YIH {0x1E2EC, 0x1E2EF, prExtend}, // Mn [4] WANCHO TONE TUP..WANCHO TONE KOINI {0x1E2F0, 0x1E2F9, prNumeric}, // Nd [10] WANCHO DIGIT ZERO..WANCHO DIGIT NINE + {0x1E4D0, 0x1E4EA, prALetter}, // Lo [27] NAG MUNDARI LETTER O..NAG MUNDARI LETTER ELL + {0x1E4EB, 0x1E4EB, prALetter}, // Lm NAG MUNDARI SIGN OJOD + {0x1E4EC, 0x1E4EF, prExtend}, // Mn [4] NAG MUNDARI SIGN MUHOR..NAG MUNDARI SIGN SUTUH + {0x1E4F0, 0x1E4F9, prNumeric}, // Nd [10] NAG MUNDARI DIGIT ZERO..NAG MUNDARI DIGIT NINE {0x1E7E0, 0x1E7E6, prALetter}, // Lo [7] ETHIOPIC SYLLABLE HHYA..ETHIOPIC SYLLABLE HHYO {0x1E7E8, 0x1E7EB, prALetter}, // Lo [4] ETHIOPIC SYLLABLE GURAGE HHWA..ETHIOPIC SYLLABLE HHWE {0x1E7ED, 0x1E7EE, prALetter}, // Lo [2] ETHIOPIC SYLLABLE GURAGE MWI..ETHIOPIC SYLLABLE GURAGE MWEE @@ -1740,7 +1767,8 @@ var workBreakCodePoints = [][3]int{ {0x1F6D3, 0x1F6D4, prExtendedPictographic}, // E0.0 [2] (🛓..🛔) STUPA..PAGODA {0x1F6D5, 0x1F6D5, prExtendedPictographic}, // E12.0 [1] (🛕) hindu temple {0x1F6D6, 0x1F6D7, prExtendedPictographic}, // E13.0 [2] (🛖..🛗) hut..elevator - {0x1F6D8, 0x1F6DC, prExtendedPictographic}, // E0.0 [5] (🛘..🛜) .. + {0x1F6D8, 0x1F6DB, prExtendedPictographic}, // E0.0 [4] (🛘..🛛) .. + {0x1F6DC, 0x1F6DC, prExtendedPictographic}, // E15.0 [1] (🛜) wireless {0x1F6DD, 0x1F6DF, prExtendedPictographic}, // E14.0 [3] (🛝..🛟) playground slide..ring buoy {0x1F6E0, 0x1F6E5, prExtendedPictographic}, // E0.7 [6] (🛠️..🛥️) hammer and wrench..motor boat {0x1F6E6, 0x1F6E8, prExtendedPictographic}, // E0.0 [3] (🛦..🛨) UP-POINTING MILITARY AIRPLANE..UP-POINTING SMALL AIRPLANE @@ -1757,7 +1785,7 @@ var workBreakCodePoints = [][3]int{ {0x1F6FA, 0x1F6FA, prExtendedPictographic}, // E12.0 [1] (🛺) auto rickshaw {0x1F6FB, 0x1F6FC, prExtendedPictographic}, // E13.0 [2] (🛻..🛼) pickup truck..roller skate {0x1F6FD, 0x1F6FF, prExtendedPictographic}, // E0.0 [3] (🛽..🛿) .. - {0x1F774, 0x1F77F, prExtendedPictographic}, // E0.0 [12] (🝴..🝿) .. + {0x1F774, 0x1F77F, prExtendedPictographic}, // E0.0 [12] (🝴..🝿) LOT OF FORTUNE..ORCUS {0x1F7D5, 0x1F7DF, prExtendedPictographic}, // E0.0 [11] (🟕..🟟) CIRCLED TRIANGLE.. 
{0x1F7E0, 0x1F7EB, prExtendedPictographic}, // E12.0 [12] (🟠..🟫) orange circle..brown square {0x1F7EC, 0x1F7EF, prExtendedPictographic}, // E0.0 [4] (🟬..🟯) .. @@ -1816,30 +1844,37 @@ var workBreakCodePoints = [][3]int{ {0x1FA00, 0x1FA6F, prExtendedPictographic}, // E0.0 [112] (🨀..🩯) NEUTRAL CHESS KING.. {0x1FA70, 0x1FA73, prExtendedPictographic}, // E12.0 [4] (🩰..🩳) ballet shoes..shorts {0x1FA74, 0x1FA74, prExtendedPictographic}, // E13.0 [1] (🩴) thong sandal - {0x1FA75, 0x1FA77, prExtendedPictographic}, // E0.0 [3] (🩵..🩷) .. + {0x1FA75, 0x1FA77, prExtendedPictographic}, // E15.0 [3] (🩵..🩷) light blue heart..pink heart {0x1FA78, 0x1FA7A, prExtendedPictographic}, // E12.0 [3] (🩸..🩺) drop of blood..stethoscope {0x1FA7B, 0x1FA7C, prExtendedPictographic}, // E14.0 [2] (🩻..🩼) x-ray..crutch {0x1FA7D, 0x1FA7F, prExtendedPictographic}, // E0.0 [3] (🩽..🩿) .. {0x1FA80, 0x1FA82, prExtendedPictographic}, // E12.0 [3] (🪀..🪂) yo-yo..parachute {0x1FA83, 0x1FA86, prExtendedPictographic}, // E13.0 [4] (🪃..🪆) boomerang..nesting dolls - {0x1FA87, 0x1FA8F, prExtendedPictographic}, // E0.0 [9] (🪇..🪏) .. + {0x1FA87, 0x1FA88, prExtendedPictographic}, // E15.0 [2] (🪇..🪈) maracas..flute + {0x1FA89, 0x1FA8F, prExtendedPictographic}, // E0.0 [7] (🪉..🪏) .. {0x1FA90, 0x1FA95, prExtendedPictographic}, // E12.0 [6] (🪐..🪕) ringed planet..banjo {0x1FA96, 0x1FAA8, prExtendedPictographic}, // E13.0 [19] (🪖..🪨) military helmet..rock {0x1FAA9, 0x1FAAC, prExtendedPictographic}, // E14.0 [4] (🪩..🪬) mirror ball..hamsa - {0x1FAAD, 0x1FAAF, prExtendedPictographic}, // E0.0 [3] (🪭..🪯) .. + {0x1FAAD, 0x1FAAF, prExtendedPictographic}, // E15.0 [3] (🪭..🪯) folding hand fan..khanda {0x1FAB0, 0x1FAB6, prExtendedPictographic}, // E13.0 [7] (🪰..🪶) fly..feather {0x1FAB7, 0x1FABA, prExtendedPictographic}, // E14.0 [4] (🪷..🪺) lotus..nest with eggs - {0x1FABB, 0x1FABF, prExtendedPictographic}, // E0.0 [5] (🪻..🪿) .. + {0x1FABB, 0x1FABD, prExtendedPictographic}, // E15.0 [3] (🪻..🪽) hyacinth..wing + {0x1FABE, 0x1FABE, prExtendedPictographic}, // E0.0 [1] (🪾) + {0x1FABF, 0x1FABF, prExtendedPictographic}, // E15.0 [1] (🪿) goose {0x1FAC0, 0x1FAC2, prExtendedPictographic}, // E13.0 [3] (🫀..🫂) anatomical heart..people hugging {0x1FAC3, 0x1FAC5, prExtendedPictographic}, // E14.0 [3] (🫃..🫅) pregnant man..person with crown - {0x1FAC6, 0x1FACF, prExtendedPictographic}, // E0.0 [10] (🫆..🫏) .. + {0x1FAC6, 0x1FACD, prExtendedPictographic}, // E0.0 [8] (🫆..🫍) .. + {0x1FACE, 0x1FACF, prExtendedPictographic}, // E15.0 [2] (🫎..🫏) moose..donkey {0x1FAD0, 0x1FAD6, prExtendedPictographic}, // E13.0 [7] (🫐..🫖) blueberries..teapot {0x1FAD7, 0x1FAD9, prExtendedPictographic}, // E14.0 [3] (🫗..🫙) pouring liquid..jar - {0x1FADA, 0x1FADF, prExtendedPictographic}, // E0.0 [6] (🫚..🫟) .. + {0x1FADA, 0x1FADB, prExtendedPictographic}, // E15.0 [2] (🫚..🫛) ginger root..pea pod + {0x1FADC, 0x1FADF, prExtendedPictographic}, // E0.0 [4] (🫜..🫟) .. {0x1FAE0, 0x1FAE7, prExtendedPictographic}, // E14.0 [8] (🫠..🫧) melting face..bubbles - {0x1FAE8, 0x1FAEF, prExtendedPictographic}, // E0.0 [8] (🫨..🫯) .. + {0x1FAE8, 0x1FAE8, prExtendedPictographic}, // E15.0 [1] (🫨) shaking face + {0x1FAE9, 0x1FAEF, prExtendedPictographic}, // E0.0 [7] (🫩..🫯) .. {0x1FAF0, 0x1FAF6, prExtendedPictographic}, // E14.0 [7] (🫰..🫶) hand with index finger and thumb crossed..heart hands - {0x1FAF7, 0x1FAFF, prExtendedPictographic}, // E0.0 [9] (🫷..🫿) .. 
+ {0x1FAF7, 0x1FAF8, prExtendedPictographic}, // E15.0 [2] (🫷..🫸) leftwards pushing hand..rightwards pushing hand + {0x1FAF9, 0x1FAFF, prExtendedPictographic}, // E0.0 [7] (🫹..🫿) .. {0x1FBF0, 0x1FBF9, prNumeric}, // Nd [10] SEGMENTED DIGIT ZERO..SEGMENTED DIGIT NINE {0x1FC00, 0x1FFFD, prExtendedPictographic}, // E0.0[1022] (🰀..🿽) .. {0xE0001, 0xE0001, prFormat}, // Cf LANGUAGE TAG diff --git a/vendor/github.com/rivo/uniseg/wordrules.go b/vendor/github.com/rivo/uniseg/wordrules.go index 325407e40..57a8c6831 100644 --- a/vendor/github.com/rivo/uniseg/wordrules.go +++ b/vendor/github.com/rivo/uniseg/wordrules.go @@ -22,82 +22,121 @@ const ( wbZWJBit = 16 // This bit is set for any states followed by at least one zero-width joiner (see WB4 and WB3c). ) -// The word break parser's breaking instructions. -const ( - wbDontBreak = iota - wbBreak -) - -// The word break parser's state transitions. It's anologous to grTransitions, -// see comments there for details. Unicode version 14.0.0. -var wbTransitions = map[[2]int][3]int{ +// wbTransitions implements the word break parser's state transitions. It's +// anologous to [grTransitions], see comments there for details. +// +// Unicode version 15.0.0. +func wbTransitions(state, prop int) (newState int, wordBreak bool, rule int) { + switch uint64(state) | uint64(prop)<<32 { // WB3b. - {wbAny, prNewline}: {wbNewline, wbBreak, 32}, - {wbAny, prCR}: {wbCR, wbBreak, 32}, - {wbAny, prLF}: {wbLF, wbBreak, 32}, + case wbAny | prNewline<<32: + return wbNewline, true, 32 + case wbAny | prCR<<32: + return wbCR, true, 32 + case wbAny | prLF<<32: + return wbLF, true, 32 // WB3a. - {wbNewline, prAny}: {wbAny, wbBreak, 31}, - {wbCR, prAny}: {wbAny, wbBreak, 31}, - {wbLF, prAny}: {wbAny, wbBreak, 31}, + case wbNewline | prAny<<32: + return wbAny, true, 31 + case wbCR | prAny<<32: + return wbAny, true, 31 + case wbLF | prAny<<32: + return wbAny, true, 31 // WB3. - {wbCR, prLF}: {wbLF, wbDontBreak, 30}, + case wbCR | prLF<<32: + return wbLF, false, 30 // WB3d. - {wbAny, prWSegSpace}: {wbWSegSpace, wbBreak, 9990}, - {wbWSegSpace, prWSegSpace}: {wbWSegSpace, wbDontBreak, 34}, + case wbAny | prWSegSpace<<32: + return wbWSegSpace, true, 9990 + case wbWSegSpace | prWSegSpace<<32: + return wbWSegSpace, false, 34 // WB5. - {wbAny, prALetter}: {wbALetter, wbBreak, 9990}, - {wbAny, prHebrewLetter}: {wbHebrewLetter, wbBreak, 9990}, - {wbALetter, prALetter}: {wbALetter, wbDontBreak, 50}, - {wbALetter, prHebrewLetter}: {wbHebrewLetter, wbDontBreak, 50}, - {wbHebrewLetter, prALetter}: {wbALetter, wbDontBreak, 50}, - {wbHebrewLetter, prHebrewLetter}: {wbHebrewLetter, wbDontBreak, 50}, + case wbAny | prALetter<<32: + return wbALetter, true, 9990 + case wbAny | prHebrewLetter<<32: + return wbHebrewLetter, true, 9990 + case wbALetter | prALetter<<32: + return wbALetter, false, 50 + case wbALetter | prHebrewLetter<<32: + return wbHebrewLetter, false, 50 + case wbHebrewLetter | prALetter<<32: + return wbALetter, false, 50 + case wbHebrewLetter | prHebrewLetter<<32: + return wbHebrewLetter, false, 50 // WB7. Transitions to wbWB7 handled by transitionWordBreakState(). - {wbWB7, prALetter}: {wbALetter, wbDontBreak, 70}, - {wbWB7, prHebrewLetter}: {wbHebrewLetter, wbDontBreak, 70}, + case wbWB7 | prALetter<<32: + return wbALetter, false, 70 + case wbWB7 | prHebrewLetter<<32: + return wbHebrewLetter, false, 70 // WB7a. - {wbHebrewLetter, prSingleQuote}: {wbAny, wbDontBreak, 71}, + case wbHebrewLetter | prSingleQuote<<32: + return wbAny, false, 71 // WB7c. 
Transitions to wbWB7c handled by transitionWordBreakState(). - {wbWB7c, prHebrewLetter}: {wbHebrewLetter, wbDontBreak, 73}, + case wbWB7c | prHebrewLetter<<32: + return wbHebrewLetter, false, 73 // WB8. - {wbAny, prNumeric}: {wbNumeric, wbBreak, 9990}, - {wbNumeric, prNumeric}: {wbNumeric, wbDontBreak, 80}, + case wbAny | prNumeric<<32: + return wbNumeric, true, 9990 + case wbNumeric | prNumeric<<32: + return wbNumeric, false, 80 // WB9. - {wbALetter, prNumeric}: {wbNumeric, wbDontBreak, 90}, - {wbHebrewLetter, prNumeric}: {wbNumeric, wbDontBreak, 90}, + case wbALetter | prNumeric<<32: + return wbNumeric, false, 90 + case wbHebrewLetter | prNumeric<<32: + return wbNumeric, false, 90 // WB10. - {wbNumeric, prALetter}: {wbALetter, wbDontBreak, 100}, - {wbNumeric, prHebrewLetter}: {wbHebrewLetter, wbDontBreak, 100}, + case wbNumeric | prALetter<<32: + return wbALetter, false, 100 + case wbNumeric | prHebrewLetter<<32: + return wbHebrewLetter, false, 100 // WB11. Transitions to wbWB11 handled by transitionWordBreakState(). - {wbWB11, prNumeric}: {wbNumeric, wbDontBreak, 110}, + case wbWB11 | prNumeric<<32: + return wbNumeric, false, 110 // WB13. - {wbAny, prKatakana}: {wbKatakana, wbBreak, 9990}, - {wbKatakana, prKatakana}: {wbKatakana, wbDontBreak, 130}, + case wbAny | prKatakana<<32: + return wbKatakana, true, 9990 + case wbKatakana | prKatakana<<32: + return wbKatakana, false, 130 // WB13a. - {wbAny, prExtendNumLet}: {wbExtendNumLet, wbBreak, 9990}, - {wbALetter, prExtendNumLet}: {wbExtendNumLet, wbDontBreak, 131}, - {wbHebrewLetter, prExtendNumLet}: {wbExtendNumLet, wbDontBreak, 131}, - {wbNumeric, prExtendNumLet}: {wbExtendNumLet, wbDontBreak, 131}, - {wbKatakana, prExtendNumLet}: {wbExtendNumLet, wbDontBreak, 131}, - {wbExtendNumLet, prExtendNumLet}: {wbExtendNumLet, wbDontBreak, 131}, + case wbAny | prExtendNumLet<<32: + return wbExtendNumLet, true, 9990 + case wbALetter | prExtendNumLet<<32: + return wbExtendNumLet, false, 131 + case wbHebrewLetter | prExtendNumLet<<32: + return wbExtendNumLet, false, 131 + case wbNumeric | prExtendNumLet<<32: + return wbExtendNumLet, false, 131 + case wbKatakana | prExtendNumLet<<32: + return wbExtendNumLet, false, 131 + case wbExtendNumLet | prExtendNumLet<<32: + return wbExtendNumLet, false, 131 // WB13b. - {wbExtendNumLet, prALetter}: {wbALetter, wbDontBreak, 132}, - {wbExtendNumLet, prHebrewLetter}: {wbHebrewLetter, wbDontBreak, 132}, - {wbExtendNumLet, prNumeric}: {wbNumeric, wbDontBreak, 132}, - {wbExtendNumLet, prKatakana}: {prKatakana, wbDontBreak, 132}, + case wbExtendNumLet | prALetter<<32: + return wbALetter, false, 132 + case wbExtendNumLet | prHebrewLetter<<32: + return wbHebrewLetter, false, 132 + case wbExtendNumLet | prNumeric<<32: + return wbNumeric, false, 132 + case wbExtendNumLet | prKatakana<<32: + return wbKatakana, false, 132 + + default: + return -1, false, -1 + } } // transitionWordBreakState determines the new state of the word break parser @@ -141,30 +180,27 @@ func transitionWordBreakState(state int, r rune, b []byte, str string) (newState // Find the applicable transition in the table. var rule int - transition, ok := wbTransitions[[2]int{state, nextProperty}] - if ok { - // We have a specific transition. We'll use it. - newState, wordBreak, rule = transition[0], transition[1] == wbBreak, transition[2] - } else { + newState, wordBreak, rule = wbTransitions(state, nextProperty) + if newState < 0 { // No specific transition found. Try the less specific ones. 
- transAnyProp, okAnyProp := wbTransitions[[2]int{state, prAny}] - transAnyState, okAnyState := wbTransitions[[2]int{wbAny, nextProperty}] - if okAnyProp && okAnyState { + anyPropState, anyPropWordBreak, anyPropRule := wbTransitions(state, prAny) + anyStateState, anyStateWordBreak, anyStateRule := wbTransitions(wbAny, nextProperty) + if anyPropState >= 0 && anyStateState >= 0 { // Both apply. We'll use a mix (see comments for grTransitions). - newState, wordBreak, rule = transAnyState[0], transAnyState[1] == wbBreak, transAnyState[2] - if transAnyProp[2] < transAnyState[2] { - wordBreak, rule = transAnyProp[1] == wbBreak, transAnyProp[2] + newState, wordBreak, rule = anyStateState, anyStateWordBreak, anyStateRule + if anyPropRule < anyStateRule { + wordBreak, rule = anyPropWordBreak, anyPropRule } - } else if okAnyProp { + } else if anyPropState >= 0 { // We only have a specific state. - newState, wordBreak, rule = transAnyProp[0], transAnyProp[1] == wbBreak, transAnyProp[2] + newState, wordBreak, rule = anyPropState, anyPropWordBreak, anyPropRule // This branch will probably never be reached because okAnyState will // always be true given the current transition map. But we keep it here // for future modifications to the transition map where this may not be // true anymore. - } else if okAnyState { + } else if anyStateState >= 0 { // We only have a specific property. - newState, wordBreak, rule = transAnyState[0], transAnyState[1] == wbBreak, transAnyState[2] + newState, wordBreak, rule = anyStateState, anyStateWordBreak, anyStateRule } else { // No known transition. WB999: Any ÷ Any. newState, wordBreak, rule = wbAny, true, 9990 diff --git a/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go b/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go index d38d707db..38f80d5ae 100644 --- a/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go +++ b/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go @@ -338,7 +338,7 @@ func (e Extensions) Render() ([]pkix.Extension, error) { return exts, nil } -func parseExtensions(ext []pkix.Extension) (Extensions, error) { +func ParseExtensions(ext []pkix.Extension) (Extensions, error) { out := Extensions{} for _, e := range ext { diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go index 2263abd78..93dce8715 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go @@ -39,8 +39,7 @@ import ( type CoseV001Schema struct { // data - // Required: true - Data *CoseV001SchemaData `json:"data"` + Data *CoseV001SchemaData `json:"data,omitempty"` // The COSE Sign1 Message // Format: byte @@ -71,9 +70,8 @@ func (m *CoseV001Schema) Validate(formats strfmt.Registry) error { } func (m *CoseV001Schema) validateData(formats strfmt.Registry) error { - - if err := validate.Required("data", "body", m.Data); err != nil { - return err + if swag.IsZero(m.Data) { // not required + return nil } if m.Data != nil { @@ -117,6 +115,10 @@ func (m *CoseV001Schema) contextValidateData(ctx context.Context, formats strfmt if m.Data != nil { + if swag.IsZero(m.Data) { // not required + return nil + } + if err := m.Data.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("data") diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go 
b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go index f8bf233ed..3b906ae29 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go @@ -277,10 +277,10 @@ type HashedrekordV001SchemaDataHash struct { // The hashing function used to compute the hash value // Required: true - // Enum: [sha256] + // Enum: [sha256 sha384 sha512] Algorithm *string `json:"algorithm"` - // The hash value for the content + // The hash value for the content, as represented by a lower case hexadecimal string // Required: true Value *string `json:"value"` } @@ -307,7 +307,7 @@ var hashedrekordV001SchemaDataHashTypeAlgorithmPropEnum []interface{} func init() { var res []string - if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + if err := json.Unmarshal([]byte(`["sha256","sha384","sha512"]`), &res); err != nil { panic(err) } for _, v := range res { @@ -319,6 +319,12 @@ const ( // HashedrekordV001SchemaDataHashAlgorithmSha256 captures enum value "sha256" HashedrekordV001SchemaDataHashAlgorithmSha256 string = "sha256" + + // HashedrekordV001SchemaDataHashAlgorithmSha384 captures enum value "sha384" + HashedrekordV001SchemaDataHashAlgorithmSha384 string = "sha384" + + // HashedrekordV001SchemaDataHashAlgorithmSha512 captures enum value "sha512" + HashedrekordV001SchemaDataHashAlgorithmSha512 string = "sha512" ) // prop value enum diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go index ec271c174..ee32ded41 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go @@ -95,7 +95,7 @@ type LogEntryAnon struct { // Required: true Body interface{} `json:"body"` - // integrated time + // The time the entry was added to the log as a Unix timestamp in seconds // Required: true IntegratedTime *int64 `json:"integratedTime"` diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/ecdsa.go b/vendor/github.com/sigstore/sigstore/pkg/signature/ecdsa.go index 57199db45..9cee68d13 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/ecdsa.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/ecdsa.go @@ -20,9 +20,11 @@ import ( "crypto/ecdsa" "crypto/elliptic" "crypto/rand" + "encoding/asn1" "errors" "fmt" "io" + "math/big" "github.com/sigstore/sigstore/pkg/signature/options" ) @@ -190,8 +192,23 @@ func (e ECDSAVerifier) VerifySignature(signature, message io.Reader, opts ...Ver return fmt.Errorf("invalid ECDSA public key for %s", e.publicKey.Params().Name) } - if !ecdsa.VerifyASN1(e.publicKey, digest, sigBytes) { - return errors.New("invalid signature when validating ASN.1 encoded signature") + asnParseTest := struct { + R, S *big.Int + }{} + if _, err := asn1.Unmarshal(sigBytes, &asnParseTest); err == nil { + if !ecdsa.VerifyASN1(e.publicKey, digest, sigBytes) { + return errors.New("invalid signature when validating ASN.1 encoded signature") + } + } else { + // deal with IEEE P1363 encoding of signatures + if len(sigBytes) == 0 || len(sigBytes) > 132 || len(sigBytes)%2 != 0 { + return errors.New("ecdsa: Invalid IEEE_P1363 encoded bytes") + } + r := new(big.Int).SetBytes(sigBytes[:len(sigBytes)/2]) + s := new(big.Int).SetBytes(sigBytes[len(sigBytes)/2:]) + if !ecdsa.Verify(e.publicKey, digest, r, s) { + return errors.New("invalid signature when 
validating IEEE_P1363 encoded signature") + } } return nil diff --git a/vendor/github.com/stefanberger/go-pkcs11uri/.travis.yml b/vendor/github.com/stefanberger/go-pkcs11uri/.travis.yml index f5f274f96..45c00cb9c 100644 --- a/vendor/github.com/stefanberger/go-pkcs11uri/.travis.yml +++ b/vendor/github.com/stefanberger/go-pkcs11uri/.travis.yml @@ -5,7 +5,7 @@ os: - linux go: - - "1.13.x" + - "1.19.x" matrix: include: @@ -17,7 +17,7 @@ addons: - softhsm2 install: - - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.30.0 + - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.53.2 script: - make diff --git a/vendor/github.com/stefanberger/go-pkcs11uri/pkcs11uri.go b/vendor/github.com/stefanberger/go-pkcs11uri/pkcs11uri.go index 39b06548e..82c32e3c8 100644 --- a/vendor/github.com/stefanberger/go-pkcs11uri/pkcs11uri.go +++ b/vendor/github.com/stefanberger/go-pkcs11uri/pkcs11uri.go @@ -19,7 +19,6 @@ package pkcs11uri import ( "errors" "fmt" - "io/ioutil" "net/url" "os" "path/filepath" @@ -128,6 +127,12 @@ func (uri *Pkcs11URI) SetPathAttribute(name, value string) error { return uri.setAttribute(uri.pathAttributes, name, value) } +// SetPathAttributeUnencoded sets the value for a path attribute given as byte[]. +// The value must not have been pct-encoded already. +func (uri *Pkcs11URI) SetPathAttributeUnencoded(name string, value []byte) { + uri.pathAttributes[name] = string(value) +} + // AddPathAttribute adds a path attribute; it returns an error if an attribute with the same // name already existed or if the given value cannot be pct-unescaped func (uri *Pkcs11URI) AddPathAttribute(name, value string) error { @@ -137,6 +142,16 @@ func (uri *Pkcs11URI) AddPathAttribute(name, value string) error { return uri.SetPathAttribute(name, value) } +// AddPathAttributeUnencoded adds a path attribute given as byte[] which must not already be pct-encoded; +// it returns an error if an attribute with the same name already existed +func (uri *Pkcs11URI) AddPathAttributeUnencoded(name string, value []byte) error { + if _, ok := uri.pathAttributes[name]; ok { + return errors.New("duplicate path attribute") + } + uri.SetPathAttributeUnencoded(name, value) + return nil +} + // RemovePathAttribute removes a path attribute func (uri *Pkcs11URI) RemovePathAttribute(name string) { delete(uri.pathAttributes, name) @@ -173,6 +188,12 @@ func (uri *Pkcs11URI) SetQueryAttribute(name, value string) error { return uri.setAttribute(uri.queryAttributes, name, value) } +// SetQueryAttributeUnencoded sets the value for a quiery attribute given as byte[]. +// The value must not have been pct-encoded already. 
+func (uri *Pkcs11URI) SetQueryAttributeUnencoded(name string, value []byte) { + uri.queryAttributes[name] = string(value) +} + // AddQueryAttribute adds a query attribute; it returns an error if an attribute with the same // name already existed or if the given value cannot be pct-unescaped func (uri *Pkcs11URI) AddQueryAttribute(name, value string) error { @@ -182,6 +203,16 @@ func (uri *Pkcs11URI) AddQueryAttribute(name, value string) error { return uri.SetQueryAttribute(name, value) } +// AddQueryAttributeUnencoded adds a query attribute given as byte[] which must not already be pct-encoded; +// it returns an error if an attribute with the same name already existed +func (uri *Pkcs11URI) AddQueryAttributeUnencoded(name string, value []byte) error { + if _, ok := uri.queryAttributes[name]; ok { + return errors.New("duplicate query attribute") + } + uri.SetQueryAttributeUnencoded(name, value) + return nil +} + // RemoveQueryAttribute removes a path attribute func (uri *Pkcs11URI) RemoveQueryAttribute(name string) { delete(uri.queryAttributes, name) @@ -257,7 +288,7 @@ func (uri *Pkcs11URI) GetPIN() (string, error) { if !filepath.IsAbs(pinuri.Path) { return "", fmt.Errorf("PIN URI path '%s' is not absolute", pinuri.Path) } - pin, err := ioutil.ReadFile(pinuri.Path) + pin, err := os.ReadFile(pinuri.Path) if err != nil { return "", fmt.Errorf("Could not open PIN file: %s", err) } @@ -426,7 +457,7 @@ func (uri *Pkcs11URI) GetModule() (string, error) { moduleName = strings.ToLower(moduleName) for _, dir := range searchdirs { - files, err := ioutil.ReadDir(dir) + files, err := os.ReadDir(dir) if err != nil { continue } diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go index 40318aba6..822ca37dd 100644 --- a/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go @@ -1,4 +1,4 @@ -// Copyright (c) 2021-2023, Sylabs Inc. All rights reserved. +// Copyright (c) 2021-2024, Sylabs Inc. All rights reserved. // This software is licensed under a 3-clause BSD license. Please consult the // LICENSE file distributed with the sources of this project regarding your // rights to use or distribute this software. @@ -95,7 +95,7 @@ func OptObjectTime(t time.Time) DescriptorInputOpt { // OptMetadata marshals metadata from md into the "extra" field of d. func OptMetadata(md encoding.BinaryMarshaler) DescriptorInputOpt { - return func(t DataType, opts *descriptorOpts) error { + return func(_ DataType, opts *descriptorOpts) error { opts.md = md return nil } diff --git a/vendor/github.com/ulikunitz/xz/README.md b/vendor/github.com/ulikunitz/xz/README.md index 554718521..56d49275a 100644 --- a/vendor/github.com/ulikunitz/xz/README.md +++ b/vendor/github.com/ulikunitz/xz/README.md @@ -75,3 +75,14 @@ To decompress it use the following command. $ gxz -d bigfile.xz +## Security & Vulnerabilities + +The security policy is documented in [SECURITY.md](SECURITY.md). + +The software is not affected by the supply chain attack on the original xz +implementation, [CVE-2024-3094](https://nvd.nist.gov/vuln/detail/CVE-2024-3094). +This implementation doesn't share any files with the original xz implementation +and no patches or pull requests are accepted without a review. + +All security advisories for this project are published under +[github.com/ulikunitz/xz/security/advisories](https://github.com/ulikunitz/xz/security/advisories?state=published). 
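The sigstore ECDSAVerifier hunk earlier in this diff adds a fallback that accepts IEEE P1363 (raw r||s) signatures alongside the existing ASN.1/DER path. Below is a minimal standalone sketch of the two encodings using only the Go standard library; the key, message, and sizes are illustrative and not taken from sigstore.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"math/big"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	digest := sha256.Sum256([]byte("example message"))

	// ASN.1/DER: a SEQUENCE of the two INTEGERs r and s (what VerifyASN1 expects).
	derSig, err := ecdsa.SignASN1(rand.Reader, key, digest[:])
	if err != nil {
		panic(err)
	}
	fmt.Println("ASN.1 signature verifies:", ecdsa.VerifyASN1(&key.PublicKey, digest[:], derSig))

	// IEEE P1363: r and s as fixed-width big-endian integers, simply concatenated.
	r, s, err := ecdsa.Sign(rand.Reader, key, digest[:])
	if err != nil {
		panic(err)
	}
	size := (key.Curve.Params().BitSize + 7) / 8
	p1363 := make([]byte, 2*size)
	r.FillBytes(p1363[:size])
	s.FillBytes(p1363[size:])

	// Decoding mirrors the fallback in the diff: split the blob in half and
	// rebuild r and s before calling the plain Verify.
	rr := new(big.Int).SetBytes(p1363[:len(p1363)/2])
	ss := new(big.Int).SetBytes(p1363[len(p1363)/2:])
	fmt.Println("P1363 signature verifies:", ecdsa.Verify(&key.PublicKey, digest[:], rr, ss))
}

The length check in the diff (non-empty, even, at most 132 bytes) matches this split-in-half decoding: 132 bytes is two 66-byte scalars, the maximum for P-521.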
diff --git a/vendor/github.com/ulikunitz/xz/SECURITY.md b/vendor/github.com/ulikunitz/xz/SECURITY.md index 5f7ec01b3..1bdc88878 100644 --- a/vendor/github.com/ulikunitz/xz/SECURITY.md +++ b/vendor/github.com/ulikunitz/xz/SECURITY.md @@ -6,5 +6,14 @@ Currently the last minor version v0.5.x is supported. ## Reporting a Vulnerability -Report a vulnerability by creating a Github issue at -. Expect a response in a week. +You can privately report a vulnerability following this +[procedure](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability). +Alternatively you can create a Github issue at +. + +In both cases expect a response in at least 7 days. + +## Security Advisories + +All security advisories for this project are published under +[github.com/ulikunitz/xz/security/advisories](https://github.com/ulikunitz/xz/security/advisories?state=published). diff --git a/vendor/github.com/ulikunitz/xz/TODO.md b/vendor/github.com/ulikunitz/xz/TODO.md index a3d6f1925..c466ffeda 100644 --- a/vendor/github.com/ulikunitz/xz/TODO.md +++ b/vendor/github.com/ulikunitz/xz/TODO.md @@ -86,6 +86,11 @@ ## Log +### 2024-04-03 + +Release v0.5.12 updates README.md and SECURITY.md to address the supply chain +attack on the original xz implementation. + ### 2022-12-12 Matt Dantay (@bodgit) reported an issue with the LZMA reader. The implementation @@ -99,7 +104,7 @@ it. Mituo Heijo has fuzzed xz and found a bug in the function readIndexBody. The function allocated a slice of records immediately after reading the value -without further checks. Sincex the number has been too large the make function +without further checks. Since the number has been too large the make function did panic. The fix is to check the number against the expected number of records before allocating the records. 
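Earlier in this diff, go-pkcs11uri gains *Unencoded setters that take raw bytes and leave pct-encoding to the library, complementing the existing setters that expect already-encoded values. A short usage sketch follows; New and Format are assumed from the package's existing, unchanged API, and the attribute values are made up.

package main

import (
	"fmt"

	pkcs11uri "github.com/stefanberger/go-pkcs11uri"
)

func main() {
	// New and Format are assumed here; only the *Unencoded setters below
	// appear in the diff hunks above.
	uri := pkcs11uri.New()

	// Raw binary values (e.g. a key's CKA_ID) no longer need to be
	// pct-encoded by the caller; the library escapes them on output.
	if err := uri.AddPathAttributeUnencoded("id", []byte{0x01, 0x02, 0xff}); err != nil {
		panic(err)
	}
	uri.SetQueryAttributeUnencoded("module-name", []byte("softhsm2"))

	s, err := uri.Format()
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // something like pkcs11:id=%01%02%FF?module-name=softhsm2
}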
diff --git a/vendor/github.com/vbauerster/mpb/v8/bar.go b/vendor/github.com/vbauerster/mpb/v8/bar.go index bca798298..b78fa47d1 100644 --- a/vendor/github.com/vbauerster/mpb/v8/bar.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar.go @@ -44,12 +44,11 @@ type bState struct { rmOnComplete bool noPop bool autoRefresh bool - aDecorators []decor.Decorator - pDecorators []decor.Decorator + buffers [3]*bytes.Buffer + decorators [2][]decor.Decorator averageDecorators []decor.AverageDecorator ewmaDecorators []decor.EwmaDecorator shutdownListeners []decor.ShutdownListener - buffers [3]*bytes.Buffer filler BarFiller extender extenderFunc renderReq chan<- time.Time @@ -159,10 +158,7 @@ func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) { iter := make(chan decor.Decorator) select { case b.operateState <- func(s *bState) { - for _, decorators := range [][]decor.Decorator{ - s.pDecorators, - s.aDecorators, - } { + for _, decorators := range s.decorators { for _, d := range decorators { iter <- d } @@ -250,9 +246,7 @@ func (b *Bar) EwmaSetCurrent(current int64, iterDur time.Duration) { } select { case b.operateState <- func(s *bState) { - if n := current - s.current; n > 0 { - s.decoratorEwmaUpdate(n, iterDur) - } + s.decoratorEwmaUpdate(current-s.current, iterDur) s.current = current if s.triggerComplete && s.current >= s.total { s.current = s.total @@ -411,22 +405,21 @@ func (b *Bar) serve(ctx context.Context, bs *bState) { func (b *Bar) render(tw int) { fn := func(s *bState) { - var rows []io.Reader + frame := new(renderFrame) stat := newStatistics(tw, s) r, err := s.draw(stat) if err != nil { - b.frameCh <- &renderFrame{err: err} + for _, buf := range s.buffers { + buf.Reset() + } + frame.err = err + b.frameCh <- frame return } - rows = append(rows, r) + frame.rows = append(frame.rows, r) if s.extender != nil { - rows, err = s.extender(rows, stat) - if err != nil { - b.frameCh <- &renderFrame{err: err} - return - } + frame.rows, frame.err = s.extender(frame.rows, stat) } - frame := &renderFrame{rows: rows} if s.completed || s.aborted { frame.shutdown = s.shutdown frame.rmOnComplete = s.rmOnComplete @@ -484,18 +477,7 @@ func (b *Bar) wSyncTable() syncTable { } } -func (s *bState) draw(stat decor.Statistics) (io.Reader, error) { - r, err := s.drawImpl(stat) - if err != nil { - for _, b := range s.buffers { - b.Reset() - } - return nil, err - } - return io.MultiReader(r, strings.NewReader("\n")), nil -} - -func (s *bState) drawImpl(stat decor.Statistics) (io.Reader, error) { +func (s *bState) draw(stat decor.Statistics) (_ io.Reader, err error) { decorFiller := func(buf *bytes.Buffer, decorators []decor.Decorator) (err error) { for _, d := range decorators { // need to call Decor in any case becase of width synchronization @@ -515,45 +497,45 @@ func (s *bState) drawImpl(stat decor.Statistics) (io.Reader, error) { return err } - bufP, bufB, bufA := s.buffers[0], s.buffers[1], s.buffers[2] - - err := eitherError(decorFiller(bufP, s.pDecorators), decorFiller(bufA, s.aDecorators)) - if err != nil { - return nil, err - } - - if !s.trimSpace && stat.AvailableWidth >= 2 { - stat.AvailableWidth -= 2 - writeFiller := func(buf *bytes.Buffer) error { - return s.filler.Fill(buf, stat) - } - for _, fn := range []func(*bytes.Buffer) error{ - writeSpace, - writeFiller, - writeSpace, - } { - if err := fn(bufB); err != nil { - return nil, err - } - } - } else { - err := s.filler.Fill(bufB, stat) + for i, buf := range s.buffers[:2] { + err = decorFiller(buf, s.decorators[i]) if err != nil { return nil, err } } - 
return io.MultiReader(bufP, bufB, bufA), nil + spaces := []io.Reader{ + strings.NewReader(" "), + strings.NewReader(" "), + } + if s.trimSpace || stat.AvailableWidth < 2 { + for _, r := range spaces { + _, _ = io.Copy(io.Discard, r) + } + } else { + stat.AvailableWidth -= 2 + } + + err = s.filler.Fill(s.buffers[2], stat) + if err != nil { + return nil, err + } + + return io.MultiReader( + s.buffers[0], + spaces[0], + s.buffers[2], + spaces[1], + s.buffers[1], + strings.NewReader("\n"), + ), nil } func (s *bState) wSyncTable() (table syncTable) { var count int var row []chan int - for i, decorators := range [][]decor.Decorator{ - s.pDecorators, - s.aDecorators, - } { + for i, decorators := range s.decorators { for _, d := range decorators { if ch, ok := d.Sync(); ok { row = append(row, ch) @@ -640,16 +622,3 @@ func unwrap(d decor.Decorator) decor.Decorator { } return d } - -func writeSpace(buf *bytes.Buffer) error { - return buf.WriteByte(' ') -} - -func eitherError(errors ...error) error { - for _, err := range errors { - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/vbauerster/mpb/v8/bar_option.go b/vendor/github.com/vbauerster/mpb/v8/bar_option.go index d3cb3e2c0..d5aa5b9fa 100644 --- a/vendor/github.com/vbauerster/mpb/v8/bar_option.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar_option.go @@ -20,19 +20,19 @@ func inspect(decorators []decor.Decorator) (dest []decor.Decorator) { return } -// AppendDecorators let you inject decorators to the bar's right side. -func AppendDecorators(decorators ...decor.Decorator) BarOption { - decorators = inspect(decorators) - return func(s *bState) { - s.aDecorators = decorators - } -} - // PrependDecorators let you inject decorators to the bar's left side. func PrependDecorators(decorators ...decor.Decorator) BarOption { decorators = inspect(decorators) return func(s *bState) { - s.pDecorators = decorators + s.decorators[0] = decorators + } +} + +// AppendDecorators let you inject decorators to the bar's right side. +func AppendDecorators(decorators ...decor.Decorator) BarOption { + decorators = inspect(decorators) + return func(s *bState) { + s.decorators[1] = decorators } } @@ -129,11 +129,11 @@ func makeExtenderFunc(filler BarFiller, rev bool) extenderFunc { for { b, err := buf.ReadBytes('\n') if err != nil { + buf.Reset() break } rows = append(rows, bytes.NewReader(b)) } - buf.Reset() return rows, err } diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/decorator.go b/vendor/github.com/vbauerster/mpb/v8/decor/decorator.go index 31062ebd3..6bec1151b 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/decorator.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/decorator.go @@ -85,7 +85,7 @@ type Synchronizer interface { // in order to format string according to decor.WC settings. // No need to implement manually as long as decor.WC is embedded. type Formatter interface { - Format(string) (str string, viewWidth int) + Format(string) (_ string, width int) } // Wrapper interface. @@ -138,17 +138,17 @@ type WC struct { // Format should be called by any Decorator implementation. // Returns formatted string and its view (visual) width. 
func (wc WC) Format(str string) (string, int) { - viewWidth := runewidth.StringWidth(str) - if wc.W > viewWidth { - viewWidth = wc.W + width := runewidth.StringWidth(str) + if wc.W > width { + width = wc.W } else if (wc.C & DextraSpace) != 0 { - viewWidth++ + width++ } if (wc.C & DSyncWidth) != 0 { - wc.wsync <- viewWidth - viewWidth = <-wc.wsync + wc.wsync <- width + width = <-wc.wsync } - return wc.fill(str, viewWidth), viewWidth + return wc.fill(str, width), width } // Init initializes width related config. diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/eta.go b/vendor/github.com/vbauerster/mpb/v8/decor/eta.go index ecf87b186..c4cb2a14b 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/eta.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/eta.go @@ -39,7 +39,7 @@ func EwmaETA(style TimeStyle, age float64, wcc ...WC) Decorator { } else { average = ewma.NewMovingAverage(age) } - return MovingAverageETA(style, NewThreadSafeMovingAverage(average), nil, wcc...) + return MovingAverageETA(style, average, nil, wcc...) } // MovingAverageETA decorator relies on MovingAverage implementation to calculate its average. diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/moving_average.go b/vendor/github.com/vbauerster/mpb/v8/decor/moving_average.go index a1be8ada2..165ef1eb5 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/moving_average.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/moving_average.go @@ -70,5 +70,5 @@ func (s *medianWindow) Set(value float64) { // NewMedian is fixed last 3 samples median MovingAverage. func NewMedian() ewma.MovingAverage { - return NewThreadSafeMovingAverage(new(medianWindow)) + return new(medianWindow) } diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go b/vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go index 50a1dfbb5..3e35ddfd8 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go @@ -56,7 +56,7 @@ type onAbortMetaWrapper struct { } func (d onAbortMetaWrapper) Decor(s Statistics) (string, int) { - if s.Completed { + if s.Aborted { str, width := d.Decorator.Decor(s) return d.fn(str), width } diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/speed.go b/vendor/github.com/vbauerster/mpb/v8/decor/speed.go index f3355c425..a90a8edd8 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/speed.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/speed.go @@ -46,7 +46,7 @@ func EwmaSpeed(unit interface{}, format string, age float64, wcc ...WC) Decorato } else { average = ewma.NewMovingAverage(age) } - return MovingAverageSpeed(unit, format, NewThreadSafeMovingAverage(average), wcc...) + return MovingAverageSpeed(unit, format, average, wcc...) 
} // MovingAverageSpeed decorator relies on MovingAverage implementation @@ -82,7 +82,7 @@ type movingAverageSpeed struct { zDur time.Duration } -func (d *movingAverageSpeed) Decor(s Statistics) (string, int) { +func (d *movingAverageSpeed) Decor(_ Statistics) (string, int) { var str string // ewma implementation may return 0 before accumulating certain number of samples if v := d.average.Value(); v != 0 { diff --git a/vendor/github.com/vbauerster/mpb/v8/internal/width.go b/vendor/github.com/vbauerster/mpb/v8/internal/width.go index 7677e404a..842e811f0 100644 --- a/vendor/github.com/vbauerster/mpb/v8/internal/width.go +++ b/vendor/github.com/vbauerster/mpb/v8/internal/width.go @@ -3,7 +3,7 @@ package internal // CheckRequestedWidth checks that requested width doesn't overflow // available width func CheckRequestedWidth(requested, available int) int { - if requested < 1 || requested >= available { + if requested < 1 || requested > available { return available } return requested diff --git a/vendor/github.com/vbauerster/mpb/v8/progress.go b/vendor/github.com/vbauerster/mpb/v8/progress.go index 3bdc75b42..014a0e4a3 100644 --- a/vendor/github.com/vbauerster/mpb/v8/progress.go +++ b/vendor/github.com/vbauerster/mpb/v8/progress.go @@ -14,9 +14,7 @@ import ( "github.com/vbauerster/mpb/v8/decor" ) -const ( - defaultRefreshRate = 150 * time.Millisecond -) +const defaultRefreshRate = 150 * time.Millisecond // DoneError represents use after `(*Progress).Wait()` error. var DoneError = fmt.Errorf("%T instance can't be reused after %[1]T.Wait()", (*Progress)(nil)) @@ -467,9 +465,9 @@ func (s pState) makeBarState(total int64, filler BarFiller, options ...BarOption } } - for i := 0; i < len(bs.buffers); i++ { - bs.buffers[i] = bytes.NewBuffer(make([]byte, 0, 512)) - } + bs.buffers[0] = bytes.NewBuffer(make([]byte, 0, 128)) // prepend + bs.buffers[1] = bytes.NewBuffer(make([]byte, 0, 128)) // append + bs.buffers[2] = bytes.NewBuffer(make([]byte, 0, 256)) // filler return bs } diff --git a/vendor/github.com/vmware/govmomi/govc/flags/datastore.go b/vendor/github.com/vmware/govmomi/govc/flags/datastore.go index 8030a6e00..7ca7c1a71 100644 --- a/vendor/github.com/vmware/govmomi/govc/flags/datastore.go +++ b/vendor/github.com/vmware/govmomi/govc/flags/datastore.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2014-2016 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2024 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -20,9 +20,12 @@ import ( "context" "flag" "fmt" + "net/url" "os" "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vapi/library" + "github.com/vmware/govmomi/vapi/library/finder" "github.com/vmware/govmomi/vim25/types" ) @@ -148,3 +151,59 @@ func (f *DatastoreFlag) Stat(ctx context.Context, file string) (types.BaseFileIn return ds.Stat(ctx, file) } + +func (f *DatastoreFlag) libraryPath(ctx context.Context, p string) (string, error) { + vc, err := f.Client() + if err != nil { + return "", err + } + + rc, err := f.RestClient() + if err != nil { + return "", err + } + + m := library.NewManager(rc) + + r, err := finder.NewFinder(m).Find(ctx, p) + if err != nil { + return "", err + } + + if len(r) != 1 { + return "", fmt.Errorf("%s: %d found", p, len(r)) + } + + return finder.NewPathFinder(m, vc).Path(ctx, r[0]) +} + +// FileBacking converts the given file path for use as VirtualDeviceFileBackingInfo.FileName. +func (f *DatastoreFlag) FileBacking(ctx context.Context, file string, stat bool) (string, error) { + u, err := url.Parse(file) + if err != nil { + return "", err + } + + switch u.Scheme { + case "library": + return f.libraryPath(ctx, u.Path) + case "ds": + // datastore url, e.g. ds:///vmfs/volumes/$uuid/... + return file, nil + } + + var p object.DatastorePath + if p.FromString(file) { + // datastore is specified + return file, nil + } + + if stat { + // Verify ISO exists + if _, err := f.Stat(ctx, file); err != nil { + return "", err + } + } + + return f.DatastorePath(file) +} diff --git a/vendor/github.com/vmware/govmomi/govc/importx/spec.go b/vendor/github.com/vmware/govmomi/govc/importx/spec.go index 09763e2b1..acb1729f1 100644 --- a/vendor/github.com/vmware/govmomi/govc/importx/spec.go +++ b/vendor/github.com/vmware/govmomi/govc/importx/spec.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2015-2023 VMware, Inc. All Rights Reserved. +Copyright (c) 2015-2024 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
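Example (sketch, not part of the patch): the FileBacking helper added to govc/flags/datastore.go above accepts plain datastore paths, "ds:" URLs and Content Library "library:/" paths and returns a path usable as a CD-ROM file backing, which is how vm.create now resolves -iso. The path argument, stat=false choice and function name below are illustrative assumptions, not code from this patch.

package main

import (
	"context"

	"github.com/vmware/govmomi/govc/flags"
	"github.com/vmware/govmomi/object"
	"github.com/vmware/govmomi/vim25/types"
)

// isoBacking resolves an ISO location via the new DatastoreFlag.FileBacking
// helper and attaches it to an existing CD-ROM device. InsertIso sets the
// returned path as the device backing and marks it connected at power on.
func isoBacking(ctx context.Context, f *flags.DatastoreFlag, devices object.VirtualDeviceList, cdrom *types.VirtualCdrom, path string) (object.VirtualDeviceList, error) {
	iso, err := f.FileBacking(ctx, path, false)
	if err != nil {
		return nil, err
	}
	cdrom = devices.InsertIso(cdrom, iso)
	return append(devices, cdrom), nil
}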
@@ -31,28 +31,11 @@ import ( ) var ( - allDiskProvisioningOptions = []string{ - string(types.OvfCreateImportSpecParamsDiskProvisioningTypeFlat), - string(types.OvfCreateImportSpecParamsDiskProvisioningTypeMonolithicSparse), - string(types.OvfCreateImportSpecParamsDiskProvisioningTypeMonolithicFlat), - string(types.OvfCreateImportSpecParamsDiskProvisioningTypeTwoGbMaxExtentSparse), - string(types.OvfCreateImportSpecParamsDiskProvisioningTypeTwoGbMaxExtentFlat), - string(types.OvfCreateImportSpecParamsDiskProvisioningTypeThin), - string(types.OvfCreateImportSpecParamsDiskProvisioningTypeThick), - string(types.OvfCreateImportSpecParamsDiskProvisioningTypeSeSparse), - string(types.OvfCreateImportSpecParamsDiskProvisioningTypeEagerZeroedThick), - string(types.OvfCreateImportSpecParamsDiskProvisioningTypeSparse), - } - allIPAllocationPolicyOptions = []string{ - string(types.VAppIPAssignmentInfoIpAllocationPolicyDhcpPolicy), - string(types.VAppIPAssignmentInfoIpAllocationPolicyTransientPolicy), - string(types.VAppIPAssignmentInfoIpAllocationPolicyFixedPolicy), - string(types.VAppIPAssignmentInfoIpAllocationPolicyFixedAllocatedPolicy), - } - allIPProtocolOptions = []string{ - string(types.VAppIPAssignmentInfoProtocolsIPv4), - string(types.VAppIPAssignmentInfoProtocolsIPv6), - } + allDiskProvisioningOptions = types.OvfCreateImportSpecParamsDiskProvisioningType("").Strings() + + allIPAllocationPolicyOptions = types.VAppIPAssignmentInfoIpAllocationPolicy("").Strings() + + allIPProtocolOptions = types.VAppIPAssignmentInfoProtocols("").Strings() ) type spec struct { diff --git a/vendor/github.com/vmware/govmomi/govc/vm/change.go b/vendor/github.com/vmware/govmomi/govc/vm/change.go index c1be6ccd5..a5a73b531 100644 --- a/vendor/github.com/vmware/govmomi/govc/vm/change.go +++ b/vendor/github.com/vmware/govmomi/govc/vm/change.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2015-2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2015-2024 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -84,11 +84,7 @@ func init() { cli.Register("vm.change", &change{}) } -var latencyLevels = []string{ - string(types.LatencySensitivitySensitivityLevelLow), - string(types.LatencySensitivitySensitivityLevelNormal), - string(types.LatencySensitivitySensitivityLevelHigh), -} +var latencyLevels = types.LatencySensitivitySensitivityLevel("").Strings() // setLatency validates latency level if set func (cmd *change) setLatency() error { @@ -106,11 +102,7 @@ func (cmd *change) setLatency() error { return fmt.Errorf("latency must be one of: %s", strings.Join(latencyLevels, "|")) } -var hwUpgradePolicies = []string{ - string(types.ScheduledHardwareUpgradeInfoHardwareUpgradePolicyOnSoftPowerOff), - string(types.ScheduledHardwareUpgradeInfoHardwareUpgradePolicyNever), - string(types.ScheduledHardwareUpgradeInfoHardwareUpgradePolicyAlways), -} +var hwUpgradePolicies = types.ScheduledHardwareUpgradeInfoHardwareUpgradePolicy("").Strings() // setHwUpgradePolicy validates hwUpgradePolicy if set func (cmd *change) setHwUpgradePolicy() error { diff --git a/vendor/github.com/vmware/govmomi/govc/vm/create.go b/vendor/github.com/vmware/govmomi/govc/vm/create.go index bfae8d67b..46c3a57de 100644 --- a/vendor/github.com/vmware/govmomi/govc/vm/create.go +++ b/vendor/github.com/vmware/govmomi/govc/vm/create.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2014-2016 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2024 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
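Example (sketch, not part of the patch): the hunks above in govc/importx/spec.go and govc/vm/change.go replace hand-maintained option slices with the generated enum Values()/Strings() helpers. A simplified sketch of the same pattern follows; the validation function is illustrative and uses slices.Contains rather than govc's own loop.

package main

import (
	"fmt"
	"slices"
	"strings"

	"github.com/vmware/govmomi/vim25/types"
)

// validateLatency checks a user-supplied latency level against the values
// generated for the enum type, mirroring what setLatency does with the
// latencyLevels variable above.
func validateLatency(level string) error {
	levels := types.LatencySensitivitySensitivityLevel("").Strings()
	if slices.Contains(levels, level) {
		return nil
	}
	return fmt.Errorf("latency must be one of: %s", strings.Join(levels, "|"))
}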
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -20,8 +20,11 @@ import ( "context" "flag" "fmt" + "io" "strings" + "text/tabwriter" + "github.com/vmware/govmomi/find" "github.com/vmware/govmomi/govc/cli" "github.com/vmware/govmomi/govc/flags" "github.com/vmware/govmomi/object" @@ -33,10 +36,8 @@ import ( ) var ( - FirmwareTypes = []string{ - string(types.GuestOsDescriptorFirmwareTypeBios), - string(types.GuestOsDescriptorFirmwareTypeEfi), - } + FirmwareTypes = types.GuestOsDescriptorFirmwareType("").Strings() + FirmwareUsage = fmt.Sprintf("Firmware type [%s]", strings.Join(FirmwareTypes, "|")) ) @@ -62,10 +63,11 @@ type create struct { annotation string firmware string version string + place bool + profile string iso string isoDatastoreFlag *flags.DatastoreFlag - isoDatastore *object.Datastore disk string diskDatastoreFlag *flags.DatastoreFlag @@ -125,8 +127,11 @@ func (cmd *create) Register(ctx context.Context, f *flag.FlagSet) { f.BoolVar(&cmd.force, "force", false, "Create VM if vmx already exists") f.StringVar(&cmd.controller, "disk.controller", "scsi", "Disk controller type") f.StringVar(&cmd.annotation, "annotation", "", "VM description") - f.StringVar(&cmd.firmware, "firmware", FirmwareTypes[0], FirmwareUsage) + f.StringVar(&cmd.profile, "profile", "", "Storage profile name or ID") + if cli.ShowUnreleased() { + f.BoolVar(&cmd.place, "place", false, "Place VM without creating") + } esxiVersions := types.GetESXiVersions() esxiVersionStrings := make([]string, len(esxiVersions)) @@ -197,6 +202,7 @@ https://code.vmware.com/apis/358/vsphere/doc/vim.vm.GuestOsDescriptor.GuestOsIde Examples: govc vm.create -on=false vm-name + govc vm.create -iso library:/boot/linux/ubuntu.iso vm-name # Content Library ISO govc vm.create -cluster cluster1 vm-name # use compute cluster placement govc vm.create -datastore-cluster dscluster vm-name # use datastore cluster placement govc vm.create -m 2048 -c 2 -g freebsd64Guest -net.adapter vmxnet3 -disk.controller pvscsi vm-name` @@ -269,15 +275,11 @@ func (cmd *create) Run(ctx context.Context, f *flag.FlagSet) error { // Verify ISO exists if cmd.iso != "" { - _, err = cmd.isoDatastoreFlag.Stat(ctx, cmd.iso) - if err != nil { - return err - } - - cmd.isoDatastore, err = cmd.isoDatastoreFlag.Datastore() + iso, err := cmd.isoDatastoreFlag.FileBacking(ctx, cmd.iso, true) if err != nil { return err } + cmd.iso = iso } // Verify disk exists @@ -305,7 +307,9 @@ func (cmd *create) Run(ctx context.Context, f *flag.FlagSet) error { if err != nil { return err } - + if cmd.place { + return nil + } info, err := task.WaitForResult(ctx, nil) if err != nil { return err @@ -328,6 +332,61 @@ func (cmd *create) Run(ctx context.Context, f *flag.FlagSet) error { return nil } +type place struct { + Spec types.PlacementSpec `json:"spec"` + Recommendations []types.ClusterRecommendation `json:"recommendations"` + + ctx context.Context + cmd *create +} + +func (p *place) Dump() interface{} { + return p.Recommendations +} + +func (p *place) action(w io.Writer, r types.ClusterRecommendation, a *types.PlacementAction) error { + spec := a.RelocateSpec + if spec == nil { + return nil + } + + fields := []struct { + name string + moid *types.ManagedObjectReference + }{ + {"Target", r.Target}, + {" Folder", spec.Folder}, + {" Datastore", spec.Datastore}, + {" Pool", spec.Pool}, + 
{" Host", spec.Host}, + } + + for _, f := range fields { + if f.moid == nil { + continue + } + path, err := find.InventoryPath(p.ctx, p.cmd.Client, *f.moid) + if err != nil { + return err + } + fmt.Fprintf(w, "%s:\t%s\n", f.name, path) + } + + return nil +} + +func (p *place) Write(w io.Writer) error { + tw := tabwriter.NewWriter(w, 2, 0, 2, ' ', 0) + + for _, r := range p.Recommendations { + for _, a := range r.Action { + p.action(tw, r, a.(*types.PlacementAction)) + } + } + + return tw.Flush() +} + func (cmd *create) createVM(ctx context.Context) (*object.Task, error) { var devices object.VirtualDeviceList var err error @@ -352,6 +411,24 @@ func (cmd *create) createVM(ctx context.Context) (*object.Task, error) { Version: cmd.version, } + if cmd.profile != "" { + c, err := cmd.PbmClient() + if err != nil { + return nil, err + } + m, err := c.ProfileMap(ctx) + if err != nil { + return nil, err + } + p, ok := m.Name[cmd.profile] + if !ok { + return nil, fmt.Errorf("profile %q not found", cmd.profile) + } + spec.VmProfile = []types.BaseVirtualMachineProfileSpec{&types.VirtualMachineDefinedProfileSpec{ + ProfileId: p.GetPbmProfile().ProfileId.UniqueId, + }} + } + devices, err = cmd.addStorage(nil) if err != nil { return nil, err @@ -390,6 +467,9 @@ func (cmd *create) createVM(ctx context.Context) (*object.Task, error) { } recs := result.Recommendations + if cmd.place { + return nil, cmd.WriteResult(&place{pspec, recs, ctx, cmd}) + } if len(recs) == 0 { return nil, fmt.Errorf("no cluster recommendations") } @@ -507,7 +587,7 @@ func (cmd *create) addStorage(devices object.VirtualDeviceList) (object.VirtualD return nil, err } - cdrom = devices.InsertIso(cdrom, cmd.isoDatastore.Path(cmd.iso)) + cdrom = devices.InsertIso(cdrom, cmd.iso) devices = append(devices, cdrom) } diff --git a/vendor/github.com/vmware/govmomi/govc/vm/customize.go b/vendor/github.com/vmware/govmomi/govc/vm/customize.go index 356ab6f2a..e7f0e4b61 100644 --- a/vendor/github.com/vmware/govmomi/govc/vm/customize.go +++ b/vendor/github.com/vmware/govmomi/govc/vm/customize.go @@ -45,6 +45,8 @@ type customize struct { dnsserver flags.StringList dnssuffix flags.StringList kind string + username string + org string } func init() { @@ -75,6 +77,8 @@ func (cmd *customize) Register(ctx context.Context, f *flag.FlagSet) { f.Var(&cmd.dnssuffix, "dns-suffix", "DNS suffix list") cmd.dnssuffix = nil f.StringVar(&cmd.kind, "type", "Linux", "Customization type if spec NAME is not specified (Linux|Windows)") + f.StringVar(&cmd.username, "username", "", "Windows only : full name of the end user in firstname lastname format") + f.StringVar(&cmd.org, "org", "", "Windows only : name of the org that owns the VM") } func (cmd *customize) Usage() string { @@ -211,12 +215,12 @@ func (cmd *customize) Run(ctx context.Context, f *flag.FlagSet) error { sysprep, isWindows := spec.Identity.(*types.CustomizationSysprep) linprep, _ := spec.Identity.(*types.CustomizationLinuxPrep) - if cmd.domain != "" { - if isWindows { - sysprep.Identification.JoinDomain = cmd.domain - } else { - linprep.Domain = cmd.domain - } + if isWindows { + sysprep.Identification.JoinDomain = cmd.domain + sysprep.UserData.FullName = cmd.username + sysprep.UserData.OrgName = cmd.org + } else { + linprep.Domain = cmd.domain } if len(cmd.dnsserver) != 0 { diff --git a/vendor/github.com/vmware/govmomi/internal/version/version.go b/vendor/github.com/vmware/govmomi/internal/version/version.go index 51ca9c457..f1e0f3d0e 100644 --- a/vendor/github.com/vmware/govmomi/internal/version/version.go 
+++ b/vendor/github.com/vmware/govmomi/internal/version/version.go @@ -21,5 +21,5 @@ const ( ClientName = "govmomi" // ClientVersion is the version of this SDK - ClientVersion = "0.37.3" + ClientVersion = "0.38.0" ) diff --git a/vendor/github.com/vmware/govmomi/object/host_certificate_info.go b/vendor/github.com/vmware/govmomi/object/host_certificate_info.go index 1a3a7fab5..0d56e7e7b 100644 --- a/vendor/github.com/vmware/govmomi/object/host_certificate_info.go +++ b/vendor/github.com/vmware/govmomi/object/host_certificate_info.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2016-2023 VMware, Inc. All Rights Reserved. +Copyright (c) 2016-2024 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,6 +21,8 @@ import ( "crypto/x509" "crypto/x509/pkix" "encoding/asn1" + "encoding/pem" + "errors" "fmt" "io" "net/url" @@ -66,6 +68,18 @@ func (info *HostCertificateInfo) FromCertificate(cert *x509.Certificate) *HostCe return info } +func (info *HostCertificateInfo) FromPEM(cert []byte) (*HostCertificateInfo, error) { + block, _ := pem.Decode(cert) + if block == nil { + return nil, errors.New("failed to pem.Decode cert") + } + x, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + return info.FromCertificate(x), nil +} + // FromURL connects to the given URL.Host via tls.Dial with the given tls.Config and populates the HostCertificateInfo // via tls.ConnectionState. If the certificate was verified with the given tls.Config, the Err field will be nil. // Otherwise, Err will be set to the x509.UnknownAuthorityError or x509.HostnameError. diff --git a/vendor/github.com/vmware/govmomi/object/host_certificate_manager.go b/vendor/github.com/vmware/govmomi/object/host_certificate_manager.go index ddf1d8c59..30787e7a4 100644 --- a/vendor/github.com/vmware/govmomi/object/host_certificate_manager.go +++ b/vendor/github.com/vmware/govmomi/object/host_certificate_manager.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2016 VMware, Inc. All Rights Reserved. +Copyright (c) 2016-2024 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -23,6 +23,7 @@ import ( "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/methods" "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/soap" "github.com/vmware/govmomi/vim25/types" ) @@ -117,7 +118,13 @@ func (m HostCertificateManager) InstallServerCertificate(ctx context.Context, ce Req: &types.Refresh{This: m.Reference()}, } - return m.Client().RoundTrip(ctx, &body, &body) + err = m.Client().RoundTrip(ctx, &body, &body) + if err != nil && soap.IsSoapFault(err) { + if _, ok := soap.ToSoapFault(err).VimFault().(types.MethodNotFound); ok { + return nil + } + } + return err } // ListCACertificateRevocationLists returns the SSL CRLs of Certificate Authorities that are trusted by the host system. 
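Example (sketch, not part of the patch): the FromPEM helper added to object/host_certificate_info.go above parses a PEM-encoded certificate and populates the same fields as FromCertificate. The file path and function name below are illustrative assumptions.

package main

import (
	"os"

	"github.com/vmware/govmomi/object"
)

// parseHostCert reads a PEM-encoded certificate from disk and returns the
// populated HostCertificateInfo via the new FromPEM helper.
func parseHostCert(path string) (*object.HostCertificateInfo, error) {
	pemBytes, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	return new(object.HostCertificateInfo).FromPEM(pemBytes)
}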
diff --git a/vendor/github.com/vmware/govmomi/object/option_value_list.go b/vendor/github.com/vmware/govmomi/object/option_value_list.go new file mode 100644 index 000000000..9f91253cc --- /dev/null +++ b/vendor/github.com/vmware/govmomi/object/option_value_list.go @@ -0,0 +1,206 @@ +/* +Copyright (c) 2024-2024 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "fmt" + "reflect" + + "github.com/vmware/govmomi/vim25/types" +) + +// OptionValueList simplifies manipulation of properties that are arrays of +// types.BaseOptionValue, such as ExtraConfig. +type OptionValueList []types.BaseOptionValue + +// OptionValueListFromMap returns a new OptionValueList object from the provided +// map. +func OptionValueListFromMap[T any](in map[string]T) OptionValueList { + if len(in) == 0 { + return nil + } + var ( + i int + out = make(OptionValueList, len(in)) + ) + for k, v := range in { + out[i] = &types.OptionValue{Key: k, Value: v} + i++ + } + return out +} + +// Get returns the value if exists, otherwise nil is returned. The second return +// value is a flag indicating whether the value exists or nil was the actual +// value. +func (ov OptionValueList) Get(key string) (any, bool) { + if ov == nil { + return nil, false + } + for i := range ov { + if optVal := ov[i].GetOptionValue(); optVal != nil { + if optVal.Key == key { + return optVal.Value, true + } + } + } + return nil, false +} + +// GetString returns the value as a string if the value exists. +func (ov OptionValueList) GetString(key string) (string, bool) { + if ov == nil { + return "", false + } + for i := range ov { + if optVal := ov[i].GetOptionValue(); optVal != nil { + if optVal.Key == key { + return getOptionValueAsString(optVal.Value), true + } + } + } + return "", false +} + +// Additions returns a diff that includes only the elements from the provided +// list that do not already exist. +func (ov OptionValueList) Additions(in ...types.BaseOptionValue) OptionValueList { + return ov.diff(in, true) +} + +// Diff returns a diff that includes the elements from the provided list that do +// not already exist or have different values. 
+func (ov OptionValueList) Diff(in ...types.BaseOptionValue) OptionValueList { + return ov.diff(in, false) +} + +func (ov OptionValueList) diff(in OptionValueList, addOnly bool) OptionValueList { + if ov == nil && in == nil { + return nil + } + var ( + out OptionValueList + leftOptVals = ov.Map() + ) + for i := range in { + if rightOptVal := in[i].GetOptionValue(); rightOptVal != nil { + k, v := rightOptVal.Key, rightOptVal.Value + if ov == nil { + out = append(out, &types.OptionValue{Key: k, Value: v}) + } else if leftOptVal, ok := leftOptVals[k]; !ok { + out = append(out, &types.OptionValue{Key: k, Value: v}) + } else if !addOnly && v != leftOptVal { + out = append(out, &types.OptionValue{Key: k, Value: v}) + } + } + } + if len(out) == 0 { + return nil + } + return out +} + +// Join combines this list with the provided one and returns the result, joining +// the two lists on their shared keys. +// Please note, Join(left, right) means the values from right will be appended +// to left, without overwriting any values that have shared keys. To overwrite +// the shared keys in left from right, use Join(right, left) instead. +func (ov OptionValueList) Join(in ...types.BaseOptionValue) OptionValueList { + var ( + out OptionValueList + outKeys map[string]struct{} + ) + + // Init the out slice from the left side. + if len(ov) > 0 { + outKeys = map[string]struct{}{} + for i := range ov { + if optVal := ov[i].GetOptionValue(); optVal != nil { + kv := &types.OptionValue{Key: optVal.Key, Value: optVal.Value} + out = append(out, kv) + outKeys[optVal.Key] = struct{}{} + } + } + } + + // Join the values from the right side. + for i := range in { + if rightOptVal := in[i].GetOptionValue(); rightOptVal != nil { + k, v := rightOptVal.Key, rightOptVal.Value + if _, ok := outKeys[k]; !ok { + out = append(out, &types.OptionValue{Key: k, Value: v}) + } + } + } + + if len(out) == 0 { + return nil + } + + return out +} + +// Map returns the list of option values as a map. A nil value is returned if +// the list is empty. +func (ov OptionValueList) Map() map[string]any { + if len(ov) == 0 { + return nil + } + out := map[string]any{} + for i := range ov { + if optVal := ov[i].GetOptionValue(); optVal != nil { + out[optVal.Key] = optVal.Value + } + } + if len(out) == 0 { + return nil + } + return out +} + +// StringMap returns the list of option values as a map where the values are +// strings. A nil value is returned if the list is empty. +func (ov OptionValueList) StringMap() map[string]string { + if len(ov) == 0 { + return nil + } + out := map[string]string{} + for i := range ov { + if optVal := ov[i].GetOptionValue(); optVal != nil { + out[optVal.Key] = getOptionValueAsString(optVal.Value) + } + } + if len(out) == 0 { + return nil + } + return out +} + +func getOptionValueAsString(val any) string { + switch tval := val.(type) { + case string: + return tval + default: + if rv := reflect.ValueOf(val); rv.Kind() == reflect.Pointer { + if rv.IsNil() { + return "" + } + return fmt.Sprintf("%v", rv.Elem().Interface()) + } + return fmt.Sprintf("%v", tval) + } +} diff --git a/vendor/github.com/vmware/govmomi/pbm/methods/methods.go b/vendor/github.com/vmware/govmomi/pbm/methods/methods.go index 032c15c54..d7298dfce 100644 --- a/vendor/github.com/vmware/govmomi/pbm/methods/methods.go +++ b/vendor/github.com/vmware/govmomi/pbm/methods/methods.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2023 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2024 VMware, Inc. All Rights Reserved. 
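Example (sketch, not part of the patch): a minimal use of the new object.OptionValueList helpers added in object/option_value_list.go above, wrapping an existing ExtraConfig slice, reading one key and computing only the entries that would change. The "guestinfo.hostname" key and value are illustrative assumptions.

package main

import (
	"fmt"

	"github.com/vmware/govmomi/object"
	"github.com/vmware/govmomi/vim25/types"
)

// diffExtraConfig wraps a VM's current ExtraConfig and returns only the option
// values that differ from the desired setting; Diff returns nil when the
// desired value is already present.
func diffExtraConfig(current []types.BaseOptionValue) object.OptionValueList {
	ec := object.OptionValueList(current)
	if v, ok := ec.GetString("guestinfo.hostname"); ok {
		fmt.Println("current hostname:", v)
	}
	return ec.Diff(&types.OptionValue{Key: "guestinfo.hostname", Value: "node-1"})
}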
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vmware/govmomi/pbm/pbm_util.go b/vendor/github.com/vmware/govmomi/pbm/pbm_util.go index d773b8dbb..5dcd14577 100644 --- a/vendor/github.com/vmware/govmomi/pbm/pbm_util.go +++ b/vendor/github.com/vmware/govmomi/pbm/pbm_util.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2017-2024 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -17,11 +17,17 @@ limitations under the License. package pbm import ( + "context" "fmt" "strconv" "strings" "github.com/vmware/govmomi/pbm/types" + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/view" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/mo" + vim "github.com/vmware/govmomi/vim25/types" ) // A struct to capture pbm create spec details. @@ -146,3 +152,100 @@ func verifyPropertyValueIsBoolean(propertyValue string, dataType string) (bool, } return val, nil } + +// ProfileMap contains a map of storage profiles by name. +type ProfileMap struct { + Name map[string]types.BasePbmProfile + Profile []types.BasePbmProfile +} + +// ProfileMap builds a map of storage profiles by name. +func (c *Client) ProfileMap(ctx context.Context, uid ...string) (*ProfileMap, error) { + m := &ProfileMap{Name: make(map[string]types.BasePbmProfile)} + + rtype := types.PbmProfileResourceType{ + ResourceType: string(types.PbmProfileResourceTypeEnumSTORAGE), + } + + category := types.PbmProfileCategoryEnumREQUIREMENT + + var ids []types.PbmProfileId + if len(uid) == 0 { + var err error + ids, err = c.QueryProfile(ctx, rtype, string(category)) + if err != nil { + return nil, err + } + } else { + ids = make([]types.PbmProfileId, len(uid)) + for i, id := range uid { + ids[i].UniqueId = id + } + } + + profiles, err := c.RetrieveContent(ctx, ids) + if err != nil { + return nil, err + } + m.Profile = profiles + + for _, p := range profiles { + base := p.GetPbmProfile() + m.Name[base.Name] = p + m.Name[base.ProfileId.UniqueId] = p + } + + return m, nil +} + +// DatastoreMap contains a map of Datastore by name. +type DatastoreMap struct { + Name map[string]string + PlacementHub []types.PbmPlacementHub +} + +// DatastoreMap returns a map of Datastore by name. +// The root reference can be a ClusterComputeResource or Folder. 
+func (c *Client) DatastoreMap(ctx context.Context, vc *vim25.Client, root vim.ManagedObjectReference) (*DatastoreMap, error) { + m := &DatastoreMap{Name: make(map[string]string)} + + prop := []string{"name"} + var content []vim.ObjectContent + + if root.Type == "ClusterComputeResource" { + pc := property.DefaultCollector(vc) + var cluster mo.ClusterComputeResource + + if err := pc.RetrieveOne(ctx, root, []string{"datastore"}, &cluster); err != nil { + return nil, err + } + + if err := pc.Retrieve(ctx, cluster.Datastore, prop, &content); err != nil { + return nil, err + } + } else { + kind := []string{"Datastore"} + m := view.NewManager(vc) + + v, err := m.CreateContainerView(ctx, root, kind, true) + if err != nil { + return nil, err + } + + err = v.Retrieve(ctx, kind, prop, &content) + _ = v.Destroy(ctx) + if err != nil { + return nil, err + } + } + + for _, item := range content { + m.PlacementHub = append(m.PlacementHub, types.PbmPlacementHub{ + HubType: item.Obj.Type, + HubId: item.Obj.Value, + }) + m.Name[item.Obj.Value] = item.PropSet[0].Val.(string) + } + + return m, nil +} diff --git a/vendor/github.com/vmware/govmomi/pbm/types/enum.go b/vendor/github.com/vmware/govmomi/pbm/types/enum.go index be05cfd2a..992bcfdc3 100644 --- a/vendor/github.com/vmware/govmomi/pbm/types/enum.go +++ b/vendor/github.com/vmware/govmomi/pbm/types/enum.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2023 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2024 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -33,6 +33,18 @@ const ( PbmAssociateAndApplyPolicyStatusPolicyStatusInvalid = PbmAssociateAndApplyPolicyStatusPolicyStatus("invalid") ) +func (e PbmAssociateAndApplyPolicyStatusPolicyStatus) Values() []PbmAssociateAndApplyPolicyStatusPolicyStatus { + return []PbmAssociateAndApplyPolicyStatusPolicyStatus{ + PbmAssociateAndApplyPolicyStatusPolicyStatusSuccess, + PbmAssociateAndApplyPolicyStatusPolicyStatusFailed, + PbmAssociateAndApplyPolicyStatusPolicyStatusInvalid, + } +} + +func (e PbmAssociateAndApplyPolicyStatusPolicyStatus) Strings() []string { + return types.EnumValuesAsStrings(e.Values()) +} + func init() { types.Add("pbm:PbmAssociateAndApplyPolicyStatusPolicyStatus", reflect.TypeOf((*PbmAssociateAndApplyPolicyStatusPolicyStatus)(nil)).Elem()) } @@ -58,6 +70,17 @@ const ( PbmBuiltinGenericTypeVMW_SET = PbmBuiltinGenericType("VMW_SET") ) +func (e PbmBuiltinGenericType) Values() []PbmBuiltinGenericType { + return []PbmBuiltinGenericType{ + PbmBuiltinGenericTypeVMW_RANGE, + PbmBuiltinGenericTypeVMW_SET, + } +} + +func (e PbmBuiltinGenericType) Strings() []string { + return types.EnumValuesAsStrings(e.Values()) +} + func init() { types.Add("pbm:PbmBuiltinGenericType", reflect.TypeOf((*PbmBuiltinGenericType)(nil)).Elem()) } @@ -80,9 +103,9 @@ const ( // Unsigned long value. // // This datatype supports the following constraint values. - // - Single value - // - Full or partial range of values (`PbmCapabilityRange`) - // - Discrete set of values (`PbmCapabilityDiscreteSet`) + // - Single value + // - Full or partial range of values (`PbmCapabilityRange`) + // - Discrete set of values (`PbmCapabilityDiscreteSet`) PbmBuiltinTypeXSD_LONG = PbmBuiltinType("XSD_LONG") // Datatype not supported. PbmBuiltinTypeXSD_SHORT = PbmBuiltinType("XSD_SHORT") @@ -93,9 +116,9 @@ const ( // Integer value. // // This datatype supports the following constraint values. 
- // - Single value - // - Full or partial range of values (`PbmCapabilityRange`) - // - Discrete set of values (`PbmCapabilityDiscreteSet`) + // - Single value + // - Full or partial range of values (`PbmCapabilityRange`) + // - Discrete set of values (`PbmCapabilityDiscreteSet`) PbmBuiltinTypeXSD_INT = PbmBuiltinType("XSD_INT") // String value. // @@ -108,9 +131,9 @@ const ( // // This datatype supports the following // constraint values. - // - Single value - // - Full or partial range of values (`PbmCapabilityRange`) - // - Discrete set of values (`PbmCapabilityDiscreteSet`) + // - Single value + // - Full or partial range of values (`PbmCapabilityRange`) + // - Discrete set of values (`PbmCapabilityDiscreteSet`) PbmBuiltinTypeXSD_DOUBLE = PbmBuiltinType("XSD_DOUBLE") // Date and time value. PbmBuiltinTypeXSD_DATETIME = PbmBuiltinType("XSD_DATETIME") @@ -118,13 +141,32 @@ const ( // // This datatype supports // the following constraint values. - // - Single value - // - Full or partial range of values (`PbmCapabilityRange`) - // - Discrete set of values (`PbmCapabilityDiscreteSet`) + // - Single value + // - Full or partial range of values (`PbmCapabilityRange`) + // - Discrete set of values (`PbmCapabilityDiscreteSet`) PbmBuiltinTypeVMW_TIMESPAN = PbmBuiltinType("VMW_TIMESPAN") PbmBuiltinTypeVMW_POLICY = PbmBuiltinType("VMW_POLICY") ) +func (e PbmBuiltinType) Values() []PbmBuiltinType { + return []PbmBuiltinType{ + PbmBuiltinTypeXSD_LONG, + PbmBuiltinTypeXSD_SHORT, + PbmBuiltinTypeXSD_INTEGER, + PbmBuiltinTypeXSD_INT, + PbmBuiltinTypeXSD_STRING, + PbmBuiltinTypeXSD_BOOLEAN, + PbmBuiltinTypeXSD_DOUBLE, + PbmBuiltinTypeXSD_DATETIME, + PbmBuiltinTypeVMW_TIMESPAN, + PbmBuiltinTypeVMW_POLICY, + } +} + +func (e PbmBuiltinType) Strings() []string { + return types.EnumValuesAsStrings(e.Values()) +} + func init() { types.Add("pbm:PbmBuiltinType", reflect.TypeOf((*PbmBuiltinType)(nil)).Elem()) } @@ -139,6 +181,16 @@ const ( PbmCapabilityOperatorNOT = PbmCapabilityOperator("NOT") ) +func (e PbmCapabilityOperator) Values() []PbmCapabilityOperator { + return []PbmCapabilityOperator{ + PbmCapabilityOperatorNOT, + } +} + +func (e PbmCapabilityOperator) Strings() []string { + return types.EnumValuesAsStrings(e.Values()) +} + func init() { types.Add("pbm:PbmCapabilityOperator", reflect.TypeOf((*PbmCapabilityOperator)(nil)).Elem()) } @@ -167,6 +219,22 @@ const ( PbmCapabilityTimeUnitTypeYEARS = PbmCapabilityTimeUnitType("YEARS") ) +func (e PbmCapabilityTimeUnitType) Values() []PbmCapabilityTimeUnitType { + return []PbmCapabilityTimeUnitType{ + PbmCapabilityTimeUnitTypeSECONDS, + PbmCapabilityTimeUnitTypeMINUTES, + PbmCapabilityTimeUnitTypeHOURS, + PbmCapabilityTimeUnitTypeDAYS, + PbmCapabilityTimeUnitTypeWEEKS, + PbmCapabilityTimeUnitTypeMONTHS, + PbmCapabilityTimeUnitTypeYEARS, + } +} + +func (e PbmCapabilityTimeUnitType) Strings() []string { + return types.EnumValuesAsStrings(e.Values()) +} + func init() { types.Add("pbm:PbmCapabilityTimeUnitType", reflect.TypeOf((*PbmCapabilityTimeUnitType)(nil)).Elem()) } @@ -188,6 +256,18 @@ const ( PbmComplianceResultComplianceTaskStatusFailed = PbmComplianceResultComplianceTaskStatus("failed") ) +func (e PbmComplianceResultComplianceTaskStatus) Values() []PbmComplianceResultComplianceTaskStatus { + return []PbmComplianceResultComplianceTaskStatus{ + PbmComplianceResultComplianceTaskStatusInProgress, + PbmComplianceResultComplianceTaskStatusSuccess, + PbmComplianceResultComplianceTaskStatusFailed, + } +} + +func (e PbmComplianceResultComplianceTaskStatus) 
Strings() []string { + return types.EnumValuesAsStrings(e.Values()) +} + func init() { types.Add("pbm:PbmComplianceResultComplianceTaskStatus", reflect.TypeOf((*PbmComplianceResultComplianceTaskStatus)(nil)).Elem()) } @@ -221,6 +301,20 @@ const ( PbmComplianceStatusOutOfDate = PbmComplianceStatus("outOfDate") ) +func (e PbmComplianceStatus) Values() []PbmComplianceStatus { + return []PbmComplianceStatus{ + PbmComplianceStatusCompliant, + PbmComplianceStatusNonCompliant, + PbmComplianceStatusUnknown, + PbmComplianceStatusNotApplicable, + PbmComplianceStatusOutOfDate, + } +} + +func (e PbmComplianceStatus) Strings() []string { + return types.EnumValuesAsStrings(e.Values()) +} + func init() { types.Add("pbm:PbmComplianceStatus", reflect.TypeOf((*PbmComplianceStatus)(nil)).Elem()) } @@ -236,6 +330,17 @@ const ( PbmDebugManagerKeystoreNameTRUSTED_ROOTS = PbmDebugManagerKeystoreName("TRUSTED_ROOTS") ) +func (e PbmDebugManagerKeystoreName) Values() []PbmDebugManagerKeystoreName { + return []PbmDebugManagerKeystoreName{ + PbmDebugManagerKeystoreNameSMS, + PbmDebugManagerKeystoreNameTRUSTED_ROOTS, + } +} + +func (e PbmDebugManagerKeystoreName) Strings() []string { + return types.EnumValuesAsStrings(e.Values()) +} + func init() { types.Add("pbm:PbmDebugManagerKeystoreName", reflect.TypeOf((*PbmDebugManagerKeystoreName)(nil)).Elem()) } @@ -267,6 +372,19 @@ const ( PbmHealthStatusForEntityUnknown = PbmHealthStatusForEntity("unknown") ) +func (e PbmHealthStatusForEntity) Values() []PbmHealthStatusForEntity { + return []PbmHealthStatusForEntity{ + PbmHealthStatusForEntityRed, + PbmHealthStatusForEntityYellow, + PbmHealthStatusForEntityGreen, + PbmHealthStatusForEntityUnknown, + } +} + +func (e PbmHealthStatusForEntity) Strings() []string { + return types.EnumValuesAsStrings(e.Values()) +} + func init() { types.Add("pbm:PbmHealthStatusForEntity", reflect.TypeOf((*PbmHealthStatusForEntity)(nil)).Elem()) } @@ -288,6 +406,22 @@ const ( PbmIofilterInfoFilterTypeDATASTOREIOCONTROL = PbmIofilterInfoFilterType("DATASTOREIOCONTROL") ) +func (e PbmIofilterInfoFilterType) Values() []PbmIofilterInfoFilterType { + return []PbmIofilterInfoFilterType{ + PbmIofilterInfoFilterTypeINSPECTION, + PbmIofilterInfoFilterTypeCOMPRESSION, + PbmIofilterInfoFilterTypeENCRYPTION, + PbmIofilterInfoFilterTypeREPLICATION, + PbmIofilterInfoFilterTypeCACHE, + PbmIofilterInfoFilterTypeDATAPROVIDER, + PbmIofilterInfoFilterTypeDATASTOREIOCONTROL, + } +} + +func (e PbmIofilterInfoFilterType) Strings() []string { + return types.EnumValuesAsStrings(e.Values()) +} + func init() { types.Add("pbm:PbmIofilterInfoFilterType", reflect.TypeOf((*PbmIofilterInfoFilterType)(nil)).Elem()) } @@ -305,8 +439,28 @@ const ( PbmLineOfServiceInfoLineOfServiceEnumDATA_PROVIDER = PbmLineOfServiceInfoLineOfServiceEnum("DATA_PROVIDER") PbmLineOfServiceInfoLineOfServiceEnumDATASTORE_IO_CONTROL = PbmLineOfServiceInfoLineOfServiceEnum("DATASTORE_IO_CONTROL") PbmLineOfServiceInfoLineOfServiceEnumDATA_PROTECTION = PbmLineOfServiceInfoLineOfServiceEnum("DATA_PROTECTION") + PbmLineOfServiceInfoLineOfServiceEnumSTRETCHED_CLUSTER = PbmLineOfServiceInfoLineOfServiceEnum("STRETCHED_CLUSTER") ) +func (e PbmLineOfServiceInfoLineOfServiceEnum) Values() []PbmLineOfServiceInfoLineOfServiceEnum { + return []PbmLineOfServiceInfoLineOfServiceEnum{ + PbmLineOfServiceInfoLineOfServiceEnumINSPECTION, + PbmLineOfServiceInfoLineOfServiceEnumCOMPRESSION, + PbmLineOfServiceInfoLineOfServiceEnumENCRYPTION, + PbmLineOfServiceInfoLineOfServiceEnumREPLICATION, + 
PbmLineOfServiceInfoLineOfServiceEnumCACHING, + PbmLineOfServiceInfoLineOfServiceEnumPERSISTENCE, + PbmLineOfServiceInfoLineOfServiceEnumDATA_PROVIDER, + PbmLineOfServiceInfoLineOfServiceEnumDATASTORE_IO_CONTROL, + PbmLineOfServiceInfoLineOfServiceEnumDATA_PROTECTION, + PbmLineOfServiceInfoLineOfServiceEnumSTRETCHED_CLUSTER, + } +} + +func (e PbmLineOfServiceInfoLineOfServiceEnum) Strings() []string { + return types.EnumValuesAsStrings(e.Values()) +} + func init() { types.Add("pbm:PbmLineOfServiceInfoLineOfServiceEnum", reflect.TypeOf((*PbmLineOfServiceInfoLineOfServiceEnum)(nil)).Elem()) } @@ -334,6 +488,23 @@ const ( PbmLoggingConfigurationComponentVmomi = PbmLoggingConfigurationComponent("vmomi") ) +func (e PbmLoggingConfigurationComponent) Values() []PbmLoggingConfigurationComponent { + return []PbmLoggingConfigurationComponent{ + PbmLoggingConfigurationComponentPbm, + PbmLoggingConfigurationComponentVslm, + PbmLoggingConfigurationComponentSms, + PbmLoggingConfigurationComponentSpbm, + PbmLoggingConfigurationComponentSps, + PbmLoggingConfigurationComponentHttpclient_header, + PbmLoggingConfigurationComponentHttpclient_content, + PbmLoggingConfigurationComponentVmomi, + } +} + +func (e PbmLoggingConfigurationComponent) Strings() []string { + return types.EnumValuesAsStrings(e.Values()) +} + func init() { types.Add("pbm:PbmLoggingConfigurationComponent", reflect.TypeOf((*PbmLoggingConfigurationComponent)(nil)).Elem()) } @@ -351,6 +522,18 @@ const ( PbmLoggingConfigurationLogLevelTRACE = PbmLoggingConfigurationLogLevel("TRACE") ) +func (e PbmLoggingConfigurationLogLevel) Values() []PbmLoggingConfigurationLogLevel { + return []PbmLoggingConfigurationLogLevel{ + PbmLoggingConfigurationLogLevelINFO, + PbmLoggingConfigurationLogLevelDEBUG, + PbmLoggingConfigurationLogLevelTRACE, + } +} + +func (e PbmLoggingConfigurationLogLevel) Strings() []string { + return types.EnumValuesAsStrings(e.Values()) +} + func init() { types.Add("pbm:PbmLoggingConfigurationLogLevel", reflect.TypeOf((*PbmLoggingConfigurationLogLevel)(nil)).Elem()) } @@ -384,6 +567,23 @@ const ( PbmObjectTypeUnknown = PbmObjectType("unknown") ) +func (e PbmObjectType) Values() []PbmObjectType { + return []PbmObjectType{ + PbmObjectTypeVirtualMachine, + PbmObjectTypeVirtualMachineAndDisks, + PbmObjectTypeVirtualDiskId, + PbmObjectTypeVirtualDiskUUID, + PbmObjectTypeDatastore, + PbmObjectTypeVsanObjectId, + PbmObjectTypeFileShareId, + PbmObjectTypeUnknown, + } +} + +func (e PbmObjectType) Strings() []string { + return types.EnumValuesAsStrings(e.Values()) +} + func init() { types.Add("pbm:PbmObjectType", reflect.TypeOf((*PbmObjectType)(nil)).Elem()) } @@ -405,6 +605,20 @@ const ( PbmOperationCLONE = PbmOperation("CLONE") ) +func (e PbmOperation) Values() []PbmOperation { + return []PbmOperation{ + PbmOperationCREATE, + PbmOperationREGISTER, + PbmOperationRECONFIGURE, + PbmOperationMIGRATE, + PbmOperationCLONE, + } +} + +func (e PbmOperation) Strings() []string { + return types.EnumValuesAsStrings(e.Values()) +} + func init() { types.Add("pbm:PbmOperation", reflect.TypeOf((*PbmOperation)(nil)).Elem()) } @@ -428,6 +642,18 @@ const ( PbmPolicyAssociationVolumeAllocationTypeConserveSpaceWhenPossible = PbmPolicyAssociationVolumeAllocationType("ConserveSpaceWhenPossible") ) +func (e PbmPolicyAssociationVolumeAllocationType) Values() []PbmPolicyAssociationVolumeAllocationType { + return []PbmPolicyAssociationVolumeAllocationType{ + PbmPolicyAssociationVolumeAllocationTypeFullyInitialized, + 
PbmPolicyAssociationVolumeAllocationTypeReserveSpace, + PbmPolicyAssociationVolumeAllocationTypeConserveSpaceWhenPossible, + } +} + +func (e PbmPolicyAssociationVolumeAllocationType) Strings() []string { + return types.EnumValuesAsStrings(e.Values()) +} + func init() { types.Add("pbm:PbmPolicyAssociationVolumeAllocationType", reflect.TypeOf((*PbmPolicyAssociationVolumeAllocationType)(nil)).Elem()) } @@ -459,6 +685,18 @@ const ( PbmProfileCategoryEnumDATA_SERVICE_POLICY = PbmProfileCategoryEnum("DATA_SERVICE_POLICY") ) +func (e PbmProfileCategoryEnum) Values() []PbmProfileCategoryEnum { + return []PbmProfileCategoryEnum{ + PbmProfileCategoryEnumREQUIREMENT, + PbmProfileCategoryEnumRESOURCE, + PbmProfileCategoryEnumDATA_SERVICE_POLICY, + } +} + +func (e PbmProfileCategoryEnum) Strings() []string { + return types.EnumValuesAsStrings(e.Values()) +} + func init() { types.Add("pbm:PbmProfileCategoryEnum", reflect.TypeOf((*PbmProfileCategoryEnum)(nil)).Elem()) } @@ -474,6 +712,16 @@ const ( PbmProfileResourceTypeEnumSTORAGE = PbmProfileResourceTypeEnum("STORAGE") ) +func (e PbmProfileResourceTypeEnum) Values() []PbmProfileResourceTypeEnum { + return []PbmProfileResourceTypeEnum{ + PbmProfileResourceTypeEnumSTORAGE, + } +} + +func (e PbmProfileResourceTypeEnum) Strings() []string { + return types.EnumValuesAsStrings(e.Values()) +} + func init() { types.Add("pbm:PbmProfileResourceTypeEnum", reflect.TypeOf((*PbmProfileResourceTypeEnum)(nil)).Elem()) } @@ -496,6 +744,20 @@ const ( PbmSystemCreatedProfileTypeVsanMaxDefaultProfile = PbmSystemCreatedProfileType("VsanMaxDefaultProfile") ) +func (e PbmSystemCreatedProfileType) Values() []PbmSystemCreatedProfileType { + return []PbmSystemCreatedProfileType{ + PbmSystemCreatedProfileTypeVsanDefaultProfile, + PbmSystemCreatedProfileTypeVVolDefaultProfile, + PbmSystemCreatedProfileTypePmemDefaultProfile, + PbmSystemCreatedProfileTypeVmcManagementProfile, + PbmSystemCreatedProfileTypeVsanMaxDefaultProfile, + } +} + +func (e PbmSystemCreatedProfileType) Strings() []string { + return types.EnumValuesAsStrings(e.Values()) +} + func init() { types.Add("pbm:PbmSystemCreatedProfileType", reflect.TypeOf((*PbmSystemCreatedProfileType)(nil)).Elem()) } @@ -515,6 +777,19 @@ const ( PbmVmOperationCLONE = PbmVmOperation("CLONE") ) +func (e PbmVmOperation) Values() []PbmVmOperation { + return []PbmVmOperation{ + PbmVmOperationCREATE, + PbmVmOperationRECONFIGURE, + PbmVmOperationMIGRATE, + PbmVmOperationCLONE, + } +} + +func (e PbmVmOperation) Strings() []string { + return types.EnumValuesAsStrings(e.Values()) +} + func init() { types.Add("pbm:PbmVmOperation", reflect.TypeOf((*PbmVmOperation)(nil)).Elem()) } @@ -535,6 +810,18 @@ const ( PbmVvolTypeSwap = PbmVvolType("Swap") ) +func (e PbmVvolType) Values() []PbmVvolType { + return []PbmVvolType{ + PbmVvolTypeConfig, + PbmVvolTypeData, + PbmVvolTypeSwap, + } +} + +func (e PbmVvolType) Strings() []string { + return types.EnumValuesAsStrings(e.Values()) +} + func init() { types.Add("pbm:PbmVvolType", reflect.TypeOf((*PbmVvolType)(nil)).Elem()) } diff --git a/vendor/github.com/vmware/govmomi/pbm/types/if.go b/vendor/github.com/vmware/govmomi/pbm/types/if.go index 4008dffff..3301d21e6 100644 --- a/vendor/github.com/vmware/govmomi/pbm/types/if.go +++ b/vendor/github.com/vmware/govmomi/pbm/types/if.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2023 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2024 VMware, Inc. All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/vmware/govmomi/pbm/types/types.go b/vendor/github.com/vmware/govmomi/pbm/types/types.go index 4c6f72cae..5bd9598f7 100644 --- a/vendor/github.com/vmware/govmomi/pbm/types/types.go +++ b/vendor/github.com/vmware/govmomi/pbm/types/types.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2023 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2024 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -24,6 +24,8 @@ import ( ) // A boxed array of `PbmCapabilityConstraintInstance`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. type ArrayOfPbmCapabilityConstraintInstance struct { PbmCapabilityConstraintInstance []PbmCapabilityConstraintInstance `xml:"PbmCapabilityConstraintInstance,omitempty" json:"_value"` } @@ -33,6 +35,8 @@ func init() { } // A boxed array of `PbmCapabilityInstance`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. type ArrayOfPbmCapabilityInstance struct { PbmCapabilityInstance []PbmCapabilityInstance `xml:"PbmCapabilityInstance,omitempty" json:"_value"` } @@ -42,6 +46,8 @@ func init() { } // A boxed array of `PbmCapabilityMetadata`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. type ArrayOfPbmCapabilityMetadata struct { PbmCapabilityMetadata []PbmCapabilityMetadata `xml:"PbmCapabilityMetadata,omitempty" json:"_value"` } @@ -51,6 +57,8 @@ func init() { } // A boxed array of `PbmCapabilityMetadataPerCategory`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. type ArrayOfPbmCapabilityMetadataPerCategory struct { PbmCapabilityMetadataPerCategory []PbmCapabilityMetadataPerCategory `xml:"PbmCapabilityMetadataPerCategory,omitempty" json:"_value"` } @@ -60,6 +68,8 @@ func init() { } // A boxed array of `PbmCapabilityPropertyInstance`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. type ArrayOfPbmCapabilityPropertyInstance struct { PbmCapabilityPropertyInstance []PbmCapabilityPropertyInstance `xml:"PbmCapabilityPropertyInstance,omitempty" json:"_value"` } @@ -69,6 +79,8 @@ func init() { } // A boxed array of `PbmCapabilityPropertyMetadata`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. type ArrayOfPbmCapabilityPropertyMetadata struct { PbmCapabilityPropertyMetadata []PbmCapabilityPropertyMetadata `xml:"PbmCapabilityPropertyMetadata,omitempty" json:"_value"` } @@ -78,6 +90,8 @@ func init() { } // A boxed array of `PbmCapabilitySchema`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. type ArrayOfPbmCapabilitySchema struct { PbmCapabilitySchema []PbmCapabilitySchema `xml:"PbmCapabilitySchema,omitempty" json:"_value"` } @@ -87,6 +101,8 @@ func init() { } // A boxed array of `PbmCapabilitySubProfile`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. 
type ArrayOfPbmCapabilitySubProfile struct { PbmCapabilitySubProfile []PbmCapabilitySubProfile `xml:"PbmCapabilitySubProfile,omitempty" json:"_value"` } @@ -96,6 +112,8 @@ func init() { } // A boxed array of `PbmCapabilityVendorNamespaceInfo`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. type ArrayOfPbmCapabilityVendorNamespaceInfo struct { PbmCapabilityVendorNamespaceInfo []PbmCapabilityVendorNamespaceInfo `xml:"PbmCapabilityVendorNamespaceInfo,omitempty" json:"_value"` } @@ -105,6 +123,8 @@ func init() { } // A boxed array of `PbmCapabilityVendorResourceTypeInfo`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. type ArrayOfPbmCapabilityVendorResourceTypeInfo struct { PbmCapabilityVendorResourceTypeInfo []PbmCapabilityVendorResourceTypeInfo `xml:"PbmCapabilityVendorResourceTypeInfo,omitempty" json:"_value"` } @@ -114,6 +134,8 @@ func init() { } // A boxed array of `PbmCompliancePolicyStatus`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. type ArrayOfPbmCompliancePolicyStatus struct { PbmCompliancePolicyStatus []PbmCompliancePolicyStatus `xml:"PbmCompliancePolicyStatus,omitempty" json:"_value"` } @@ -123,6 +145,8 @@ func init() { } // A boxed array of `PbmComplianceResult`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. type ArrayOfPbmComplianceResult struct { PbmComplianceResult []PbmComplianceResult `xml:"PbmComplianceResult,omitempty" json:"_value"` } @@ -132,6 +156,8 @@ func init() { } // A boxed array of `PbmDatastoreSpaceStatistics`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. type ArrayOfPbmDatastoreSpaceStatistics struct { PbmDatastoreSpaceStatistics []PbmDatastoreSpaceStatistics `xml:"PbmDatastoreSpaceStatistics,omitempty" json:"_value"` } @@ -141,6 +167,8 @@ func init() { } // A boxed array of `PbmDefaultProfileInfo`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. type ArrayOfPbmDefaultProfileInfo struct { PbmDefaultProfileInfo []PbmDefaultProfileInfo `xml:"PbmDefaultProfileInfo,omitempty" json:"_value"` } @@ -150,6 +178,8 @@ func init() { } // A boxed array of `PbmFaultNoPermissionEntityPrivileges`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. type ArrayOfPbmFaultNoPermissionEntityPrivileges struct { PbmFaultNoPermissionEntityPrivileges []PbmFaultNoPermissionEntityPrivileges `xml:"PbmFaultNoPermissionEntityPrivileges,omitempty" json:"_value"` } @@ -159,6 +189,8 @@ func init() { } // A boxed array of `PbmLoggingConfiguration`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. type ArrayOfPbmLoggingConfiguration struct { PbmLoggingConfiguration []PbmLoggingConfiguration `xml:"PbmLoggingConfiguration,omitempty" json:"_value"` } @@ -168,6 +200,8 @@ func init() { } // A boxed array of `PbmPlacementCompatibilityResult`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. 
type ArrayOfPbmPlacementCompatibilityResult struct { PbmPlacementCompatibilityResult []PbmPlacementCompatibilityResult `xml:"PbmPlacementCompatibilityResult,omitempty" json:"_value"` } @@ -177,6 +211,8 @@ func init() { } // A boxed array of `PbmPlacementHub`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. type ArrayOfPbmPlacementHub struct { PbmPlacementHub []PbmPlacementHub `xml:"PbmPlacementHub,omitempty" json:"_value"` } @@ -186,6 +222,8 @@ func init() { } // A boxed array of `PbmPlacementMatchingResources`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. type ArrayOfPbmPlacementMatchingResources struct { PbmPlacementMatchingResources []BasePbmPlacementMatchingResources `xml:"PbmPlacementMatchingResources,omitempty,typeattr" json:"_value"` } @@ -195,6 +233,8 @@ func init() { } // A boxed array of `PbmPlacementRequirement`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. type ArrayOfPbmPlacementRequirement struct { PbmPlacementRequirement []BasePbmPlacementRequirement `xml:"PbmPlacementRequirement,omitempty,typeattr" json:"_value"` } @@ -204,6 +244,8 @@ func init() { } // A boxed array of `PbmPlacementResourceUtilization`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. type ArrayOfPbmPlacementResourceUtilization struct { PbmPlacementResourceUtilization []PbmPlacementResourceUtilization `xml:"PbmPlacementResourceUtilization,omitempty" json:"_value"` } @@ -213,6 +255,8 @@ func init() { } // A boxed array of `PbmProfile`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. type ArrayOfPbmProfile struct { PbmProfile []BasePbmProfile `xml:"PbmProfile,omitempty,typeattr" json:"_value"` } @@ -222,6 +266,8 @@ func init() { } // A boxed array of `PbmProfileId`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. type ArrayOfPbmProfileId struct { PbmProfileId []PbmProfileId `xml:"PbmProfileId,omitempty" json:"_value"` } @@ -231,6 +277,8 @@ func init() { } // A boxed array of `PbmProfileOperationOutcome`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. type ArrayOfPbmProfileOperationOutcome struct { PbmProfileOperationOutcome []PbmProfileOperationOutcome `xml:"PbmProfileOperationOutcome,omitempty" json:"_value"` } @@ -240,6 +288,8 @@ func init() { } // A boxed array of `PbmProfileResourceType`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. type ArrayOfPbmProfileResourceType struct { PbmProfileResourceType []PbmProfileResourceType `xml:"PbmProfileResourceType,omitempty" json:"_value"` } @@ -249,6 +299,8 @@ func init() { } // A boxed array of `PbmProfileType`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. type ArrayOfPbmProfileType struct { PbmProfileType []PbmProfileType `xml:"PbmProfileType,omitempty" json:"_value"` } @@ -258,6 +310,8 @@ func init() { } // A boxed array of `PbmQueryProfileResult`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. 
type ArrayOfPbmQueryProfileResult struct { PbmQueryProfileResult []PbmQueryProfileResult `xml:"PbmQueryProfileResult,omitempty" json:"_value"` } @@ -267,6 +321,8 @@ func init() { } // A boxed array of `PbmQueryReplicationGroupResult`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. type ArrayOfPbmQueryReplicationGroupResult struct { PbmQueryReplicationGroupResult []PbmQueryReplicationGroupResult `xml:"PbmQueryReplicationGroupResult,omitempty" json:"_value"` } @@ -276,6 +332,8 @@ func init() { } // A boxed array of `PbmRollupComplianceResult`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. type ArrayOfPbmRollupComplianceResult struct { PbmRollupComplianceResult []PbmRollupComplianceResult `xml:"PbmRollupComplianceResult,omitempty" json:"_value"` } @@ -285,6 +343,8 @@ func init() { } // A boxed array of `PbmServerObjectRef`. To be used in `Any` placeholders. +// +// This structure may be used only with operations rendered under `/pbm`. type ArrayOfPbmServerObjectRef struct { PbmServerObjectRef []PbmServerObjectRef `xml:"PbmServerObjectRef,omitempty" json:"_value"` } @@ -340,6 +400,8 @@ func init() { } // The parameters of `PbmProfileProfileManager.PbmAssignDefaultRequirementProfile`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmAssignDefaultRequirementProfileRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // The profile that needs to be made default profile. @@ -544,6 +606,7 @@ func init() { types.Add("pbm:PbmCapabilityMetadataPerCategory", reflect.TypeOf((*PbmCapabilityMetadataPerCategory)(nil)).Elem()) } +// This structure may be used only with operations rendered under `/pbm`. type PbmCapabilityMetadataUniqueId struct { types.DynamicData @@ -601,23 +664,22 @@ type PbmCapabilityProfile struct { // The profileCategory // is a string value that corresponds to one of the // `PbmProfileCategoryEnum_enum` values. - // - REQUIREMENT profile - Defines the storage constraints applied - // to virtual machine placement. Requirements are defined by - // the user and can be associated with virtual machines and virtual - // disks. During provisioning, you can use a requirements profile - // for compliance and placement checking to support - // selection and configuration of resources. - // - RESOURCE profile - Specifies system-defined storage capabilities. - // You cannot modify a resource profile. You cannot associate a resource - // profile with vSphere entities, use it during provisioning, or target - // entities for resource selection or configuration. - // This type of profile gives the user visibility into the capabilities - // supported by the storage provider. - // - // DATA\_SERVICE\_POLICY - Indicates a data service policy that can - // be embedded into another storage policy. Policies of this type can't - // be assigned to Virtual Machines or Virtual Disks. This policy cannot - // be used for compliance checking. + // - REQUIREMENT profile - Defines the storage constraints applied + // to virtual machine placement. Requirements are defined by + // the user and can be associated with virtual machines and virtual + // disks. During provisioning, you can use a requirements profile + // for compliance and placement checking to support + // selection and configuration of resources. + // - RESOURCE profile - Specifies system-defined storage capabilities. 
+ // You cannot modify a resource profile. You cannot associate a resource + // profile with vSphere entities, use it during provisioning, or target + // entities for resource selection or configuration. + // This type of profile gives the user visibility into the capabilities + // supported by the storage provider. + // - DATA\_SERVICE\_POLICY - Indicates a data service policy that can + // be embedded into another storage policy. Policies of this type can't + // be assigned to Virtual Machines or Virtual Disks. This policy cannot + // be used for compliance checking. ProfileCategory string `xml:"profileCategory" json:"profileCategory"` // Type of the target resource to which the capability information applies. // @@ -764,9 +826,9 @@ type PbmCapabilityPropertyInstance struct { // // You must specify the value. // A property value is one value or a collection of values. - // - A single property value is expressed as a scalar value. - // - A collection of values is expressed as a `PbmCapabilityDiscreteSet` - // or a `PbmCapabilityRange` of values. + // - A single property value is expressed as a scalar value. + // - A collection of values is expressed as a `PbmCapabilityDiscreteSet` + // or a `PbmCapabilityRange` of values. // // The datatype of each value must be one of the // `PbmBuiltinType_enum` datatypes. @@ -799,23 +861,23 @@ type PbmCapabilityPropertyMetadata struct { // (`PbmCapabilityPropertyInstance*.*PbmCapabilityPropertyInstance.id`). Id string `xml:"id" json:"id"` // Property name and description. - // - The summary.label property - // (`PbmExtendedElementDescription.label`) - // contains property 'name' in server locale. - // - The summary.summary property - // (`PbmExtendedElementDescription.summary`) - // contains property 'description' in server locale. - // - The summary.messageCatalogKeyPrefix property - // (`PbmExtendedElementDescription.messageCatalogKeyPrefix`) - // contains unique prefix for this property within given message catalog. - // Prefix format: <capability\_unique\_identifier.<property\_id - // capability\_unique\_identifier -- string representation of - // `PbmCapabilityMetadataUniqueId` which globally identifies given - // capability metadata definition uniquely. - // property\_id -- 'id' of this property `PbmCapabilityPropertyMetadata.id` - // Eg www.emc.com.storage.Recovery.Recovery\_site - // www.emc.com.storage.Recovery.RPO - // www.emc.com.storage.Recovery.RTO + // - The summary.label property + // (`PbmExtendedElementDescription.label`) + // contains property 'name' in server locale. + // - The summary.summary property + // (`PbmExtendedElementDescription.summary`) + // contains property 'description' in server locale. + // - The summary.messageCatalogKeyPrefix property + // (`PbmExtendedElementDescription.messageCatalogKeyPrefix`) + // contains unique prefix for this property within given message catalog. + // Prefix format: <capability\_unique\_identifier>.<property\_id> + // capability\_unique\_identifier -- string representation of + // `PbmCapabilityMetadataUniqueId` which globally identifies given + // capability metadata definition uniquely. + // property\_id -- 'id' of this property `PbmCapabilityPropertyMetadata.id` + // Eg www.emc.com.storage.Recovery.Recovery\_site + // www.emc.com.storage.Recovery.RPO + // www.emc.com.storage.Recovery.RTO Summary PbmExtendedElementDescription `xml:"summary" json:"summary"` // Indicates whether incorporating given capability is mandatory during creation of // profile. 
@@ -826,11 +888,11 @@ type PbmCapabilityPropertyMetadata struct { // (`PbmCapabilityPropertyInstance*.*PbmCapabilityPropertyInstance.value`) // is specified as a builtin datatype and may also specify the interpretation of a // collection of values of that datatype. - // - `PbmCapabilityPropertyMetadata.type*.*PbmCapabilityTypeInfo.typeName` - // specifies the `PbmBuiltinType_enum`. - // - `PbmCapabilityPropertyMetadata.type*.*PbmCapabilityGenericTypeInfo.genericTypeName` - // indicates how a collection of values of the specified datatype will be interpreted - // (`PbmBuiltinGenericType_enum`). + // - `PbmCapabilityPropertyMetadata.type*.*PbmCapabilityTypeInfo.typeName` + // specifies the `PbmBuiltinType_enum`. + // - `PbmCapabilityPropertyMetadata.type*.*PbmCapabilityGenericTypeInfo.genericTypeName` + // indicates how a collection of values of the specified datatype will be interpreted + // (`PbmBuiltinGenericType_enum`). Type BasePbmCapabilityTypeInfo `xml:"type,omitempty,typeattr" json:"type,omitempty"` // Default value, if any, that the property will assume when not // constrained by requirements. @@ -861,14 +923,14 @@ type PbmCapabilityPropertyMetadata struct { // different types across capability profiles. This value, if specified, // specifies the expected kind of constraint used in requirement profiles. // Considerations for using this information: - // - This is only a hint; any properly formed constraint - // (see `PbmCapabilityPropertyInstance.value`) - // is still valid for a requirement profile. - // - If VMW\_SET is hinted, then a single value matching the property metadata type is - // also an expected form of constraint, as the latter is an allowed convenience - // for expressing a single-member set. - // - If this hint is not specified, then the authoring system may default to a form of - // constraint determined by its own criteria. + // - This is only a hint; any properly formed constraint + // (see `PbmCapabilityPropertyInstance.value`) + // is still valid for a requirement profile. + // - If VMW\_SET is hinted, then a single value matching the property metadata type is + // also an expected form of constraint, as the latter is an allowed convenience + // for expressing a single-member set. + // - If this hint is not specified, then the authoring system may default to a form of + // constraint determined by its own criteria. RequirementsTypeHint string `xml:"requirementsTypeHint,omitempty" json:"requirementsTypeHint,omitempty"` } @@ -938,16 +1000,16 @@ type PbmCapabilitySchemaVendorInfo struct { VendorUuid string `xml:"vendorUuid" json:"vendorUuid"` // Captures name and description information about the vendor/owner of // the schema. - // - The summary.label property - // (`PbmExtendedElementDescription.label`) - // contains vendor name information in server locale. - // - The summary.summary property - // (`PbmExtendedElementDescription.summary`) - // contains vendor description string in server locale. - // - The summary.messageCatalogKeyPrefix property - // (`PbmExtendedElementDescription.messageCatalogKeyPrefix`) - // contains unique prefix for the vendor information within given message - // catalog. + // - The summary.label property + // (`PbmExtendedElementDescription.label`) + // contains vendor name information in server locale. + // - The summary.summary property + // (`PbmExtendedElementDescription.summary`) + // contains vendor description string in server locale. 
+ // - The summary.messageCatalogKeyPrefix property + // (`PbmExtendedElementDescription.messageCatalogKeyPrefix`) + // contains unique prefix for the vendor information within given message + // catalog. Info PbmExtendedElementDescription `xml:"info" json:"info"` } @@ -1062,6 +1124,7 @@ func init() { types.Add("pbm:PbmCapabilityTypeInfo", reflect.TypeOf((*PbmCapabilityTypeInfo)(nil)).Elem()) } +// This structure may be used only with operations rendered under `/pbm`. type PbmCapabilityVendorNamespaceInfo struct { types.DynamicData @@ -1073,6 +1136,7 @@ func init() { types.Add("pbm:PbmCapabilityVendorNamespaceInfo", reflect.TypeOf((*PbmCapabilityVendorNamespaceInfo)(nil)).Elem()) } +// This structure may be used only with operations rendered under `/pbm`. type PbmCapabilityVendorResourceTypeInfo struct { types.DynamicData @@ -1081,7 +1145,7 @@ type PbmCapabilityVendorResourceTypeInfo struct { // // Must match one of the values for enum `PbmProfileResourceTypeEnum_enum` ResourceType string `xml:"resourceType" json:"resourceType"` - // List of all vendorInfo -- namespaceInfo tuples that are registered for + // List of all vendorInfo <--> namespaceInfo tuples that are registered for // given resource type VendorNamespaceInfo []PbmCapabilityVendorNamespaceInfo `xml:"vendorNamespaceInfo" json:"vendorNamespaceInfo"` } @@ -1097,6 +1161,8 @@ func init() { } // The parameters of `PbmPlacementSolver.PbmCheckCompatibility`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmCheckCompatibilityRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // Candidate list of hubs, either datastores or storage pods or a @@ -1123,6 +1189,8 @@ func init() { } // The parameters of `PbmPlacementSolver.PbmCheckCompatibilityWithSpec`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmCheckCompatibilityWithSpecRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // Candidate list of hubs, either datastores or storage pods @@ -1148,6 +1216,8 @@ func init() { } // The parameters of `PbmComplianceManager.PbmCheckCompliance`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmCheckComplianceRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // One or more references to storage entities. @@ -1155,16 +1225,16 @@ type PbmCheckComplianceRequestType struct { // A maximum of 1000 virtual machines and/or virtual disks can be specified // in a call. The results of calling the checkCompliance API with // more than a 1000 entities is undefined. - // - If the list of entities also contains datastores, the Server - // will ignore the datastores. - // - If the list contains valid and invalid entities, the Server ignores - // the invalid entities and returns results for the valid entities. - // Invalid entities are entities that are not in the vCenter inventory. - // - If the list contains only datastores, the method throws - // an InvalidArgument fault. - // - If the list contains virtual machines and disks and the entities - // are invalid or have been deleted by the time of the request, the method - // throws an InvalidArgument fault. + // - If the list of entities also contains datastores, the Server + // will ignore the datastores. + // - If the list contains valid and invalid entities, the Server ignores + // the invalid entities and returns results for the valid entities. 
+ // Invalid entities are entities that are not in the vCenter inventory. + // - If the list contains only datastores, the method throws + // an InvalidArgument fault. + // - If the list contains virtual machines and disks and the entities + // are invalid or have been deleted by the time of the request, the method + // throws an InvalidArgument fault. // // If an entity does not have an associated storage profile, the entity // is removed from the list. @@ -1189,6 +1259,8 @@ func init() { } // The parameters of `PbmPlacementSolver.PbmCheckRequirements`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmCheckRequirementsRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // Candidate list of hubs, either datastores or storage pods @@ -1226,6 +1298,8 @@ func init() { } // The parameters of `PbmComplianceManager.PbmCheckRollupCompliance`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmCheckRollupComplianceRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // One or more references to virtual machines. @@ -1394,6 +1468,8 @@ func init() { } // The parameters of `PbmProfileProfileManager.PbmCreate`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmCreateRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // Capability-based profile specification. @@ -1527,6 +1603,8 @@ func init() { } // The parameters of `PbmProfileProfileManager.PbmDelete`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmDeleteRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // Array of profile identifiers. @@ -1562,6 +1640,7 @@ func init() { types.Add("pbm:PbmDuplicateNameFault", reflect.TypeOf((*PbmDuplicateNameFault)(nil)).Elem()) } +// This structure may be used only with operations rendered under `/pbm`. type PbmExtendedElementDescription struct { types.DynamicData @@ -1580,7 +1659,7 @@ type PbmExtendedElementDescription struct { // will be provided by #messageArg. // Both summary and label in ElementDescription will have a corresponding // entry in the message catalog with the keys - // .summary and .label + // <messageCatalogKeyPrefix>.summary and <messageCatalogKeyPrefix>.label // respectively. // ElementDescription.summary and ElementDescription.label will contain // the strings in server locale. @@ -1644,6 +1723,7 @@ func init() { types.Add("pbm:PbmFaultNoPermission", reflect.TypeOf((*PbmFaultNoPermission)(nil)).Elem()) } +// This structure may be used only with operations rendered under `/pbm`. type PbmFaultNoPermissionEntityPrivileges struct { types.DynamicData @@ -1688,6 +1768,7 @@ func init() { types.Add("pbm:PbmFaultNotFoundFault", reflect.TypeOf((*PbmFaultNotFoundFault)(nil)).Elem()) } +// This structure may be used only with operations rendered under `/pbm`. type PbmFaultProfileStorageFault struct { PbmFault } @@ -1709,6 +1790,8 @@ func init() { } // The parameters of `PbmProfileProfileManager.PbmFetchCapabilityMetadata`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmFetchCapabilityMetadataRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // Type of profile resource. The Server supports the "STORAGE" resource @@ -1738,6 +1821,8 @@ func init() { } // The parameters of `PbmProfileProfileManager.PbmFetchCapabilitySchema`. 
+// +// This structure may be used only with operations rendered under `/pbm`. type PbmFetchCapabilitySchemaRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // Unique identifier for the vendor/owner of capability metadata. @@ -1768,19 +1853,21 @@ func init() { } // The parameters of `PbmComplianceManager.PbmFetchComplianceResult`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmFetchComplianceResultRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // One or more references to storage entities. // A maximum of 1000 virtual machines and/or virtual disks can be specified // in a call. The results of calling the fetchComplianceResult API with // more than a 1000 entities is undefined. - // - If the list of entities also contains datastores, the Server - // will ignore the datastores. - // - If the list contains valid and invalid entities, the Server ignores - // the invalid entities and returns results for the valid entities. - // Invalid entities are entities that are not in the vCenter inventory. - // - If the list contains only datastores, the method throws - // an InvalidArgument fault. + // - If the list of entities also contains datastores, the Server + // will ignore the datastores. + // - If the list contains valid and invalid entities, the Server ignores + // the invalid entities and returns results for the valid entities. + // Invalid entities are entities that are not in the vCenter inventory. + // - If the list contains only datastores, the method throws + // an InvalidArgument fault. Entities []PbmServerObjectRef `xml:"entities" json:"entities"` // Not used. if specified, the Server ignores the value. // The Server uses the profiles associated with the specified entities. @@ -1839,6 +1926,8 @@ func init() { } // The parameters of `PbmComplianceManager.PbmFetchRollupComplianceResult`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmFetchRollupComplianceResultRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // One or more virtual machines. @@ -1863,6 +1952,8 @@ func init() { } // The parameters of `PbmProfileProfileManager.PbmFetchVendorInfo`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmFetchVendorInfoRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // Specifies the resource type. The Server supports the STORAGE resource @@ -1886,6 +1977,8 @@ func init() { } // The parameters of `PbmProfileProfileManager.PbmFindApplicableDefaultProfile`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmFindApplicableDefaultProfileRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // Datastores for which the default profile is found out. Note that @@ -1965,6 +2058,7 @@ func init() { types.Add("pbm:PbmLineOfServiceInfo", reflect.TypeOf((*PbmLineOfServiceInfo)(nil)).Elem()) } +// This structure may be used only with operations rendered under `/pbm`. type PbmLoggingConfiguration struct { types.DynamicData @@ -2318,6 +2412,8 @@ func init() { } // The parameters of `PbmProfileProfileManager.PbmQueryAssociatedEntities`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmQueryAssociatedEntitiesRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // Storage policy array. 
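The compliance request types above are normally driven through the generated SOAP bindings. A hedged sketch of checking compliance for a single virtual machine follows; `pbm.NewClient`, the `methods`/`types` wrappers, and `ServiceContent.ComplianceManager` are assumed from the govmomi pbm packages (they are not part of these hunks), and the managed object ID and server UUID are caller-supplied placeholders.

package pbmexample

import (
	"context"

	"github.com/vmware/govmomi/pbm"
	"github.com/vmware/govmomi/pbm/methods"
	pbmtypes "github.com/vmware/govmomi/pbm/types"
	"github.com/vmware/govmomi/vim25"
)

// checkVMCompliance asks the ComplianceManager for the compliance results of
// one virtual machine. vmMoID (for example "vm-42") and serverUUID (the
// vCenter instance UUID) are placeholders supplied by the caller.
func checkVMCompliance(ctx context.Context, vc *vim25.Client, vmMoID, serverUUID string) ([]pbmtypes.PbmComplianceResult, error) {
	c, err := pbm.NewClient(ctx, vc)
	if err != nil {
		return nil, err
	}

	req := pbmtypes.PbmCheckCompliance{
		This: c.ServiceContent.ComplianceManager,
		Entities: []pbmtypes.PbmServerObjectRef{{
			ObjectType: "virtualMachine", // see PbmObjectType_enum
			Key:        vmMoID,
			ServerUuid: serverUUID,
		}},
	}

	res, err := methods.PbmCheckCompliance(ctx, c, &req)
	if err != nil {
		return nil, err
	}
	return res.Returnval, nil
}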
@@ -2339,6 +2435,8 @@ func init() { } // The parameters of `PbmProfileProfileManager.PbmQueryAssociatedEntity`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmQueryAssociatedEntityRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // Profile identifier. @@ -2365,6 +2463,8 @@ func init() { } // The parameters of `PbmProfileProfileManager.PbmQueryAssociatedProfile`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmQueryAssociatedProfileRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // Reference to a virtual machine, virtual disk, or datastore. @@ -2386,6 +2486,8 @@ func init() { } // The parameters of `PbmProfileProfileManager.PbmQueryAssociatedProfiles`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmQueryAssociatedProfilesRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // Array of server object references. @@ -2407,6 +2509,8 @@ func init() { } // The parameters of `PbmComplianceManager.PbmQueryByRollupComplianceStatus`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmQueryByRollupComplianceStatusRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // `PbmComplianceStatus_enum` @@ -2428,6 +2532,8 @@ func init() { } // The parameters of `PbmProfileProfileManager.PbmQueryDefaultRequirementProfile`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmQueryDefaultRequirementProfileRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // Placement hub (i.e. datastore). @@ -2449,6 +2555,8 @@ func init() { } // The parameters of `PbmProfileProfileManager.PbmQueryDefaultRequirementProfiles`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmQueryDefaultRequirementProfilesRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // The datastores for which the default profiles are requested. For @@ -2472,6 +2580,8 @@ func init() { } // The parameters of `PbmPlacementSolver.PbmQueryMatchingHub`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmQueryMatchingHubRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // Candidate list of hubs, either datastores or storage pods or a @@ -2497,6 +2607,8 @@ func init() { } // The parameters of `PbmPlacementSolver.PbmQueryMatchingHubWithSpec`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmQueryMatchingHubWithSpecRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // Candidate list of hubs, either datastores or storage @@ -2522,6 +2634,8 @@ func init() { } // The parameters of `PbmProfileProfileManager.PbmQueryProfile`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmQueryProfileRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // Type of resource. You can specify only STORAGE. @@ -2593,6 +2707,8 @@ func init() { } // The parameters of `PbmReplicationManager.PbmQueryReplicationGroups`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmQueryReplicationGroupsRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // Array of server object references. 
Valid types are @@ -2618,6 +2734,8 @@ func init() { } // The parameters of `PbmProfileProfileManager.PbmQuerySpaceStatsForStorageContainer`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmQuerySpaceStatsForStorageContainerRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // Entity for which space statistics are being requested i.e datastore. @@ -2643,6 +2761,8 @@ func init() { } // The parameters of `PbmProfileProfileManager.PbmResetDefaultRequirementProfile`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmResetDefaultRequirementProfileRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // Profile to reset. @@ -2706,6 +2826,8 @@ func init() { } // The parameters of `PbmProfileProfileManager.PbmRetrieveContent`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmRetrieveContentRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // Array of storage profile identifiers. @@ -2767,16 +2889,16 @@ type PbmRollupComplianceResult struct { // // The overall compliance status is determined by the following rules, applied in the order // listed: - // - If all the entities are compliant, the overall status is - // compliant. - // - Else if any entity's status is outOfDate, the overall status is - // outOfDate. - // - Else if any entity's status is nonCompliant, the overall status is - // nonCompliant. - // - Else if any entity's status is unknown, the overall status is - // unknown. - // - Else if any entity's status is notApplicable, the overall status is - // notApplicable. + // - If all the entities are compliant, the overall status is + // compliant. + // - Else if any entity's status is outOfDate, the overall status is + // outOfDate. + // - Else if any entity's status is nonCompliant, the overall status is + // nonCompliant. + // - Else if any entity's status is unknown, the overall status is + // unknown. + // - Else if any entity's status is notApplicable, the overall status is + // notApplicable. OverallComplianceStatus string `xml:"overallComplianceStatus" json:"overallComplianceStatus"` // Overall compliance task status of the virtual machine and its virtual // disks. @@ -2831,7 +2953,6 @@ type PbmServerObjectRef struct { // The value of key depends // on the objectType. // - // // // // @@ -2896,6 +3017,8 @@ func init() { } // The parameters of `PbmProfileProfileManager.PbmUpdate`. +// +// This structure may be used only with operations rendered under `/pbm`. type PbmUpdateRequestType struct { This types.ManagedObjectReference `xml:"_this" json:"_this"` // Profile identifier. diff --git a/vendor/github.com/vmware/govmomi/vapi/internal/internal.go b/vendor/github.com/vmware/govmomi/vapi/internal/internal.go index 2872f3803..5931fd4ab 100644 --- a/vendor/github.com/vmware/govmomi/vapi/internal/internal.go +++ b/vendor/github.com/vmware/govmomi/vapi/internal/internal.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2018-2023 VMware, Inc. All Rights Reserved. +Copyright (c) 2018-2024 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
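The hunks that follow add a content library item "storage" endpoint (LibraryItemStoragePath), a Storage type with List/Get accessors, and a PathFinder that resolves datastore paths. A minimal sketch of consuming the new listing call is shown here, assuming an authenticated `rest.Client`; the library item ID is a placeholder.

package libexample

import (
	"context"
	"fmt"

	"github.com/vmware/govmomi/vapi/library"
	"github.com/vmware/govmomi/vapi/rest"
)

// printItemStorage lists the storage backing of every file in a library item
// and prints the datastore ID and raw storage URIs reported by vCenter.
// itemID is a placeholder for an existing library item identifier.
func printItemStorage(ctx context.Context, rc *rest.Client, itemID string) error {
	m := library.NewManager(rc)

	storage, err := m.ListLibraryItemStorage(ctx, itemID)
	if err != nil {
		return err
	}

	for _, s := range storage {
		// Each entry carries the file name plus its backing datastore and URIs;
		// note the stored file name includes a generated uuid suffix.
		fmt.Println(s.Name, s.StorageBacking.DatastoreID, s.StorageURIs)
	}
	return nil
}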
@@ -31,6 +31,7 @@ const ( LibraryItemFileData = "/com/vmware/cis/data" LibraryItemPath = "/com/vmware/content/library/item" LibraryItemFilePath = "/com/vmware/content/library/item/file" + LibraryItemStoragePath = "/com/vmware/content/library/item/storage" LibraryItemUpdateSession = "/com/vmware/content/library/item/update-session" LibraryItemUpdateSessionFile = "/com/vmware/content/library/item/updatesession/file" LibraryItemDownloadSession = "/com/vmware/content/library/item/download-session" diff --git a/vendor/github.com/vmware/govmomi/vapi/library/finder/path.go b/vendor/github.com/vmware/govmomi/vapi/library/finder/path.go new file mode 100644 index 000000000..213e1ebd3 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vapi/library/finder/path.go @@ -0,0 +1,132 @@ +/* +Copyright (c) 2024-2024 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package finder + +import ( + "context" + "fmt" + "net/url" + "path" + + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vapi/library" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/types" +) + +// PathFinder is used to find the Datastore path of a library.Library, library.Item or library.File. +type PathFinder struct { + m *library.Manager + c *vim25.Client + cache map[string]string +} + +// NewPathFinder creates a new PathFinder instance. +func NewPathFinder(m *library.Manager, c *vim25.Client) *PathFinder { + return &PathFinder{ + m: m, + c: c, + cache: make(map[string]string), + } +} + +// Path returns the absolute datastore path for a Library, Item or File. +// The cache is used by DatastoreName(). 
+func (f *PathFinder) Path(ctx context.Context, r FindResult) (string, error) { + switch l := r.GetResult().(type) { + case library.Library: + id := "" + if len(l.Storage) != 0 { + var err error + id, err = f.datastoreName(ctx, l.Storage[0].DatastoreID) + if err != nil { + return "", err + } + } + return fmt.Sprintf("[%s] contentlib-%s", id, l.ID), nil + case library.Item: + p, err := f.Path(ctx, r.GetParent()) + if err != nil { + return "", err + } + return fmt.Sprintf("%s/%s", p, l.ID), nil + case library.File: + return f.getFileItemPath(ctx, r) + default: + return "", fmt.Errorf("unsupported type=%T", l) + } +} + +// getFileItemPath returns the absolute datastore path for a library.File +func (f *PathFinder) getFileItemPath(ctx context.Context, r FindResult) (string, error) { + name := r.GetName() + + dir, err := f.Path(ctx, r.GetParent()) + if err != nil { + return "", err + } + + p := path.Join(dir, name) + + lib := r.GetParent().GetParent().GetResult().(library.Library) + if len(lib.Storage) == 0 { + return p, nil + } + + // storage file name includes a uuid, for example: + // "ubuntu-14.04.6-server-amd64.iso" -> "ubuntu-14.04.6-server-amd64_0653e3f3-b4f4-41fb-9b72-c4102450e3dc.iso" + s, err := f.m.GetLibraryItemStorage(ctx, r.GetParent().GetID(), name) + if err != nil { + return p, err + } + // Currently there can only be 1 storage URI + if len(s) == 0 { + return p, nil + } + + uris := s[0].StorageURIs + if len(uris) == 0 { + return p, nil + } + u, err := url.Parse(uris[0]) + if err != nil { + return p, err + } + + return path.Join(dir, path.Base(u.Path)), nil +} + +// datastoreName returns the Datastore.Name for the given id. +func (f *PathFinder) datastoreName(ctx context.Context, id string) (string, error) { + if name, ok := f.cache[id]; ok { + return name, nil + } + + obj := types.ManagedObjectReference{ + Type: "Datastore", + Value: id, + } + + ds := object.NewDatastore(f.c, obj) + name, err := ds.ObjectName(ctx) + if err != nil { + return "", err + } + + f.cache[id] = name + return name, nil +} diff --git a/vendor/github.com/vmware/govmomi/vapi/library/library.go b/vendor/github.com/vmware/govmomi/vapi/library/library.go index c296a62aa..148464938 100644 --- a/vendor/github.com/vmware/govmomi/vapi/library/library.go +++ b/vendor/github.com/vmware/govmomi/vapi/library/library.go @@ -28,27 +28,28 @@ import ( "github.com/vmware/govmomi/vapi/rest" ) -// StorageBackings for Content Libraries -type StorageBackings struct { +// StorageBacking defines a storage location where content in a library will be stored. +type StorageBacking struct { DatastoreID string `json:"datastore_id,omitempty"` Type string `json:"type,omitempty"` + StorageURI string `json:"storage_uri,omitempty"` } // Library provides methods to create, read, update, delete, and enumerate libraries. 
type Library struct { - CreationTime *time.Time `json:"creation_time,omitempty"` - Description *string `json:"description,omitempty"` - ID string `json:"id,omitempty"` - LastModifiedTime *time.Time `json:"last_modified_time,omitempty"` - LastSyncTime *time.Time `json:"last_sync_time,omitempty"` - Name string `json:"name,omitempty"` - Storage []StorageBackings `json:"storage_backings,omitempty"` - Type string `json:"type,omitempty"` - Version string `json:"version,omitempty"` - Subscription *Subscription `json:"subscription_info,omitempty"` - Publication *Publication `json:"publish_info,omitempty"` - SecurityPolicyID string `json:"security_policy_id,omitempty"` - UnsetSecurityPolicyID bool `json:"unset_security_policy_id,omitempty"` + CreationTime *time.Time `json:"creation_time,omitempty"` + Description *string `json:"description,omitempty"` + ID string `json:"id,omitempty"` + LastModifiedTime *time.Time `json:"last_modified_time,omitempty"` + LastSyncTime *time.Time `json:"last_sync_time,omitempty"` + Name string `json:"name,omitempty"` + Storage []StorageBacking `json:"storage_backings,omitempty"` + Type string `json:"type,omitempty"` + Version string `json:"version,omitempty"` + Subscription *Subscription `json:"subscription_info,omitempty"` + Publication *Publication `json:"publish_info,omitempty"` + SecurityPolicyID string `json:"security_policy_id,omitempty"` + UnsetSecurityPolicyID bool `json:"unset_security_policy_id,omitempty"` } // Subscription info diff --git a/vendor/github.com/vmware/govmomi/vapi/library/library_item_storage.go b/vendor/github.com/vmware/govmomi/vapi/library/library_item_storage.go new file mode 100644 index 000000000..4e9fe9156 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vapi/library/library_item_storage.go @@ -0,0 +1,53 @@ +/* +Copyright (c) 2024-2024 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package library + +import ( + "context" + "net/http" + + "github.com/vmware/govmomi/vapi/internal" +) + +// Storage is an expanded form of library.File that includes details about the +// storage backing for a file in a library item +type Storage struct { + Checksum Checksum `json:"checksum_info,omitempty"` + StorageBacking StorageBacking `json:"storage_backing"` + StorageURIs []string `json:"storage_uris"` + Name string `json:"name"` + Size int64 `json:"size"` + Cached bool `json:"cached"` + Version string `json:"version"` +} + +// ListLibraryItemStorage returns a list of all the storage for a library item. +func (c *Manager) ListLibraryItemStorage(ctx context.Context, id string) ([]Storage, error) { + url := c.Resource(internal.LibraryItemStoragePath).WithParam("library_item_id", id) + var res []Storage + return res, c.Do(ctx, url.Request(http.MethodGet), &res) +} + +// GetLibraryItemStorage returns the storage for a specific file in a library item. 
+func (c *Manager) GetLibraryItemStorage(ctx context.Context, id, fileName string) ([]Storage, error) { + url := c.Resource(internal.LibraryItemStoragePath).WithID(id).WithAction("get") + spec := struct { + Name string `json:"file_name"` + }{fileName} + var res []Storage + return res, c.Do(ctx, url.Request(http.MethodPost, spec), &res) +} diff --git a/vendor/github.com/vmware/govmomi/vim25/client.go b/vendor/github.com/vmware/govmomi/vim25/client.go index 7349183ab..3daaf131a 100644 --- a/vendor/github.com/vmware/govmomi/vim25/client.go +++ b/vendor/github.com/vmware/govmomi/vim25/client.go @@ -28,7 +28,7 @@ import ( const ( Namespace = "vim25" - Version = "8.0.2.0" + Version = "8.0.3.0" Path = "/sdk" ) diff --git a/vendor/github.com/vmware/govmomi/vim25/methods/methods.go b/vendor/github.com/vmware/govmomi/vim25/methods/methods.go index 15b05f8a8..2bc99d218 100644 --- a/vendor/github.com/vmware/govmomi/vim25/methods/methods.go +++ b/vendor/github.com/vmware/govmomi/vim25/methods/methods.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2023 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2024 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -2543,6 +2543,26 @@ func CreateCollectorForTasks(ctx context.Context, r soap.RoundTripper, req *type return resBody.Res, nil } +type CreateCollectorWithInfoFilterForTasksBody struct { + Req *types.CreateCollectorWithInfoFilterForTasks `xml:"urn:vim25 CreateCollectorWithInfoFilterForTasks,omitempty"` + Res *types.CreateCollectorWithInfoFilterForTasksResponse `xml:"CreateCollectorWithInfoFilterForTasksResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *CreateCollectorWithInfoFilterForTasksBody) Fault() *soap.Fault { return b.Fault_ } + +func CreateCollectorWithInfoFilterForTasks(ctx context.Context, r soap.RoundTripper, req *types.CreateCollectorWithInfoFilterForTasks) (*types.CreateCollectorWithInfoFilterForTasksResponse, error) { + var reqBody, resBody CreateCollectorWithInfoFilterForTasksBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type CreateContainerViewBody struct { Req *types.CreateContainerView `xml:"urn:vim25 CreateContainerView,omitempty"` Res *types.CreateContainerViewResponse `xml:"CreateContainerViewResponse,omitempty"` @@ -6883,6 +6903,26 @@ func HostProfileResetValidationState(ctx context.Context, r soap.RoundTripper, r return resBody.Res, nil } +type HostQueryVirtualDiskUuidBody struct { + Req *types.HostQueryVirtualDiskUuid `xml:"urn:vim25 HostQueryVirtualDiskUuid,omitempty"` + Res *types.HostQueryVirtualDiskUuidResponse `xml:"HostQueryVirtualDiskUuidResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostQueryVirtualDiskUuidBody) Fault() *soap.Fault { return b.Fault_ } + +func HostQueryVirtualDiskUuid(ctx context.Context, r soap.RoundTripper, req *types.HostQueryVirtualDiskUuid) (*types.HostQueryVirtualDiskUuidResponse, error) { + var reqBody, resBody HostQueryVirtualDiskUuidBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type HostReconcileDatastoreInventory_TaskBody struct { Req *types.HostReconcileDatastoreInventory_Task `xml:"urn:vim25 HostReconcileDatastoreInventory_Task,omitempty"` 
Res *types.HostReconcileDatastoreInventory_TaskResponse `xml:"HostReconcileDatastoreInventory_TaskResponse,omitempty"` @@ -7123,6 +7163,26 @@ func HostSetVStorageObjectControlFlags(ctx context.Context, r soap.RoundTripper, return resBody.Res, nil } +type HostSetVirtualDiskUuid_TaskBody struct { + Req *types.HostSetVirtualDiskUuid_Task `xml:"urn:vim25 HostSetVirtualDiskUuid_Task,omitempty"` + Res *types.HostSetVirtualDiskUuid_TaskResponse `xml:"HostSetVirtualDiskUuid_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *HostSetVirtualDiskUuid_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func HostSetVirtualDiskUuid_Task(ctx context.Context, r soap.RoundTripper, req *types.HostSetVirtualDiskUuid_Task) (*types.HostSetVirtualDiskUuid_TaskResponse, error) { + var reqBody, resBody HostSetVirtualDiskUuid_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type HostSpecGetUpdatedHostsBody struct { Req *types.HostSpecGetUpdatedHosts `xml:"urn:vim25 HostSpecGetUpdatedHosts,omitempty"` Res *types.HostSpecGetUpdatedHostsResponse `xml:"HostSpecGetUpdatedHostsResponse,omitempty"` @@ -8963,6 +9023,26 @@ func MoveVirtualDisk_Task(ctx context.Context, r soap.RoundTripper, req *types.M return resBody.Res, nil } +type NotifyAffectedServicesBody struct { + Req *types.NotifyAffectedServices `xml:"urn:vim25 NotifyAffectedServices,omitempty"` + Res *types.NotifyAffectedServicesResponse `xml:"NotifyAffectedServicesResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *NotifyAffectedServicesBody) Fault() *soap.Fault { return b.Fault_ } + +func NotifyAffectedServices(ctx context.Context, r soap.RoundTripper, req *types.NotifyAffectedServices) (*types.NotifyAffectedServicesResponse, error) { + var reqBody, resBody NotifyAffectedServicesBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type OpenInventoryViewFolderBody struct { Req *types.OpenInventoryViewFolder `xml:"urn:vim25 OpenInventoryViewFolder,omitempty"` Res *types.OpenInventoryViewFolderResponse `xml:"OpenInventoryViewFolderResponse,omitempty"` @@ -9323,6 +9403,26 @@ func PromoteDisks_Task(ctx context.Context, r soap.RoundTripper, req *types.Prom return resBody.Res, nil } +type ProvisionServerPrivateKeyBody struct { + Req *types.ProvisionServerPrivateKey `xml:"urn:vim25 ProvisionServerPrivateKey,omitempty"` + Res *types.ProvisionServerPrivateKeyResponse `xml:"ProvisionServerPrivateKeyResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *ProvisionServerPrivateKeyBody) Fault() *soap.Fault { return b.Fault_ } + +func ProvisionServerPrivateKey(ctx context.Context, r soap.RoundTripper, req *types.ProvisionServerPrivateKey) (*types.ProvisionServerPrivateKeyResponse, error) { + var reqBody, resBody ProvisionServerPrivateKeyBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type PutUsbScanCodesBody struct { Req *types.PutUsbScanCodes `xml:"urn:vim25 PutUsbScanCodes,omitempty"` Res *types.PutUsbScanCodesResponse `xml:"PutUsbScanCodesResponse,omitempty"` @@ -11523,6 +11623,26 @@ func QueryVirtualDiskUuid(ctx context.Context, r soap.RoundTripper, req *types.Q return 
resBody.Res, nil } +type QueryVirtualDiskUuidExBody struct { + Req *types.QueryVirtualDiskUuidEx `xml:"urn:vim25 QueryVirtualDiskUuidEx,omitempty"` + Res *types.QueryVirtualDiskUuidExResponse `xml:"QueryVirtualDiskUuidExResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *QueryVirtualDiskUuidExBody) Fault() *soap.Fault { return b.Fault_ } + +func QueryVirtualDiskUuidEx(ctx context.Context, r soap.RoundTripper, req *types.QueryVirtualDiskUuidEx) (*types.QueryVirtualDiskUuidExResponse, error) { + var reqBody, resBody QueryVirtualDiskUuidExBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type QueryVmfsConfigOptionBody struct { Req *types.QueryVmfsConfigOption `xml:"urn:vim25 QueryVmfsConfigOption,omitempty"` Res *types.QueryVmfsConfigOptionResponse `xml:"QueryVmfsConfigOptionResponse,omitempty"` @@ -15763,6 +15883,26 @@ func SetVirtualDiskUuid(ctx context.Context, r soap.RoundTripper, req *types.Set return resBody.Res, nil } +type SetVirtualDiskUuidEx_TaskBody struct { + Req *types.SetVirtualDiskUuidEx_Task `xml:"urn:vim25 SetVirtualDiskUuidEx_Task,omitempty"` + Res *types.SetVirtualDiskUuidEx_TaskResponse `xml:"SetVirtualDiskUuidEx_TaskResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *SetVirtualDiskUuidEx_TaskBody) Fault() *soap.Fault { return b.Fault_ } + +func SetVirtualDiskUuidEx_Task(ctx context.Context, r soap.RoundTripper, req *types.SetVirtualDiskUuidEx_Task) (*types.SetVirtualDiskUuidEx_TaskResponse, error) { + var reqBody, resBody SetVirtualDiskUuidEx_TaskBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type ShrinkVirtualDisk_TaskBody struct { Req *types.ShrinkVirtualDisk_Task `xml:"urn:vim25 ShrinkVirtualDisk_Task,omitempty"` Res *types.ShrinkVirtualDisk_TaskResponse `xml:"ShrinkVirtualDisk_TaskResponse,omitempty"` @@ -18943,6 +19083,26 @@ func SetCustomValue(ctx context.Context, r soap.RoundTripper, req *types.SetCust return resBody.Res, nil } +type StartDpuFailoverBody struct { + Req *types.StartDpuFailover `xml:"urn:vim25 startDpuFailover,omitempty"` + Res *types.StartDpuFailoverResponse `xml:"startDpuFailoverResponse,omitempty"` + Fault_ *soap.Fault `xml:"http://schemas.xmlsoap.org/soap/envelope/ Fault,omitempty"` +} + +func (b *StartDpuFailoverBody) Fault() *soap.Fault { return b.Fault_ } + +func StartDpuFailover(ctx context.Context, r soap.RoundTripper, req *types.StartDpuFailover) (*types.StartDpuFailoverResponse, error) { + var reqBody, resBody StartDpuFailoverBody + + reqBody.Req = req + + if err := r.RoundTrip(ctx, &reqBody, &resBody); err != nil { + return nil, err + } + + return resBody.Res, nil +} + type UnregisterVApp_TaskBody struct { Req *types.UnregisterVApp_Task `xml:"urn:vim25 unregisterVApp_Task,omitempty"` Res *types.UnregisterVApp_TaskResponse `xml:"unregisterVApp_TaskResponse,omitempty"` diff --git a/vendor/github.com/vmware/govmomi/vim25/mo/mo.go b/vendor/github.com/vmware/govmomi/vim25/mo/mo.go index 91a042c1c..5b91ae47d 100644 --- a/vendor/github.com/vmware/govmomi/vim25/mo/mo.go +++ b/vendor/github.com/vmware/govmomi/vim25/mo/mo.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2014-2023 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2024 VMware, Inc. All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -210,7 +210,7 @@ type CustomizationSpecManager struct { Self types.ManagedObjectReference `json:"self"` Info []types.CustomizationSpecInfo `json:"info"` - EncryptionKey []byte `json:"encryptionKey"` + EncryptionKey types.ByteSlice `json:"encryptionKey"` } func (m CustomizationSpecManager) Reference() types.ManagedObjectReference { diff --git a/vendor/github.com/vmware/govmomi/vim25/mo/retrieve.go b/vendor/github.com/vmware/govmomi/vim25/mo/retrieve.go index 9f2b32486..66a8a9782 100644 --- a/vendor/github.com/vmware/govmomi/vim25/mo/retrieve.go +++ b/vendor/github.com/vmware/govmomi/vim25/mo/retrieve.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2014-2015 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2024 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -75,16 +75,19 @@ func ApplyPropertyChange(obj Reference, changes []types.PropertyChange) { v := reflect.ValueOf(obj) for _, p := range changes { - rv, ok := t.props[p.Name] - if !ok { - // For now, skip unknown properties allowing PC updates to be triggered - // for partial updates (e.g. extensionList["my.extension"]). - // Ultimately we should support partial updates by assigning the value - // reflectively in assignValue. - continue + var field Field + if !field.FromString(p.Name) { + panic(p.Name + ": invalid property path") } - assignValue(v, rv, reflect.ValueOf(p.Val)) + rv, ok := t.props[field.Path] + if !ok { + panic(field.Path + ": property not found") + } + + if field.Key == nil { // Key is only used for notifications + assignValue(v, rv, reflect.ValueOf(p.Val)) + } } } diff --git a/vendor/github.com/vmware/govmomi/vim25/mo/type_info.go b/vendor/github.com/vmware/govmomi/vim25/mo/type_info.go index 3b1ccce2d..21f59291e 100644 --- a/vendor/github.com/vmware/govmomi/vim25/mo/type_info.go +++ b/vendor/github.com/vmware/govmomi/vim25/mo/type_info.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2014 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2024 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -20,6 +20,7 @@ import ( "fmt" "reflect" "regexp" + "strconv" "strings" "sync" @@ -34,6 +35,9 @@ type typeInfo struct { // Map property names to field indices. props map[string][]int + + // Use base type for interface indices. 
+ base bool } var typeInfoLock sync.RWMutex @@ -62,12 +66,22 @@ func typeInfoForType(tname string) *typeInfo { return ti } -func newTypeInfo(typ reflect.Type) *typeInfo { +func baseType(ftyp reflect.Type) reflect.Type { + base := strings.TrimPrefix(ftyp.Name(), "Base") + if kind, ok := types.TypeFunc()(base); ok { + return kind + } + return ftyp +} + +func newTypeInfo(typ reflect.Type, base ...bool) *typeInfo { t := typeInfo{ typ: typ, props: make(map[string][]int), } - + if len(base) == 1 { + t.base = base[0] + } t.build(typ, "", []int{}) return &t @@ -155,6 +169,15 @@ func (t *typeInfo) build(typ reflect.Type, fn string, fi []int) { if ftyp.Kind() == reflect.Struct { t.build(ftyp, fnc, fic) } + + // Indexed property path may traverse into array element fields. + // When interface, use the base type to index fields. + // For example, BaseVirtualDevice: + // config.hardware.device[4000].deviceInfo.label + if t.base && ftyp.Kind() == reflect.Interface { + base := baseType(ftyp) + t.build(base, fnc, fic) + } } } @@ -164,7 +187,14 @@ var nilValue reflect.Value // slice of field indices. It recurses into the struct until it finds the field // specified by the indices. It creates new values for pointer types where // needed. -func assignValue(val reflect.Value, fi []int, pv reflect.Value) { +func assignValue(val reflect.Value, fi []int, pv reflect.Value, field ...string) { + // Indexed property path can only use base types + if val.Kind() == reflect.Interface { + base := baseType(val.Type()) + val.Set(reflect.New(base)) + val = val.Elem() + } + // Create new value if necessary. if val.Kind() == reflect.Ptr { if val.IsNil() { @@ -230,6 +260,43 @@ func assignValue(val reflect.Value, fi []int, pv reflect.Value) { rv.Set(pv) } else if rt.ConvertibleTo(pt) { rv.Set(pv.Convert(rt)) + } else if rt.Kind() == reflect.Slice { + // Indexed array value + path := field[0] + isInterface := rt.Elem().Kind() == reflect.Interface + + if len(path) == 0 { + // Append item (pv) directly to the array, converting to pointer if interface + if isInterface { + npv := reflect.New(pt) + npv.Elem().Set(pv) + pv = npv + pt = pv.Type() + } + } else { + // Construct item to be appended to the array, setting field within to value of pv + var item reflect.Value + if isInterface { + base := baseType(rt.Elem()) + item = reflect.New(base) + } else { + item = reflect.New(rt.Elem()) + } + + field := newTypeInfo(item.Type(), true) + if ix, ok := field.props[path]; ok { + assignValue(item, ix, pv) + } + + if rt.Elem().Kind() == reflect.Struct { + pv = item.Elem() + } else { + pv = item + } + pt = pv.Type() + } + + rv.Set(reflect.Append(rv, pv)) } else { panic(fmt.Sprintf("cannot assign %q (%s) to %q (%s)", rt.Name(), rt.Kind(), pt.Name(), pt.Kind())) } @@ -237,7 +304,7 @@ func assignValue(val reflect.Value, fi []int, pv reflect.Value) { return } - assignValue(rv, fi, pv) + assignValue(rv, fi, pv, field...) } var arrayOfRegexp = regexp.MustCompile("ArrayOf(.*)$") @@ -250,11 +317,14 @@ func (t *typeInfo) LoadFromObjectContent(o types.ObjectContent) (reflect.Value, assignValue(v, t.self, reflect.ValueOf(o.Obj)) for _, p := range o.PropSet { - rv, ok := t.props[p.Name] + var field Field + field.FromString(p.Name) + + rv, ok := t.props[field.Path] if !ok { continue } - assignValue(v, rv, reflect.ValueOf(p.Val)) + assignValue(v, rv, reflect.ValueOf(p.Val), field.Item) } return v, nil @@ -264,3 +334,70 @@ func IsManagedObjectType(kind string) bool { _, ok := t[kind] return ok } + +// Field of a ManagedObject in string form. 
+type Field struct { + Path string + Key any + Item string +} + +func (f *Field) String() string { + if f.Key == nil { + return f.Path + } + + var key, item string + + switch f.Key.(type) { + case string: + key = fmt.Sprintf("%q", f.Key) + default: + key = fmt.Sprintf("%d", f.Key) + } + + if f.Item != "" { + item = "." + f.Item + } + + return fmt.Sprintf("%s[%s]%s", f.Path, key, item) +} + +func (f *Field) FromString(spec string) bool { + s := strings.SplitN(spec, "[", 2) + f.Path = s[0] + f.Key = nil + f.Item = "" + if len(s) == 1 { + return true + } + + parts := strings.SplitN(s[1], "]", 2) + + if len(parts) != 2 { + return false + } + + ix := strings.Trim(parts[0], `"`) + + if ix == parts[0] { + v, err := strconv.ParseInt(ix, 0, 32) + if err != nil { + return false + } + f.Key = int32(v) + } else { + f.Key = ix + } + + if parts[1] == "" { + return true + } + + if parts[1][0] != '.' { + return false + } + f.Item = parts[1][1:] + + return true +} diff --git a/vendor/github.com/vmware/govmomi/vim25/types/byte_slice.go b/vendor/github.com/vmware/govmomi/vim25/types/byte_slice.go new file mode 100644 index 000000000..a19a0bb23 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/types/byte_slice.go @@ -0,0 +1,67 @@ +/* +Copyright (c) 2024-2024 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +import ( + "fmt" + "io" + "math" + "strconv" + + "github.com/vmware/govmomi/vim25/xml" +) + +// ByteSlice implements vCenter compatibile xml encoding and decoding for a byte slice. +// vCenter encodes each byte of the array in its own xml element, whereas +// Go encodes the entire byte array in a single xml element. +type ByteSlice []byte + +// MarshalXML implements xml.Marshaler +func (b ByteSlice) MarshalXML(e *xml.Encoder, field xml.StartElement) error { + start := xml.StartElement{ + Name: field.Name, + } + for i := range b { + if err := e.EncodeElement(b[i], start); err != nil { + return err + } + } + return nil +} + +// UnmarshalXML implements xml.Unmarshaler +func (b *ByteSlice) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + for { + t, err := d.Token() + if err == io.EOF { + break + } + + if c, ok := t.(xml.CharData); ok { + n, err := strconv.ParseInt(string(c), 10, 16) + if err != nil { + return err + } + if n > math.MaxUint8 { + return fmt.Errorf("parsing %q: uint8 overflow", start.Name.Local) + } + *b = append(*b, byte(n)) + } + } + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/vim25/types/configspec.go b/vendor/github.com/vmware/govmomi/vim25/types/configspec.go new file mode 100644 index 000000000..a1c8404c6 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/vim25/types/configspec.go @@ -0,0 +1,609 @@ +/* +Copyright (c) 2024-2024 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +import ( + "fmt" +) + +// EnsureDisksHaveControllers ensures that all disks in the provided +// ConfigSpec point to a controller. If no controller exists, LSILogic SCSI +// controllers are added to the ConfigSpec as necessary for the disks. +// +// Please note the following table for the number of controllers of each type +// that are supported as well as how many disks (per controller) each supports: +// +// SATA +// - controllers 4 +// - disks 30 +// +// SCSI +// - controllers 4 +// - disks (non-paravirtual) 16 +// - disks (paravirtual, hardware version <14) 16 +// - disks (paravirtual, hardware version >=14) 256 +// +// NVME +// - controllers 4 +// - disks (hardware version <20) 15 +// - disks (hardware version >=21) 255 +func (cs *VirtualMachineConfigSpec) EnsureDisksHaveControllers( + existingDevices ...BaseVirtualDevice) error { + + if cs == nil { + panic("configSpec is nil") + } + + var ( + disks []*VirtualDisk + newDeviceKey int32 + pciController *VirtualPCIController + diskControllers = ensureDiskControllerData{ + controllerKeys: map[int32]BaseVirtualController{}, + controllerKeysToAttachedDisks: map[int32]int{}, + } + ) + + // Inspect the ConfigSpec + for i := range cs.DeviceChange { + var ( + bdc BaseVirtualDeviceConfigSpec + bvd BaseVirtualDevice + dc *VirtualDeviceConfigSpec + d *VirtualDevice + ) + + if bdc = cs.DeviceChange[i]; bdc == nil { + continue + } + + if dc = bdc.GetVirtualDeviceConfigSpec(); dc == nil { + continue + } + + if dc.Operation == VirtualDeviceConfigSpecOperationRemove { + // Do not consider devices being removed. + continue + } + + bvd = dc.Device + if bvd == nil { + continue + } + + if d = bvd.GetVirtualDevice(); d == nil { + continue + } + + switch tvd := bvd.(type) { + case *VirtualPCIController: + pciController = tvd + + case + // SCSI + *ParaVirtualSCSIController, + *VirtualBusLogicController, + *VirtualLsiLogicController, + *VirtualLsiLogicSASController, + *VirtualSCSIController, + + // SATA + *VirtualSATAController, + *VirtualAHCIController, + + // NVME + *VirtualNVMEController: + + diskControllers.add(bvd) + + case *VirtualDisk: + + disks = append(disks, tvd) + + if controllerKey := d.ControllerKey; controllerKey != 0 { + // If the disk points to a controller key, then increment + // the number of devices attached to that controller. + // + // Please note that at this point it is not yet known if the + // controller key is a *valid* controller. + diskControllers.attach(controllerKey) + } + } + + // Keep track of the smallest device key used. Please note, because + // device keys in a ConfigSpec are negative numbers, -200 going to be + // smaller than -1. + if d.Key < newDeviceKey { + newDeviceKey = d.Key + } + } + + if len(disks) == 0 { + // If there are no disks, then go ahead and return early. + return nil + } + + // Categorize any controllers that already exist. 
+ for i := range existingDevices { + var ( + d *VirtualDevice + bvd = existingDevices[i] + ) + + if bvd == nil { + continue + } + + if d = bvd.GetVirtualDevice(); d == nil { + continue + } + + switch tvd := bvd.(type) { + case *VirtualPCIController: + pciController = tvd + case + // SCSI + *ParaVirtualSCSIController, + *VirtualBusLogicController, + *VirtualLsiLogicController, + *VirtualLsiLogicSASController, + *VirtualSCSIController, + + // SATA + *VirtualSATAController, + *VirtualAHCIController, + + // NVME + *VirtualNVMEController: + + diskControllers.add(bvd) + + case *VirtualDisk: + diskControllers.attach(tvd.ControllerKey) + } + } + + // Decrement the newDeviceKey so the next device has a unique key. + newDeviceKey-- + + if pciController == nil { + // Add a PCI controller if one is not present. + pciController = &VirtualPCIController{ + VirtualController: VirtualController{ + VirtualDevice: VirtualDevice{ + Key: newDeviceKey, + }, + }, + } + + // Decrement the newDeviceKey so the next device has a unique key. + newDeviceKey-- + + // Add the new PCI controller to the ConfigSpec. + cs.DeviceChange = append( + cs.DeviceChange, + &VirtualDeviceConfigSpec{ + Operation: VirtualDeviceConfigSpecOperationAdd, + Device: pciController, + }) + } + + // Ensure all the recorded controller keys that point to disks are actually + // valid controller keys. + diskControllers.validateAttachments() + + for i := range disks { + disk := disks[i] + + // If the disk already points to a controller then skip to the next + // disk. + if diskControllers.exists(disk.ControllerKey) { + continue + } + + // The disk does not point to a controller, so try to locate one. + if ensureDiskControllerFind(disk, &diskControllers) { + // A controller was located for the disk, so go ahead and skip to + // the next disk. + continue + } + + // No controller was located for the disk, so a controller must be + // created. + if err := ensureDiskControllerCreate( + cs, + pciController, + newDeviceKey, + &diskControllers); err != nil { + + return err + } + + // Point the disk to the new controller. + disk.ControllerKey = newDeviceKey + + // Add the controller key to the map that tracks how many disks are + // attached to a given controller. + diskControllers.attach(newDeviceKey) + + // Decrement the newDeviceKey so the next device has a unique key. + newDeviceKey-- + } + + return nil +} + +const ( + maxSCSIControllers = 4 + maxSATAControllers = 4 + maxNVMEControllers = 4 + maxDisksPerSCSIController = 16 + maxDisksPerPVSCSIControllerHWVersion14 = 256 // TODO(akutz) + maxDisksPerSATAController = 30 + maxDisksPerNVMEController = 15 + maxDisksPerNVMEControllerHWVersion21 = 255 // TODO(akutz) +) + +type ensureDiskControllerBusNumbers struct { + zero bool + one bool + two bool +} + +func (d ensureDiskControllerBusNumbers) free() int32 { + switch { + case !d.zero: + return 0 + case !d.one: + return 1 + case !d.two: + return 2 + default: + return 3 + } +} + +func (d *ensureDiskControllerBusNumbers) set(busNumber int32) { + switch busNumber { + case 0: + d.zero = true + case 1: + d.one = true + case 2: + d.two = true + } +} + +type ensureDiskControllerData struct { + // TODO(akutz) Use the hardware version when calculating the max disks for + // a given controller type. 
+ // hardwareVersion int + + controllerKeys map[int32]BaseVirtualController + controllerKeysToAttachedDisks map[int32]int + + // SCSI + scsiBusNumbers ensureDiskControllerBusNumbers + pvSCSIControllerKeys []int32 + busLogicSCSIControllerKeys []int32 + lsiLogicControllerKeys []int32 + lsiLogicSASControllerKeys []int32 + scsiControllerKeys []int32 + + // SATA + sataBusNumbers ensureDiskControllerBusNumbers + sataControllerKeys []int32 + ahciControllerKeys []int32 + + // NVME + nvmeBusNumbers ensureDiskControllerBusNumbers + nvmeControllerKeys []int32 +} + +func (d ensureDiskControllerData) numSCSIControllers() int { + return len(d.pvSCSIControllerKeys) + + len(d.busLogicSCSIControllerKeys) + + len(d.lsiLogicControllerKeys) + + len(d.lsiLogicSASControllerKeys) + + len(d.scsiControllerKeys) +} + +func (d ensureDiskControllerData) numSATAControllers() int { + return len(d.sataControllerKeys) + len(d.ahciControllerKeys) +} + +func (d ensureDiskControllerData) numNVMEControllers() int { + return len(d.nvmeControllerKeys) +} + +// validateAttachments ensures the attach numbers are correct by removing any +// keys from controllerKeysToAttachedDisks that do not also exist in +// controllerKeys. +func (d ensureDiskControllerData) validateAttachments() { + // Remove any invalid controllers from controllerKeyToNumDiskMap. + for key := range d.controllerKeysToAttachedDisks { + if _, ok := d.controllerKeys[key]; !ok { + delete(d.controllerKeysToAttachedDisks, key) + } + } +} + +// exists returns true if a controller with the provided key exists. +func (d ensureDiskControllerData) exists(key int32) bool { + return d.controllerKeys[key] != nil +} + +// add records the provided controller in the map that relates keys to +// controllers as well as appends the key to the list of controllers of that +// given type. +func (d *ensureDiskControllerData) add(controller BaseVirtualDevice) { + + // Get the controller's device key. + bvc := controller.(BaseVirtualController) + key := bvc.GetVirtualController().Key + busNumber := bvc.GetVirtualController().BusNumber + + // Record the controller's device key in the controller key map. + d.controllerKeys[key] = bvc + + // Record the controller's device key in the list for that type of + // controller. + switch controller.(type) { + + // SCSI + case *ParaVirtualSCSIController: + d.pvSCSIControllerKeys = append(d.pvSCSIControllerKeys, key) + d.scsiBusNumbers.set(busNumber) + case *VirtualBusLogicController: + d.busLogicSCSIControllerKeys = append(d.busLogicSCSIControllerKeys, key) + d.scsiBusNumbers.set(busNumber) + case *VirtualLsiLogicController: + d.lsiLogicControllerKeys = append(d.lsiLogicControllerKeys, key) + d.scsiBusNumbers.set(busNumber) + case *VirtualLsiLogicSASController: + d.lsiLogicSASControllerKeys = append(d.lsiLogicSASControllerKeys, key) + d.scsiBusNumbers.set(busNumber) + case *VirtualSCSIController: + d.scsiControllerKeys = append(d.scsiControllerKeys, key) + d.scsiBusNumbers.set(busNumber) + + // SATA + case *VirtualSATAController: + d.sataControllerKeys = append(d.sataControllerKeys, key) + d.sataBusNumbers.set(busNumber) + case *VirtualAHCIController: + d.ahciControllerKeys = append(d.ahciControllerKeys, key) + d.sataBusNumbers.set(busNumber) + + // NVME + case *VirtualNVMEController: + d.nvmeControllerKeys = append(d.nvmeControllerKeys, key) + d.nvmeBusNumbers.set(busNumber) + } +} + +// attach increments the number of disks attached to the controller identified +// by the provided controller key. 
+func (d *ensureDiskControllerData) attach(controllerKey int32) { + d.controllerKeysToAttachedDisks[controllerKey]++ +} + +// hasFreeSlot returns whether or not the controller identified by the provided +// controller key has a free slot to attach a disk. +// +// TODO(akutz) Consider the hardware version when calculating these values. +func (d *ensureDiskControllerData) hasFreeSlot(controllerKey int32) bool { + + var maxDisksForType int + + switch d.controllerKeys[controllerKey].(type) { + case + // SCSI (paravirtual) + *ParaVirtualSCSIController: + + maxDisksForType = maxDisksPerSCSIController + + case + // SCSI (non-paravirtual) + *VirtualBusLogicController, + *VirtualLsiLogicController, + *VirtualLsiLogicSASController, + *VirtualSCSIController: + + maxDisksForType = maxDisksPerSCSIController + + case + // SATA + *VirtualSATAController, + *VirtualAHCIController: + + maxDisksForType = maxDisksPerSATAController + + case + // NVME + *VirtualNVMEController: + + maxDisksForType = maxDisksPerNVMEController + } + + return d.controllerKeysToAttachedDisks[controllerKey] < maxDisksForType-1 +} + +// ensureDiskControllerFind attempts to locate a controller for the provided +// disk. +// +// Please note this function is written to preserve the order in which +// controllers are located by preferring controller types in the order in which +// they are listed in this function. This prevents the following situation: +// +// - A ConfigSpec has three controllers in the following order: PVSCSI-1, +// NVME-1, and PVSCSI-2. +// - The controller PVSCSI-1 is full while NVME-1 and PVSCSI-2 have free +// slots. +// - The *desired* behavior is to look at all, possible PVSCSI controllers +// before moving onto SATA and then finally NVME controllers. +// - If the function iterated over the device list in list-order, then the +// NVME-1 controller would be located first. +// - Instead, this function iterates over each *type* of controller first +// before moving onto the next type. +// - This means that even though NVME-1 has free slots, PVSCSI-2 is checked +// first. 
+// +// The order of preference is as follows: +// +// * SCSI +// - ParaVirtualSCSIController +// - VirtualBusLogicController +// - VirtualLsiLogicController +// - VirtualLsiLogicSASController +// - VirtualSCSIController +// +// * SATA +// - VirtualSATAController +// - VirtualAHCIController +// +// * NVME +// - VirtualNVMEController +func ensureDiskControllerFind( + disk *VirtualDisk, + diskControllers *ensureDiskControllerData) bool { + + return false || + // SCSI + ensureDiskControllerFindWith( + disk, + diskControllers, + diskControllers.pvSCSIControllerKeys) || + ensureDiskControllerFindWith( + disk, + diskControllers, + diskControllers.busLogicSCSIControllerKeys) || + ensureDiskControllerFindWith( + disk, + diskControllers, + diskControllers.lsiLogicControllerKeys) || + ensureDiskControllerFindWith( + disk, + diskControllers, + diskControllers.lsiLogicSASControllerKeys) || + ensureDiskControllerFindWith( + disk, + diskControllers, + diskControllers.scsiControllerKeys) || + + // SATA + ensureDiskControllerFindWith( + disk, + diskControllers, + diskControllers.sataControllerKeys) || + ensureDiskControllerFindWith( + disk, + diskControllers, + diskControllers.ahciControllerKeys) || + + // NVME + ensureDiskControllerFindWith( + disk, + diskControllers, + diskControllers.nvmeControllerKeys) +} + +func ensureDiskControllerFindWith( + disk *VirtualDisk, + diskControllers *ensureDiskControllerData, + controllerKeys []int32) bool { + + for i := range controllerKeys { + controllerKey := controllerKeys[i] + if diskControllers.hasFreeSlot(controllerKey) { + // If the controller has room for another disk, then use this + // controller for the current disk. + disk.ControllerKey = controllerKey + diskControllers.attach(controllerKey) + return true + } + } + return false +} + +func ensureDiskControllerCreate( + configSpec *VirtualMachineConfigSpec, + pciController *VirtualPCIController, + newDeviceKey int32, + diskControllers *ensureDiskControllerData) error { + + var controller BaseVirtualDevice + switch { + case diskControllers.numSCSIControllers() < maxSCSIControllers: + // Prefer creating a new SCSI controller. + controller = &ParaVirtualSCSIController{ + VirtualSCSIController: VirtualSCSIController{ + VirtualController: VirtualController{ + VirtualDevice: VirtualDevice{ + ControllerKey: pciController.Key, + Key: newDeviceKey, + }, + BusNumber: diskControllers.scsiBusNumbers.free(), + }, + HotAddRemove: NewBool(true), + SharedBus: VirtualSCSISharingNoSharing, + }, + } + case diskControllers.numSATAControllers() < maxSATAControllers: + // If there are no more SCSI controllers, create a SATA + // controller. + controller = &VirtualAHCIController{ + VirtualSATAController: VirtualSATAController{ + VirtualController: VirtualController{ + VirtualDevice: VirtualDevice{ + ControllerKey: pciController.Key, + Key: newDeviceKey, + }, + BusNumber: diskControllers.sataBusNumbers.free(), + }, + }, + } + case diskControllers.numNVMEControllers() < maxNVMEControllers: + // If there are no more SATA controllers, create an NVME + // controller. + controller = &VirtualNVMEController{ + VirtualController: VirtualController{ + VirtualDevice: VirtualDevice{ + ControllerKey: pciController.Key, + Key: newDeviceKey, + }, + BusNumber: diskControllers.nvmeBusNumbers.free(), + }, + SharedBus: string(VirtualNVMEControllerSharingNoSharing), + } + default: + return fmt.Errorf("no controllers available") + } + + // Add the new controller to the ConfigSpec. 
+ configSpec.DeviceChange = append( + configSpec.DeviceChange, + &VirtualDeviceConfigSpec{ + Operation: VirtualDeviceConfigSpecOperationAdd, + Device: controller, + }) + + // Record the new controller. + diskControllers.add(controller) + + return nil +} diff --git a/vendor/github.com/vmware/govmomi/vim25/types/enum.go b/vendor/github.com/vmware/govmomi/vim25/types/enum.go index c3ca1409c..ea07f156b 100644 --- a/vendor/github.com/vmware/govmomi/vim25/types/enum.go +++ b/vendor/github.com/vmware/govmomi/vim25/types/enum.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2023 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2024 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -49,6 +49,24 @@ const ( ActionParameterAlarm = ActionParameter("alarm") ) +func (e ActionParameter) Values() []ActionParameter { + return []ActionParameter{ + ActionParameterTargetName, + ActionParameterAlarmName, + ActionParameterOldStatus, + ActionParameterNewStatus, + ActionParameterTriggeringSummary, + ActionParameterDeclaringSummary, + ActionParameterEventDescription, + ActionParameterTarget, + ActionParameterAlarm, + } +} + +func (e ActionParameter) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ActionParameter"] = reflect.TypeOf((*ActionParameter)(nil)).Elem() } @@ -56,6 +74,7 @@ func init() { // Pre-defined constants for possible action types. // // Virtual Center +// uses this information to coordinate with the clients. type ActionType string const ( @@ -77,16 +96,25 @@ const ( ActionTypeHostInfraUpdateHaV1 = ActionType("HostInfraUpdateHaV1") ) +func (e ActionType) Values() []ActionType { + return []ActionType{ + ActionTypeMigrationV1, + ActionTypeVmPowerV1, + ActionTypeHostPowerV1, + ActionTypeHostMaintenanceV1, + ActionTypeStorageMigrationV1, + ActionTypeStoragePlacementV1, + ActionTypePlacementV1, + ActionTypeHostInfraUpdateHaV1, + } +} + +func (e ActionType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ActionType"] = reflect.TypeOf((*ActionType)(nil)).Elem() - minAPIVersionForType["ActionType"] = "2.5" - minAPIVersionForEnumValue["ActionType"] = map[string]string{ - "HostMaintenanceV1": "5.0", - "StorageMigrationV1": "5.0", - "StoragePlacementV1": "5.0", - "PlacementV1": "6.0", - "HostInfraUpdateHaV1": "6.5", - } } // Types of affinities. 
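For readers skimming the new disk-controller helpers above: when a disk has no usable controller, ensureDiskControllerCreate prefers a new ParaVirtualSCSI controller (then AHCI/SATA, then NVMe), parents it under the PCI controller, and points the disk at a temporary device key. A minimal sketch of the resulting ConfigSpec shape follows; the concrete keys, the disk capacity, and the main wrapper are illustrative assumptions, not part of this change.

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	const (
		pciControllerKey = int32(100) // assumed key of the VM's existing PCI controller
		newControllerKey = int32(-1)  // temporary key for the controller being added
	)

	// New ParaVirtualSCSI controller, mirroring what ensureDiskControllerCreate builds.
	scsi := &types.ParaVirtualSCSIController{
		VirtualSCSIController: types.VirtualSCSIController{
			VirtualController: types.VirtualController{
				VirtualDevice: types.VirtualDevice{
					ControllerKey: pciControllerKey,
					Key:           newControllerKey,
				},
				BusNumber: 0,
			},
			HotAddRemove: types.NewBool(true),
			SharedBus:    types.VirtualSCSISharingNoSharing,
		},
	}

	// A disk attached to the new controller via its temporary key.
	disk := &types.VirtualDisk{
		VirtualDevice: types.VirtualDevice{
			Key:           newControllerKey - 1, // next temporary key
			ControllerKey: newControllerKey,
		},
		CapacityInBytes: 10 * 1024 * 1024 * 1024, // 10 GiB, illustrative only
	}

	spec := types.VirtualMachineConfigSpec{
		DeviceChange: []types.BaseVirtualDeviceConfigSpec{
			&types.VirtualDeviceConfigSpec{
				Operation: types.VirtualDeviceConfigSpecOperationAdd,
				Device:    scsi,
			},
			&types.VirtualDeviceConfigSpec{
				Operation:     types.VirtualDeviceConfigSpecOperationAdd,
				FileOperation: types.VirtualDeviceConfigSpecFileOperationCreate,
				Device:        disk,
			},
		},
	}

	fmt.Printf("%d device changes\n", len(spec.DeviceChange))
}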
@@ -97,6 +125,17 @@ const ( AffinityTypeCpu = AffinityType("cpu") ) +func (e AffinityType) Values() []AffinityType { + return []AffinityType{ + AffinityTypeMemory, + AffinityTypeCpu, + } +} + +func (e AffinityType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["AffinityType"] = reflect.TypeOf((*AffinityType)(nil)).Elem() } @@ -124,11 +163,29 @@ const ( AgentInstallFailedReasonUnknownInstallerError = AgentInstallFailedReason("UnknownInstallerError") ) -func init() { - t["AgentInstallFailedReason"] = reflect.TypeOf((*AgentInstallFailedReason)(nil)).Elem() - minAPIVersionForType["AgentInstallFailedReason"] = "4.0" +func (e AgentInstallFailedReason) Values() []AgentInstallFailedReason { + return []AgentInstallFailedReason{ + AgentInstallFailedReasonNotEnoughSpaceOnDevice, + AgentInstallFailedReasonPrepareToUpgradeFailed, + AgentInstallFailedReasonAgentNotRunning, + AgentInstallFailedReasonAgentNotReachable, + AgentInstallFailedReasonInstallTimedout, + AgentInstallFailedReasonSignatureVerificationFailed, + AgentInstallFailedReasonAgentUploadFailed, + AgentInstallFailedReasonAgentUploadTimedout, + AgentInstallFailedReasonUnknownInstallerError, + } } +func (e AgentInstallFailedReason) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["AgentInstallFailedReason"] = reflect.TypeOf((*AgentInstallFailedReason)(nil)).Elem() +} + +// Alarm entity type type AlarmFilterSpecAlarmTypeByEntity string const ( @@ -140,14 +197,26 @@ const ( AlarmFilterSpecAlarmTypeByEntityEntityTypeVm = AlarmFilterSpecAlarmTypeByEntity("entityTypeVm") ) +func (e AlarmFilterSpecAlarmTypeByEntity) Values() []AlarmFilterSpecAlarmTypeByEntity { + return []AlarmFilterSpecAlarmTypeByEntity{ + AlarmFilterSpecAlarmTypeByEntityEntityTypeAll, + AlarmFilterSpecAlarmTypeByEntityEntityTypeHost, + AlarmFilterSpecAlarmTypeByEntityEntityTypeVm, + } +} + +func (e AlarmFilterSpecAlarmTypeByEntity) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["AlarmFilterSpecAlarmTypeByEntity"] = reflect.TypeOf((*AlarmFilterSpecAlarmTypeByEntity)(nil)).Elem() - minAPIVersionForType["AlarmFilterSpecAlarmTypeByEntity"] = "6.7" } // Alarm triggering type. // // The main divisions are event triggered and +// metric- or state-based alarms. type AlarmFilterSpecAlarmTypeByTrigger string const ( @@ -159,11 +228,23 @@ const ( AlarmFilterSpecAlarmTypeByTriggerTriggerTypeMetric = AlarmFilterSpecAlarmTypeByTrigger("triggerTypeMetric") ) -func init() { - t["AlarmFilterSpecAlarmTypeByTrigger"] = reflect.TypeOf((*AlarmFilterSpecAlarmTypeByTrigger)(nil)).Elem() - minAPIVersionForType["AlarmFilterSpecAlarmTypeByTrigger"] = "6.7" +func (e AlarmFilterSpecAlarmTypeByTrigger) Values() []AlarmFilterSpecAlarmTypeByTrigger { + return []AlarmFilterSpecAlarmTypeByTrigger{ + AlarmFilterSpecAlarmTypeByTriggerTriggerTypeAll, + AlarmFilterSpecAlarmTypeByTriggerTriggerTypeEvent, + AlarmFilterSpecAlarmTypeByTriggerTriggerTypeMetric, + } } +func (e AlarmFilterSpecAlarmTypeByTrigger) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["AlarmFilterSpecAlarmTypeByTrigger"] = reflect.TypeOf((*AlarmFilterSpecAlarmTypeByTrigger)(nil)).Elem() +} + +// Defines the result status values for a validating answer file. 
type AnswerFileValidationInfoStatus string const ( @@ -175,9 +256,20 @@ const ( AnswerFileValidationInfoStatusFailed_defaults = AnswerFileValidationInfoStatus("failed_defaults") ) +func (e AnswerFileValidationInfoStatus) Values() []AnswerFileValidationInfoStatus { + return []AnswerFileValidationInfoStatus{ + AnswerFileValidationInfoStatusSuccess, + AnswerFileValidationInfoStatusFailed, + AnswerFileValidationInfoStatusFailed_defaults, + } +} + +func (e AnswerFileValidationInfoStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["AnswerFileValidationInfoStatus"] = reflect.TypeOf((*AnswerFileValidationInfoStatus)(nil)).Elem() - minAPIVersionForType["AnswerFileValidationInfoStatus"] = "6.7" } type ApplyHostProfileConfigurationResultStatus string @@ -207,9 +299,25 @@ const ( ApplyHostProfileConfigurationResultStatusCanceled = ApplyHostProfileConfigurationResultStatus("canceled") ) +func (e ApplyHostProfileConfigurationResultStatus) Values() []ApplyHostProfileConfigurationResultStatus { + return []ApplyHostProfileConfigurationResultStatus{ + ApplyHostProfileConfigurationResultStatusSuccess, + ApplyHostProfileConfigurationResultStatusFailed, + ApplyHostProfileConfigurationResultStatusReboot_failed, + ApplyHostProfileConfigurationResultStatusStateless_reboot_failed, + ApplyHostProfileConfigurationResultStatusCheck_compliance_failed, + ApplyHostProfileConfigurationResultStatusState_not_satisfied, + ApplyHostProfileConfigurationResultStatusExit_maintenancemode_failed, + ApplyHostProfileConfigurationResultStatusCanceled, + } +} + +func (e ApplyHostProfileConfigurationResultStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ApplyHostProfileConfigurationResultStatus"] = reflect.TypeOf((*ApplyHostProfileConfigurationResultStatus)(nil)).Elem() - minAPIVersionForType["ApplyHostProfileConfigurationResultStatus"] = "6.5" } // This list specifies the type of operation being performed on the array. 
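The pattern repeated throughout the rest of this enum.go diff is mechanical: each enum type gains Values() and Strings() helpers, and most of the per-type minAPIVersionForType registrations are dropped. A minimal sketch of how calling code might use the new helpers (standard govmomi import path assumed):

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	// Enumerate every defined value of an enum as typed constants.
	for _, v := range types.ActionType("").Values() {
		fmt.Println(v)
	}

	// Or as plain strings, e.g. for validating user-supplied input.
	fmt.Println(types.AffinityType("").Strings()) // [memory cpu]
}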
@@ -228,6 +336,18 @@ const ( ArrayUpdateOperationEdit = ArrayUpdateOperation("edit") ) +func (e ArrayUpdateOperation) Values() []ArrayUpdateOperation { + return []ArrayUpdateOperation{ + ArrayUpdateOperationAdd, + ArrayUpdateOperationRemove, + ArrayUpdateOperationEdit, + } +} + +func (e ArrayUpdateOperation) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ArrayUpdateOperation"] = reflect.TypeOf((*ArrayUpdateOperation)(nil)).Elem() } @@ -260,6 +380,21 @@ const ( AutoStartActionSuspend = AutoStartAction("suspend") ) +func (e AutoStartAction) Values() []AutoStartAction { + return []AutoStartAction{ + AutoStartActionNone, + AutoStartActionSystemDefault, + AutoStartActionPowerOn, + AutoStartActionPowerOff, + AutoStartActionGuestShutdown, + AutoStartActionSuspend, + } +} + +func (e AutoStartAction) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["AutoStartAction"] = reflect.TypeOf((*AutoStartAction)(nil)).Elem() } @@ -288,10 +423,23 @@ const ( AutoStartWaitHeartbeatSettingSystemDefault = AutoStartWaitHeartbeatSetting("systemDefault") ) +func (e AutoStartWaitHeartbeatSetting) Values() []AutoStartWaitHeartbeatSetting { + return []AutoStartWaitHeartbeatSetting{ + AutoStartWaitHeartbeatSettingYes, + AutoStartWaitHeartbeatSettingNo, + AutoStartWaitHeartbeatSettingSystemDefault, + } +} + +func (e AutoStartWaitHeartbeatSetting) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["AutoStartWaitHeartbeatSetting"] = reflect.TypeOf((*AutoStartWaitHeartbeatSetting)(nil)).Elem() } +// Provisioning type constants. type BaseConfigInfoDiskFileBackingInfoProvisioningType string const ( @@ -311,11 +459,23 @@ const ( BaseConfigInfoDiskFileBackingInfoProvisioningTypeLazyZeroedThick = BaseConfigInfoDiskFileBackingInfoProvisioningType("lazyZeroedThick") ) -func init() { - t["BaseConfigInfoDiskFileBackingInfoProvisioningType"] = reflect.TypeOf((*BaseConfigInfoDiskFileBackingInfoProvisioningType)(nil)).Elem() - minAPIVersionForType["BaseConfigInfoDiskFileBackingInfoProvisioningType"] = "6.5" +func (e BaseConfigInfoDiskFileBackingInfoProvisioningType) Values() []BaseConfigInfoDiskFileBackingInfoProvisioningType { + return []BaseConfigInfoDiskFileBackingInfoProvisioningType{ + BaseConfigInfoDiskFileBackingInfoProvisioningTypeThin, + BaseConfigInfoDiskFileBackingInfoProvisioningTypeEagerZeroedThick, + BaseConfigInfoDiskFileBackingInfoProvisioningTypeLazyZeroedThick, + } } +func (e BaseConfigInfoDiskFileBackingInfoProvisioningType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["BaseConfigInfoDiskFileBackingInfoProvisioningType"] = reflect.TypeOf((*BaseConfigInfoDiskFileBackingInfoProvisioningType)(nil)).Elem() +} + +// Enum representing result of batch-APis. 
type BatchResultResult string const ( @@ -323,9 +483,19 @@ const ( BatchResultResultFail = BatchResultResult("fail") ) +func (e BatchResultResult) Values() []BatchResultResult { + return []BatchResultResult{ + BatchResultResultSuccess, + BatchResultResultFail, + } +} + +func (e BatchResultResult) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["BatchResultResult"] = reflect.TypeOf((*BatchResultResult)(nil)).Elem() - minAPIVersionForType["BatchResultResult"] = "6.0" } type CannotEnableVmcpForClusterReason string @@ -335,9 +505,18 @@ const ( CannotEnableVmcpForClusterReasonAPDTimeoutDisabled = CannotEnableVmcpForClusterReason("APDTimeoutDisabled") ) +func (e CannotEnableVmcpForClusterReason) Values() []CannotEnableVmcpForClusterReason { + return []CannotEnableVmcpForClusterReason{ + CannotEnableVmcpForClusterReasonAPDTimeoutDisabled, + } +} + +func (e CannotEnableVmcpForClusterReason) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["CannotEnableVmcpForClusterReason"] = reflect.TypeOf((*CannotEnableVmcpForClusterReason)(nil)).Elem() - minAPIVersionForType["CannotEnableVmcpForClusterReason"] = "6.0" } type CannotMoveFaultToleranceVmMoveType string @@ -349,9 +528,19 @@ const ( CannotMoveFaultToleranceVmMoveTypeCluster = CannotMoveFaultToleranceVmMoveType("cluster") ) +func (e CannotMoveFaultToleranceVmMoveType) Values() []CannotMoveFaultToleranceVmMoveType { + return []CannotMoveFaultToleranceVmMoveType{ + CannotMoveFaultToleranceVmMoveTypeResourcePool, + CannotMoveFaultToleranceVmMoveTypeCluster, + } +} + +func (e CannotMoveFaultToleranceVmMoveType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["CannotMoveFaultToleranceVmMoveType"] = reflect.TypeOf((*CannotMoveFaultToleranceVmMoveType)(nil)).Elem() - minAPIVersionForType["CannotMoveFaultToleranceVmMoveType"] = "4.0" } type CannotPowerOffVmInClusterOperation string @@ -367,9 +556,21 @@ const ( CannotPowerOffVmInClusterOperationGuestSuspend = CannotPowerOffVmInClusterOperation("guestSuspend") ) +func (e CannotPowerOffVmInClusterOperation) Values() []CannotPowerOffVmInClusterOperation { + return []CannotPowerOffVmInClusterOperation{ + CannotPowerOffVmInClusterOperationSuspend, + CannotPowerOffVmInClusterOperationPowerOff, + CannotPowerOffVmInClusterOperationGuestShutdown, + CannotPowerOffVmInClusterOperationGuestSuspend, + } +} + +func (e CannotPowerOffVmInClusterOperation) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["CannotPowerOffVmInClusterOperation"] = reflect.TypeOf((*CannotPowerOffVmInClusterOperation)(nil)).Elem() - minAPIVersionForType["CannotPowerOffVmInClusterOperation"] = "5.0" } type CannotUseNetworkReason string @@ -389,16 +590,27 @@ const ( CannotUseNetworkReasonMismatchedEnsMode = CannotUseNetworkReason("MismatchedEnsMode") ) -func init() { - t["CannotUseNetworkReason"] = reflect.TypeOf((*CannotUseNetworkReason)(nil)).Elem() - minAPIVersionForType["CannotUseNetworkReason"] = "5.5" - minAPIVersionForEnumValue["CannotUseNetworkReason"] = map[string]string{ - "NetworkUnderMaintenance": "7.0", - "MismatchedEnsMode": "7.0", +func (e CannotUseNetworkReason) Values() []CannotUseNetworkReason { + return []CannotUseNetworkReason{ + CannotUseNetworkReasonNetworkReservationNotSupported, + CannotUseNetworkReasonMismatchedNetworkPolicies, + CannotUseNetworkReasonMismatchedDvsVersionOrVendor, + CannotUseNetworkReasonVMotionToUnsupportedNetworkType, + CannotUseNetworkReasonNetworkUnderMaintenance, + 
CannotUseNetworkReasonMismatchedEnsMode, } } +func (e CannotUseNetworkReason) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["CannotUseNetworkReason"] = reflect.TypeOf((*CannotUseNetworkReason)(nil)).Elem() +} + // The types of tests which can requested by any of the methods in either +// `VirtualMachineCompatibilityChecker` or `VirtualMachineProvisioningChecker`. type CheckTestType string const ( @@ -433,18 +645,29 @@ const ( CheckTestTypeNetworkTests = CheckTestType("networkTests") ) +func (e CheckTestType) Values() []CheckTestType { + return []CheckTestType{ + CheckTestTypeSourceTests, + CheckTestTypeHostTests, + CheckTestTypeResourcePoolTests, + CheckTestTypeDatastoreTests, + CheckTestTypeNetworkTests, + } +} + +func (e CheckTestType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["CheckTestType"] = reflect.TypeOf((*CheckTestType)(nil)).Elem() - minAPIVersionForType["CheckTestType"] = "4.0" - minAPIVersionForEnumValue["CheckTestType"] = map[string]string{ - "networkTests": "5.5", - } } // HCIWorkflowState identifies the state of the cluser from the perspective of HCI // workflow. // // The workflow begins with in\_progress mode and can transition +// to 'done' or 'invalid', both of which are terminal states. type ClusterComputeResourceHCIWorkflowState string const ( @@ -457,9 +680,20 @@ const ( ClusterComputeResourceHCIWorkflowStateInvalid = ClusterComputeResourceHCIWorkflowState("invalid") ) +func (e ClusterComputeResourceHCIWorkflowState) Values() []ClusterComputeResourceHCIWorkflowState { + return []ClusterComputeResourceHCIWorkflowState{ + ClusterComputeResourceHCIWorkflowStateIn_progress, + ClusterComputeResourceHCIWorkflowStateDone, + ClusterComputeResourceHCIWorkflowStateInvalid, + } +} + +func (e ClusterComputeResourceHCIWorkflowState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ClusterComputeResourceHCIWorkflowState"] = reflect.TypeOf((*ClusterComputeResourceHCIWorkflowState)(nil)).Elem() - minAPIVersionForType["ClusterComputeResourceHCIWorkflowState"] = "6.7.1" } type ClusterComputeResourceVcsHealthStatus string @@ -473,6 +707,18 @@ const ( ClusterComputeResourceVcsHealthStatusNonhealthy = ClusterComputeResourceVcsHealthStatus("nonhealthy") ) +func (e ClusterComputeResourceVcsHealthStatus) Values() []ClusterComputeResourceVcsHealthStatus { + return []ClusterComputeResourceVcsHealthStatus{ + ClusterComputeResourceVcsHealthStatusHealthy, + ClusterComputeResourceVcsHealthStatusDegraded, + ClusterComputeResourceVcsHealthStatusNonhealthy, + } +} + +func (e ClusterComputeResourceVcsHealthStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ClusterComputeResourceVcsHealthStatus"] = reflect.TypeOf((*ClusterComputeResourceVcsHealthStatus)(nil)).Elem() minAPIVersionForType["ClusterComputeResourceVcsHealthStatus"] = "7.0.1.1" @@ -487,14 +733,25 @@ const ( ClusterCryptoConfigInfoCryptoModeForceEnable = ClusterCryptoConfigInfoCryptoMode("forceEnable") ) +func (e ClusterCryptoConfigInfoCryptoMode) Values() []ClusterCryptoConfigInfoCryptoMode { + return []ClusterCryptoConfigInfoCryptoMode{ + ClusterCryptoConfigInfoCryptoModeOnDemand, + ClusterCryptoConfigInfoCryptoModeForceEnable, + } +} + +func (e ClusterCryptoConfigInfoCryptoMode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ClusterCryptoConfigInfoCryptoMode"] = reflect.TypeOf((*ClusterCryptoConfigInfoCryptoMode)(nil)).Elem() - 
minAPIVersionForType["ClusterCryptoConfigInfoCryptoMode"] = "7.0" } // The `ClusterDasAamNodeStateDasState_enum` enumerated type defines // values for host HA configuration and runtime state properties // (`ClusterDasAamNodeState.configState` and +// `ClusterDasAamNodeState.runtimeState`). type ClusterDasAamNodeStateDasState string const ( @@ -522,12 +779,29 @@ const ( ClusterDasAamNodeStateDasStateNodeFailed = ClusterDasAamNodeStateDasState("nodeFailed") ) +func (e ClusterDasAamNodeStateDasState) Values() []ClusterDasAamNodeStateDasState { + return []ClusterDasAamNodeStateDasState{ + ClusterDasAamNodeStateDasStateUninitialized, + ClusterDasAamNodeStateDasStateInitialized, + ClusterDasAamNodeStateDasStateConfiguring, + ClusterDasAamNodeStateDasStateUnconfiguring, + ClusterDasAamNodeStateDasStateRunning, + ClusterDasAamNodeStateDasStateError, + ClusterDasAamNodeStateDasStateAgentShutdown, + ClusterDasAamNodeStateDasStateNodeFailed, + } +} + +func (e ClusterDasAamNodeStateDasState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ClusterDasAamNodeStateDasState"] = reflect.TypeOf((*ClusterDasAamNodeStateDasState)(nil)).Elem() - minAPIVersionForType["ClusterDasAamNodeStateDasState"] = "4.0" } // The policy to determine the candidates from which vCenter Server can +// choose heartbeat datastores. type ClusterDasConfigInfoHBDatastoreCandidate string const ( @@ -554,14 +828,26 @@ const ( ClusterDasConfigInfoHBDatastoreCandidateAllFeasibleDsWithUserPreference = ClusterDasConfigInfoHBDatastoreCandidate("allFeasibleDsWithUserPreference") ) +func (e ClusterDasConfigInfoHBDatastoreCandidate) Values() []ClusterDasConfigInfoHBDatastoreCandidate { + return []ClusterDasConfigInfoHBDatastoreCandidate{ + ClusterDasConfigInfoHBDatastoreCandidateUserSelectedDs, + ClusterDasConfigInfoHBDatastoreCandidateAllFeasibleDs, + ClusterDasConfigInfoHBDatastoreCandidateAllFeasibleDsWithUserPreference, + } +} + +func (e ClusterDasConfigInfoHBDatastoreCandidate) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ClusterDasConfigInfoHBDatastoreCandidate"] = reflect.TypeOf((*ClusterDasConfigInfoHBDatastoreCandidate)(nil)).Elem() - minAPIVersionForType["ClusterDasConfigInfoHBDatastoreCandidate"] = "5.0" } // Possible states of an HA service. // // All services support the +// disabled and enabled states. type ClusterDasConfigInfoServiceState string const ( @@ -571,9 +857,19 @@ const ( ClusterDasConfigInfoServiceStateEnabled = ClusterDasConfigInfoServiceState("enabled") ) +func (e ClusterDasConfigInfoServiceState) Values() []ClusterDasConfigInfoServiceState { + return []ClusterDasConfigInfoServiceState{ + ClusterDasConfigInfoServiceStateDisabled, + ClusterDasConfigInfoServiceStateEnabled, + } +} + +func (e ClusterDasConfigInfoServiceState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ClusterDasConfigInfoServiceState"] = reflect.TypeOf((*ClusterDasConfigInfoServiceState)(nil)).Elem() - minAPIVersionForType["ClusterDasConfigInfoServiceState"] = "4.0" } // The `ClusterDasConfigInfoVmMonitoringState_enum` enum defines values that indicate @@ -592,6 +888,7 @@ func init() { // property. // - To retrieve the current state of health monitoring for a virtual machine, use the // ClusterConfigInfoEx.dasVmConfig\[\].dasSettings.vmToolsMonitoringSettings.`ClusterVmToolsMonitoringSettings.vmMonitoring` +// property. type ClusterDasConfigInfoVmMonitoringState string const ( @@ -607,18 +904,29 @@ const ( // property. 
ClusterDasConfigInfoVmMonitoringStateVmMonitoringOnly = ClusterDasConfigInfoVmMonitoringState("vmMonitoringOnly") // HA response to both guest and application heartbeat failure is enabled. - // - To retrieve the guest heartbeat status, use the - // `VirtualMachine*.*VirtualMachine.guestHeartbeatStatus` - // property. - // - To retrieve the application heartbeat status, use the - // `GuestInfo*.*GuestInfo.appHeartbeatStatus` - // property. + // - To retrieve the guest heartbeat status, use the + // `VirtualMachine*.*VirtualMachine.guestHeartbeatStatus` + // property. + // - To retrieve the application heartbeat status, use the + // `GuestInfo*.*GuestInfo.appHeartbeatStatus` + // property. ClusterDasConfigInfoVmMonitoringStateVmAndAppMonitoring = ClusterDasConfigInfoVmMonitoringState("vmAndAppMonitoring") ) +func (e ClusterDasConfigInfoVmMonitoringState) Values() []ClusterDasConfigInfoVmMonitoringState { + return []ClusterDasConfigInfoVmMonitoringState{ + ClusterDasConfigInfoVmMonitoringStateVmMonitoringDisabled, + ClusterDasConfigInfoVmMonitoringStateVmMonitoringOnly, + ClusterDasConfigInfoVmMonitoringStateVmAndAppMonitoring, + } +} + +func (e ClusterDasConfigInfoVmMonitoringState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ClusterDasConfigInfoVmMonitoringState"] = reflect.TypeOf((*ClusterDasConfigInfoVmMonitoringState)(nil)).Elem() - minAPIVersionForType["ClusterDasConfigInfoVmMonitoringState"] = "4.1" } // The `ClusterDasFdmAvailabilityState_enum` enumeration describes the @@ -631,6 +939,7 @@ func init() { // determined from information reported by the Fault Domain Manager // running on the host, by a Fault Domain Manager that has been elected // master, and by vCenter Server. See `ClusterDasFdmHostState` +// for more information about the vSphere HA architecture. type ClusterDasFdmAvailabilityState string const ( @@ -700,15 +1009,15 @@ const ( // // This // state is reported in two unlikely situations. - // - First, it is reported by - // a master if the host responds to ICMP pings sent by the master over the - // management network but the FDM on the host cannot be reached by the master. - // This situation will occur if the FDM is unable to run or exit the - // uninitialized state. - // - Second, it is reported by vCenter Server if it cannot connect to a - // master nor the FDM for the host. This situation would occur if all hosts - // in the cluster failed but vCenter Server is still running. It may also - // occur if all FDMs are unable to run or exit the uninitialized state. + // - First, it is reported by + // a master if the host responds to ICMP pings sent by the master over the + // management network but the FDM on the host cannot be reached by the master. + // This situation will occur if the FDM is unable to run or exit the + // uninitialized state. + // - Second, it is reported by vCenter Server if it cannot connect to a + // master nor the FDM for the host. This situation would occur if all hosts + // in the cluster failed but vCenter Server is still running. It may also + // occur if all FDMs are unable to run or exit the uninitialized state. ClusterDasFdmAvailabilityStateFdmUnreachable = ClusterDasFdmAvailabilityState("fdmUnreachable") // Config/Reconfig/upgrade operation has failed in first attempt and // a retry of these operations is scheduled. 
@@ -719,9 +1028,28 @@ const ( ClusterDasFdmAvailabilityStateRetry = ClusterDasFdmAvailabilityState("retry") ) +func (e ClusterDasFdmAvailabilityState) Values() []ClusterDasFdmAvailabilityState { + return []ClusterDasFdmAvailabilityState{ + ClusterDasFdmAvailabilityStateUninitialized, + ClusterDasFdmAvailabilityStateElection, + ClusterDasFdmAvailabilityStateMaster, + ClusterDasFdmAvailabilityStateConnectedToMaster, + ClusterDasFdmAvailabilityStateNetworkPartitionedFromMaster, + ClusterDasFdmAvailabilityStateNetworkIsolated, + ClusterDasFdmAvailabilityStateHostDown, + ClusterDasFdmAvailabilityStateInitializationError, + ClusterDasFdmAvailabilityStateUninitializationError, + ClusterDasFdmAvailabilityStateFdmUnreachable, + ClusterDasFdmAvailabilityStateRetry, + } +} + +func (e ClusterDasFdmAvailabilityState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ClusterDasFdmAvailabilityState"] = reflect.TypeOf((*ClusterDasFdmAvailabilityState)(nil)).Elem() - minAPIVersionForType["ClusterDasFdmAvailabilityState"] = "5.0" minAPIVersionForEnumValue["ClusterDasFdmAvailabilityState"] = map[string]string{ "retry": "8.0.0.0", } @@ -765,6 +1093,7 @@ func init() { // // If you ensure that your network infrastructure is sufficiently redundant // and that at least one network path is available at all times, host network +// isolation should be a rare occurrence. type ClusterDasVmSettingsIsolationResponse string const ( @@ -791,12 +1120,21 @@ const ( ClusterDasVmSettingsIsolationResponseClusterIsolationResponse = ClusterDasVmSettingsIsolationResponse("clusterIsolationResponse") ) +func (e ClusterDasVmSettingsIsolationResponse) Values() []ClusterDasVmSettingsIsolationResponse { + return []ClusterDasVmSettingsIsolationResponse{ + ClusterDasVmSettingsIsolationResponseNone, + ClusterDasVmSettingsIsolationResponsePowerOff, + ClusterDasVmSettingsIsolationResponseShutdown, + ClusterDasVmSettingsIsolationResponseClusterIsolationResponse, + } +} + +func (e ClusterDasVmSettingsIsolationResponse) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ClusterDasVmSettingsIsolationResponse"] = reflect.TypeOf((*ClusterDasVmSettingsIsolationResponse)(nil)).Elem() - minAPIVersionForType["ClusterDasVmSettingsIsolationResponse"] = "2.5" - minAPIVersionForEnumValue["ClusterDasVmSettingsIsolationResponse"] = map[string]string{ - "shutdown": "2.5u2", - } } // The `ClusterDasVmSettingsRestartPriority_enum` enum defines @@ -811,6 +1149,7 @@ func init() { // single virtual machine HA configuration (`ClusterDasVmConfigInfo.dasSettings`). // All values except for clusterRestartPriority are valid for // the cluster-wide default HA configuration for virtual machines +// (`ClusterDasConfigInfo.defaultVmSettings`). 
type ClusterDasVmSettingsRestartPriority string const ( @@ -841,19 +1180,31 @@ const ( ClusterDasVmSettingsRestartPriorityClusterRestartPriority = ClusterDasVmSettingsRestartPriority("clusterRestartPriority") ) +func (e ClusterDasVmSettingsRestartPriority) Values() []ClusterDasVmSettingsRestartPriority { + return []ClusterDasVmSettingsRestartPriority{ + ClusterDasVmSettingsRestartPriorityDisabled, + ClusterDasVmSettingsRestartPriorityLowest, + ClusterDasVmSettingsRestartPriorityLow, + ClusterDasVmSettingsRestartPriorityMedium, + ClusterDasVmSettingsRestartPriorityHigh, + ClusterDasVmSettingsRestartPriorityHighest, + ClusterDasVmSettingsRestartPriorityClusterRestartPriority, + } +} + +func (e ClusterDasVmSettingsRestartPriority) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ClusterDasVmSettingsRestartPriority"] = reflect.TypeOf((*ClusterDasVmSettingsRestartPriority)(nil)).Elem() - minAPIVersionForType["ClusterDasVmSettingsRestartPriority"] = "2.5" - minAPIVersionForEnumValue["ClusterDasVmSettingsRestartPriority"] = map[string]string{ - "lowest": "6.5", - "highest": "6.5", - } } // Describes the operation type of the action. // // enterexitQuarantine suggests // that the host is only exiting the quarantine state (i.e. not the +// maintenance mode). type ClusterHostInfraUpdateHaModeActionOperationType string const ( @@ -862,9 +1213,20 @@ const ( ClusterHostInfraUpdateHaModeActionOperationTypeEnterMaintenance = ClusterHostInfraUpdateHaModeActionOperationType("enterMaintenance") ) +func (e ClusterHostInfraUpdateHaModeActionOperationType) Values() []ClusterHostInfraUpdateHaModeActionOperationType { + return []ClusterHostInfraUpdateHaModeActionOperationType{ + ClusterHostInfraUpdateHaModeActionOperationTypeEnterQuarantine, + ClusterHostInfraUpdateHaModeActionOperationTypeExitQuarantine, + ClusterHostInfraUpdateHaModeActionOperationTypeEnterMaintenance, + } +} + +func (e ClusterHostInfraUpdateHaModeActionOperationType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ClusterHostInfraUpdateHaModeActionOperationType"] = reflect.TypeOf((*ClusterHostInfraUpdateHaModeActionOperationType)(nil)).Elem() - minAPIVersionForType["ClusterHostInfraUpdateHaModeActionOperationType"] = "6.5" } type ClusterInfraUpdateHaConfigInfoBehaviorType string @@ -878,9 +1240,19 @@ const ( ClusterInfraUpdateHaConfigInfoBehaviorTypeAutomated = ClusterInfraUpdateHaConfigInfoBehaviorType("Automated") ) +func (e ClusterInfraUpdateHaConfigInfoBehaviorType) Values() []ClusterInfraUpdateHaConfigInfoBehaviorType { + return []ClusterInfraUpdateHaConfigInfoBehaviorType{ + ClusterInfraUpdateHaConfigInfoBehaviorTypeManual, + ClusterInfraUpdateHaConfigInfoBehaviorTypeAutomated, + } +} + +func (e ClusterInfraUpdateHaConfigInfoBehaviorType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ClusterInfraUpdateHaConfigInfoBehaviorType"] = reflect.TypeOf((*ClusterInfraUpdateHaConfigInfoBehaviorType)(nil)).Elem() - minAPIVersionForType["ClusterInfraUpdateHaConfigInfoBehaviorType"] = "6.5" } type ClusterInfraUpdateHaConfigInfoRemediationType string @@ -894,11 +1266,22 @@ const ( ClusterInfraUpdateHaConfigInfoRemediationTypeMaintenanceMode = ClusterInfraUpdateHaConfigInfoRemediationType("MaintenanceMode") ) -func init() { - t["ClusterInfraUpdateHaConfigInfoRemediationType"] = reflect.TypeOf((*ClusterInfraUpdateHaConfigInfoRemediationType)(nil)).Elem() - minAPIVersionForType["ClusterInfraUpdateHaConfigInfoRemediationType"] = "6.5" +func (e 
ClusterInfraUpdateHaConfigInfoRemediationType) Values() []ClusterInfraUpdateHaConfigInfoRemediationType { + return []ClusterInfraUpdateHaConfigInfoRemediationType{ + ClusterInfraUpdateHaConfigInfoRemediationTypeQuarantineMode, + ClusterInfraUpdateHaConfigInfoRemediationTypeMaintenanceMode, + } } +func (e ClusterInfraUpdateHaConfigInfoRemediationType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["ClusterInfraUpdateHaConfigInfoRemediationType"] = reflect.TypeOf((*ClusterInfraUpdateHaConfigInfoRemediationType)(nil)).Elem() +} + +// Defines the options for a Datacenter::powerOnVm() invocation. type ClusterPowerOnVmOption string const ( @@ -925,11 +1308,22 @@ const ( ClusterPowerOnVmOptionReserveResources = ClusterPowerOnVmOption("ReserveResources") ) -func init() { - t["ClusterPowerOnVmOption"] = reflect.TypeOf((*ClusterPowerOnVmOption)(nil)).Elem() - minAPIVersionForType["ClusterPowerOnVmOption"] = "4.1" +func (e ClusterPowerOnVmOption) Values() []ClusterPowerOnVmOption { + return []ClusterPowerOnVmOption{ + ClusterPowerOnVmOptionOverrideAutomationLevel, + ClusterPowerOnVmOptionReserveResources, + } } +func (e ClusterPowerOnVmOption) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["ClusterPowerOnVmOption"] = reflect.TypeOf((*ClusterPowerOnVmOption)(nil)).Elem() +} + +// Type of services for which Profile can be requested for type ClusterProfileServiceType string const ( @@ -943,9 +1337,21 @@ const ( ClusterProfileServiceTypeFT = ClusterProfileServiceType("FT") ) +func (e ClusterProfileServiceType) Values() []ClusterProfileServiceType { + return []ClusterProfileServiceType{ + ClusterProfileServiceTypeDRS, + ClusterProfileServiceTypeHA, + ClusterProfileServiceTypeDPM, + ClusterProfileServiceTypeFT, + } +} + +func (e ClusterProfileServiceType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ClusterProfileServiceType"] = reflect.TypeOf((*ClusterProfileServiceType)(nil)).Elem() - minAPIVersionForType["ClusterProfileServiceType"] = "4.0" } type ClusterSystemVMsConfigInfoDeploymentMode string @@ -957,12 +1363,24 @@ const ( ClusterSystemVMsConfigInfoDeploymentModeABSENT = ClusterSystemVMsConfigInfoDeploymentMode("ABSENT") ) +func (e ClusterSystemVMsConfigInfoDeploymentMode) Values() []ClusterSystemVMsConfigInfoDeploymentMode { + return []ClusterSystemVMsConfigInfoDeploymentMode{ + ClusterSystemVMsConfigInfoDeploymentModeSYSTEM_MANAGED, + ClusterSystemVMsConfigInfoDeploymentModeABSENT, + } +} + +func (e ClusterSystemVMsConfigInfoDeploymentMode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ClusterSystemVMsConfigInfoDeploymentMode"] = reflect.TypeOf((*ClusterSystemVMsConfigInfoDeploymentMode)(nil)).Elem() minAPIVersionForType["ClusterSystemVMsConfigInfoDeploymentMode"] = "8.0.2.0" } // The VM policy settings that determine the response to +// storage failures. 
type ClusterVmComponentProtectionSettingsStorageVmReaction string const ( @@ -998,9 +1416,22 @@ const ( ClusterVmComponentProtectionSettingsStorageVmReactionClusterDefault = ClusterVmComponentProtectionSettingsStorageVmReaction("clusterDefault") ) +func (e ClusterVmComponentProtectionSettingsStorageVmReaction) Values() []ClusterVmComponentProtectionSettingsStorageVmReaction { + return []ClusterVmComponentProtectionSettingsStorageVmReaction{ + ClusterVmComponentProtectionSettingsStorageVmReactionDisabled, + ClusterVmComponentProtectionSettingsStorageVmReactionWarning, + ClusterVmComponentProtectionSettingsStorageVmReactionRestartConservative, + ClusterVmComponentProtectionSettingsStorageVmReactionRestartAggressive, + ClusterVmComponentProtectionSettingsStorageVmReactionClusterDefault, + } +} + +func (e ClusterVmComponentProtectionSettingsStorageVmReaction) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ClusterVmComponentProtectionSettingsStorageVmReaction"] = reflect.TypeOf((*ClusterVmComponentProtectionSettingsStorageVmReaction)(nil)).Elem() - minAPIVersionForType["ClusterVmComponentProtectionSettingsStorageVmReaction"] = "6.0" } // If an APD condition clears after an APD timeout condition has been declared and before @@ -1008,6 +1439,7 @@ func init() { // no longer be operational. // // VM Component Protection may be configured to reset the +// VM (`VirtualMachine.ResetVM_Task`) to restore the service of guest applications. type ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared string const ( @@ -1024,11 +1456,23 @@ const ( ClusterVmComponentProtectionSettingsVmReactionOnAPDClearedUseClusterDefault = ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared("useClusterDefault") ) -func init() { - t["ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared"] = reflect.TypeOf((*ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared)(nil)).Elem() - minAPIVersionForType["ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared"] = "6.0" +func (e ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared) Values() []ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared { + return []ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared{ + ClusterVmComponentProtectionSettingsVmReactionOnAPDClearedNone, + ClusterVmComponentProtectionSettingsVmReactionOnAPDClearedReset, + ClusterVmComponentProtectionSettingsVmReactionOnAPDClearedUseClusterDefault, + } } +func (e ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared"] = reflect.TypeOf((*ClusterVmComponentProtectionSettingsVmReactionOnAPDCleared)(nil)).Elem() +} + +// Condition for VM's readiness type ClusterVmReadinessReadyCondition string const ( @@ -1057,9 +1501,22 @@ const ( ClusterVmReadinessReadyConditionUseClusterDefault = ClusterVmReadinessReadyCondition("useClusterDefault") ) +func (e ClusterVmReadinessReadyCondition) Values() []ClusterVmReadinessReadyCondition { + return []ClusterVmReadinessReadyCondition{ + ClusterVmReadinessReadyConditionNone, + ClusterVmReadinessReadyConditionPoweredOn, + ClusterVmReadinessReadyConditionGuestHbStatusGreen, + ClusterVmReadinessReadyConditionAppHbStatusGreen, + ClusterVmReadinessReadyConditionUseClusterDefault, + } +} + +func (e ClusterVmReadinessReadyCondition) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ClusterVmReadinessReadyCondition"] = 
reflect.TypeOf((*ClusterVmReadinessReadyCondition)(nil)).Elem() - minAPIVersionForType["ClusterVmReadinessReadyCondition"] = "6.5" } type ComplianceResultStatus string @@ -1075,14 +1532,24 @@ const ( ComplianceResultStatusRunning = ComplianceResultStatus("running") ) -func init() { - t["ComplianceResultStatus"] = reflect.TypeOf((*ComplianceResultStatus)(nil)).Elem() - minAPIVersionForType["ComplianceResultStatus"] = "4.0" - minAPIVersionForEnumValue["ComplianceResultStatus"] = map[string]string{ - "running": "6.7", +func (e ComplianceResultStatus) Values() []ComplianceResultStatus { + return []ComplianceResultStatus{ + ComplianceResultStatusCompliant, + ComplianceResultStatusNonCompliant, + ComplianceResultStatusUnknown, + ComplianceResultStatusRunning, } } +func (e ComplianceResultStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["ComplianceResultStatus"] = reflect.TypeOf((*ComplianceResultStatus)(nil)).Elem() +} + +// The SPBM(Storage Policy Based Management) license state for a host type ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState string const ( @@ -1095,11 +1562,23 @@ const ( ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseStateUnknown = ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState("unknown") ) -func init() { - t["ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState"] = reflect.TypeOf((*ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState)(nil)).Elem() - minAPIVersionForType["ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState"] = "5.0" +func (e ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState) Values() []ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState { + return []ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState{ + ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseStateLicensed, + ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseStateUnlicensed, + ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseStateUnknown, + } } +func (e ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState"] = reflect.TypeOf((*ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState)(nil)).Elem() +} + +// Config spec operation type. 
type ConfigSpecOperation string const ( @@ -1111,9 +1590,20 @@ const ( ConfigSpecOperationRemove = ConfigSpecOperation("remove") ) +func (e ConfigSpecOperation) Values() []ConfigSpecOperation { + return []ConfigSpecOperation{ + ConfigSpecOperationAdd, + ConfigSpecOperationEdit, + ConfigSpecOperationRemove, + } +} + +func (e ConfigSpecOperation) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ConfigSpecOperation"] = reflect.TypeOf((*ConfigSpecOperation)(nil)).Elem() - minAPIVersionForType["ConfigSpecOperation"] = "4.0" } type CryptoManagerHostKeyManagementType string @@ -1124,6 +1614,18 @@ const ( CryptoManagerHostKeyManagementTypeExternal = CryptoManagerHostKeyManagementType("external") ) +func (e CryptoManagerHostKeyManagementType) Values() []CryptoManagerHostKeyManagementType { + return []CryptoManagerHostKeyManagementType{ + CryptoManagerHostKeyManagementTypeUnknown, + CryptoManagerHostKeyManagementTypeInternal, + CryptoManagerHostKeyManagementTypeExternal, + } +} + +func (e CryptoManagerHostKeyManagementType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["CryptoManagerHostKeyManagementType"] = reflect.TypeOf((*CryptoManagerHostKeyManagementType)(nil)).Elem() minAPIVersionForType["CryptoManagerHostKeyManagementType"] = "8.0.1.0" @@ -1144,13 +1646,34 @@ const ( CryptoManagerKmipCryptoKeyStatusKeyUnavailableReasonKeyStateNotActiveOrEnabled = CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason("KeyStateNotActiveOrEnabled") // Key is managed by Trust Authority CryptoManagerKmipCryptoKeyStatusKeyUnavailableReasonKeyStateManagedByTrustAuthority = CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason("KeyStateManagedByTrustAuthority") + // Key is managed by Native Key Provider + CryptoManagerKmipCryptoKeyStatusKeyUnavailableReasonKeyStateManagedByNKP = CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason("KeyStateManagedByNKP") + // No permission to access key provider + CryptoManagerKmipCryptoKeyStatusKeyUnavailableReasonNoPermissionToAccessKeyProvider = CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason("NoPermissionToAccessKeyProvider") ) +func (e CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason) Values() []CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason { + return []CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason{ + CryptoManagerKmipCryptoKeyStatusKeyUnavailableReasonKeyStateMissingInCache, + CryptoManagerKmipCryptoKeyStatusKeyUnavailableReasonKeyStateClusterInvalid, + CryptoManagerKmipCryptoKeyStatusKeyUnavailableReasonKeyStateClusterUnreachable, + CryptoManagerKmipCryptoKeyStatusKeyUnavailableReasonKeyStateMissingInKMS, + CryptoManagerKmipCryptoKeyStatusKeyUnavailableReasonKeyStateNotActiveOrEnabled, + CryptoManagerKmipCryptoKeyStatusKeyUnavailableReasonKeyStateManagedByTrustAuthority, + CryptoManagerKmipCryptoKeyStatusKeyUnavailableReasonKeyStateManagedByNKP, + CryptoManagerKmipCryptoKeyStatusKeyUnavailableReasonNoPermissionToAccessKeyProvider, + } +} + +func (e CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason"] = reflect.TypeOf((*CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason)(nil)).Elem() - minAPIVersionForType["CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason"] = "6.7.2" minAPIVersionForEnumValue["CryptoManagerKmipCryptoKeyStatusKeyUnavailableReason"] = map[string]string{ - "KeyStateManagedByTrustAuthority": "7.0", + "KeyStateManagedByNKP": 
"8.0.3.0", + "NoPermissionToAccessKeyProvider": "8.0.3.0", } } @@ -1167,9 +1690,21 @@ const ( CustomizationFailedReasonCodeWrongMetadataFormat = CustomizationFailedReasonCode("wrongMetadataFormat") ) +func (e CustomizationFailedReasonCode) Values() []CustomizationFailedReasonCode { + return []CustomizationFailedReasonCode{ + CustomizationFailedReasonCodeUserDefinedScriptDisabled, + CustomizationFailedReasonCodeCustomizationDisabled, + CustomizationFailedReasonCodeRawDataIsNotSupported, + CustomizationFailedReasonCodeWrongMetadataFormat, + } +} + +func (e CustomizationFailedReasonCode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["CustomizationFailedReasonCode"] = reflect.TypeOf((*CustomizationFailedReasonCode)(nil)).Elem() - minAPIVersionForType["CustomizationFailedReasonCode"] = "7.0" minAPIVersionForEnumValue["CustomizationFailedReasonCode"] = map[string]string{ "customizationDisabled": "7.0.1.0", "rawDataIsNotSupported": "7.0.3.0", @@ -1190,6 +1725,17 @@ const ( CustomizationLicenseDataModePerSeat = CustomizationLicenseDataMode("perSeat") ) +func (e CustomizationLicenseDataMode) Values() []CustomizationLicenseDataMode { + return []CustomizationLicenseDataMode{ + CustomizationLicenseDataModePerServer, + CustomizationLicenseDataModePerSeat, + } +} + +func (e CustomizationLicenseDataMode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["CustomizationLicenseDataMode"] = reflect.TypeOf((*CustomizationLicenseDataMode)(nil)).Elem() } @@ -1206,11 +1752,24 @@ const ( CustomizationNetBIOSModeDisableNetBIOS = CustomizationNetBIOSMode("disableNetBIOS") ) +func (e CustomizationNetBIOSMode) Values() []CustomizationNetBIOSMode { + return []CustomizationNetBIOSMode{ + CustomizationNetBIOSModeEnableNetBIOSViaDhcp, + CustomizationNetBIOSModeEnableNetBIOS, + CustomizationNetBIOSModeDisableNetBIOS, + } +} + +func (e CustomizationNetBIOSMode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["CustomizationNetBIOSMode"] = reflect.TypeOf((*CustomizationNetBIOSMode)(nil)).Elem() } // A enum constant specifying what should be done to the guest vm after running +// sysprep. type CustomizationSysprepRebootOption string const ( @@ -1232,12 +1791,24 @@ const ( CustomizationSysprepRebootOptionShutdown = CustomizationSysprepRebootOption("shutdown") ) +func (e CustomizationSysprepRebootOption) Values() []CustomizationSysprepRebootOption { + return []CustomizationSysprepRebootOption{ + CustomizationSysprepRebootOptionReboot, + CustomizationSysprepRebootOptionNoreboot, + CustomizationSysprepRebootOptionShutdown, + } +} + +func (e CustomizationSysprepRebootOption) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["CustomizationSysprepRebootOption"] = reflect.TypeOf((*CustomizationSysprepRebootOption)(nil)).Elem() - minAPIVersionForType["CustomizationSysprepRebootOption"] = "2.5" } // Set of possible values for +// `DVPortStatus*.*DVPortStatus.vmDirectPathGen2InactiveReasonNetwork`. 
type DVPortStatusVmDirectPathGen2InactiveReasonNetwork string const ( @@ -1259,12 +1830,25 @@ const ( DVPortStatusVmDirectPathGen2InactiveReasonNetworkPortNptDisabledForPort = DVPortStatusVmDirectPathGen2InactiveReasonNetwork("portNptDisabledForPort") ) +func (e DVPortStatusVmDirectPathGen2InactiveReasonNetwork) Values() []DVPortStatusVmDirectPathGen2InactiveReasonNetwork { + return []DVPortStatusVmDirectPathGen2InactiveReasonNetwork{ + DVPortStatusVmDirectPathGen2InactiveReasonNetworkPortNptIncompatibleDvs, + DVPortStatusVmDirectPathGen2InactiveReasonNetworkPortNptNoCompatibleNics, + DVPortStatusVmDirectPathGen2InactiveReasonNetworkPortNptNoVirtualFunctionsAvailable, + DVPortStatusVmDirectPathGen2InactiveReasonNetworkPortNptDisabledForPort, + } +} + +func (e DVPortStatusVmDirectPathGen2InactiveReasonNetwork) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DVPortStatusVmDirectPathGen2InactiveReasonNetwork"] = reflect.TypeOf((*DVPortStatusVmDirectPathGen2InactiveReasonNetwork)(nil)).Elem() - minAPIVersionForType["DVPortStatusVmDirectPathGen2InactiveReasonNetwork"] = "4.1" } // Set of possible values for +// `DVPortStatus*.*DVPortStatus.vmDirectPathGen2InactiveReasonOther`. type DVPortStatusVmDirectPathGen2InactiveReasonOther string const ( @@ -1284,9 +1868,67 @@ const ( DVPortStatusVmDirectPathGen2InactiveReasonOtherPortNptIncompatibleConnectee = DVPortStatusVmDirectPathGen2InactiveReasonOther("portNptIncompatibleConnectee") ) +func (e DVPortStatusVmDirectPathGen2InactiveReasonOther) Values() []DVPortStatusVmDirectPathGen2InactiveReasonOther { + return []DVPortStatusVmDirectPathGen2InactiveReasonOther{ + DVPortStatusVmDirectPathGen2InactiveReasonOtherPortNptIncompatibleHost, + DVPortStatusVmDirectPathGen2InactiveReasonOtherPortNptIncompatibleConnectee, + } +} + +func (e DVPortStatusVmDirectPathGen2InactiveReasonOther) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DVPortStatusVmDirectPathGen2InactiveReasonOther"] = reflect.TypeOf((*DVPortStatusVmDirectPathGen2InactiveReasonOther)(nil)).Elem() - minAPIVersionForType["DVPortStatusVmDirectPathGen2InactiveReasonOther"] = "4.1" +} + +type DVSFilterSpecLinkConfig string + +const ( + // The port link state: blocked. + DVSFilterSpecLinkConfigBlocked = DVSFilterSpecLinkConfig("blocked") + // The port link state: unblocked. + DVSFilterSpecLinkConfigUnblocked = DVSFilterSpecLinkConfig("unblocked") +) + +func (e DVSFilterSpecLinkConfig) Values() []DVSFilterSpecLinkConfig { + return []DVSFilterSpecLinkConfig{ + DVSFilterSpecLinkConfigBlocked, + DVSFilterSpecLinkConfigUnblocked, + } +} + +func (e DVSFilterSpecLinkConfig) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["DVSFilterSpecLinkConfig"] = reflect.TypeOf((*DVSFilterSpecLinkConfig)(nil)).Elem() +} + +type DVSFilterSpecLinkState string + +const ( + // The port link state: down. + DVSFilterSpecLinkStateDown = DVSFilterSpecLinkState("down") + // The port link state: up. 
+ DVSFilterSpecLinkStateUp = DVSFilterSpecLinkState("up") +) + +func (e DVSFilterSpecLinkState) Values() []DVSFilterSpecLinkState { + return []DVSFilterSpecLinkState{ + DVSFilterSpecLinkStateDown, + DVSFilterSpecLinkStateUp, + } +} + +func (e DVSFilterSpecLinkState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["DVSFilterSpecLinkState"] = reflect.TypeOf((*DVSFilterSpecLinkState)(nil)).Elem() } type DVSMacLimitPolicyType string @@ -1296,9 +1938,19 @@ const ( DVSMacLimitPolicyTypeDrop = DVSMacLimitPolicyType("drop") ) +func (e DVSMacLimitPolicyType) Values() []DVSMacLimitPolicyType { + return []DVSMacLimitPolicyType{ + DVSMacLimitPolicyTypeAllow, + DVSMacLimitPolicyTypeDrop, + } +} + +func (e DVSMacLimitPolicyType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DVSMacLimitPolicyType"] = reflect.TypeOf((*DVSMacLimitPolicyType)(nil)).Elem() - minAPIVersionForType["DVSMacLimitPolicyType"] = "6.7" } type DasConfigFaultDasConfigFaultReason string @@ -1330,17 +1982,28 @@ const ( DasConfigFaultDasConfigFaultReasonApplyHAVibsOnClusterFailed = DasConfigFaultDasConfigFaultReason("ApplyHAVibsOnClusterFailed") ) +func (e DasConfigFaultDasConfigFaultReason) Values() []DasConfigFaultDasConfigFaultReason { + return []DasConfigFaultDasConfigFaultReason{ + DasConfigFaultDasConfigFaultReasonHostNetworkMisconfiguration, + DasConfigFaultDasConfigFaultReasonHostMisconfiguration, + DasConfigFaultDasConfigFaultReasonInsufficientPrivileges, + DasConfigFaultDasConfigFaultReasonNoPrimaryAgentAvailable, + DasConfigFaultDasConfigFaultReasonOther, + DasConfigFaultDasConfigFaultReasonNoDatastoresConfigured, + DasConfigFaultDasConfigFaultReasonCreateConfigVvolFailed, + DasConfigFaultDasConfigFaultReasonVSanNotSupportedOnHost, + DasConfigFaultDasConfigFaultReasonDasNetworkMisconfiguration, + DasConfigFaultDasConfigFaultReasonSetDesiredImageSpecFailed, + DasConfigFaultDasConfigFaultReasonApplyHAVibsOnClusterFailed, + } +} + +func (e DasConfigFaultDasConfigFaultReason) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DasConfigFaultDasConfigFaultReason"] = reflect.TypeOf((*DasConfigFaultDasConfigFaultReason)(nil)).Elem() - minAPIVersionForType["DasConfigFaultDasConfigFaultReason"] = "4.0" - minAPIVersionForEnumValue["DasConfigFaultDasConfigFaultReason"] = map[string]string{ - "NoDatastoresConfigured": "5.1", - "CreateConfigVvolFailed": "6.0", - "VSanNotSupportedOnHost": "5.5", - "DasNetworkMisconfiguration": "6.0", - "SetDesiredImageSpecFailed": "7.0", - "ApplyHAVibsOnClusterFailed": "7.0", - } } // Deprecated as of VI API 2.5, use `ClusterDasVmSettingsRestartPriority_enum`. 
@@ -1371,6 +2034,19 @@ const ( DasVmPriorityHigh = DasVmPriority("high") ) +func (e DasVmPriority) Values() []DasVmPriority { + return []DasVmPriority{ + DasVmPriorityDisabled, + DasVmPriorityLow, + DasVmPriorityMedium, + DasVmPriorityHigh, + } +} + +func (e DasVmPriority) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DasVmPriority"] = reflect.TypeOf((*DasVmPriority)(nil)).Elem() } @@ -1384,11 +2060,22 @@ const ( DatastoreAccessibleFalse = DatastoreAccessible("False") ) -func init() { - t["DatastoreAccessible"] = reflect.TypeOf((*DatastoreAccessible)(nil)).Elem() - minAPIVersionForType["DatastoreAccessible"] = "4.0" +func (e DatastoreAccessible) Values() []DatastoreAccessible { + return []DatastoreAccessible{ + DatastoreAccessibleTrue, + DatastoreAccessibleFalse, + } } +func (e DatastoreAccessible) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["DatastoreAccessible"] = reflect.TypeOf((*DatastoreAccessible)(nil)).Elem() +} + +// Defines the current maintenance mode state of the datastore. type DatastoreSummaryMaintenanceModeState string const ( @@ -1403,9 +2090,20 @@ const ( DatastoreSummaryMaintenanceModeStateInMaintenance = DatastoreSummaryMaintenanceModeState("inMaintenance") ) +func (e DatastoreSummaryMaintenanceModeState) Values() []DatastoreSummaryMaintenanceModeState { + return []DatastoreSummaryMaintenanceModeState{ + DatastoreSummaryMaintenanceModeStateNormal, + DatastoreSummaryMaintenanceModeStateEnteringMaintenance, + DatastoreSummaryMaintenanceModeStateInMaintenance, + } +} + +func (e DatastoreSummaryMaintenanceModeState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DatastoreSummaryMaintenanceModeState"] = reflect.TypeOf((*DatastoreSummaryMaintenanceModeState)(nil)).Elem() - minAPIVersionForType["DatastoreSummaryMaintenanceModeState"] = "5.0" } type DayOfWeek string @@ -1420,10 +2118,27 @@ const ( DayOfWeekSaturday = DayOfWeek("saturday") ) +func (e DayOfWeek) Values() []DayOfWeek { + return []DayOfWeek{ + DayOfWeekSunday, + DayOfWeekMonday, + DayOfWeekTuesday, + DayOfWeekWednesday, + DayOfWeekThursday, + DayOfWeekFriday, + DayOfWeekSaturday, + } +} + +func (e DayOfWeek) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DayOfWeek"] = reflect.TypeOf((*DayOfWeek)(nil)).Elem() } +// Reasons why a virtual device would not be supported on a host. type DeviceNotSupportedReason string const ( @@ -1432,11 +2147,28 @@ const ( // The device is supported by the host in general, but not for // the specific guest OS the virtual machine is using. DeviceNotSupportedReasonGuest = DeviceNotSupportedReason("guest") + // The device is supported by the host and guest OS, but not for + // the vSphere Fault Tolerance. + DeviceNotSupportedReasonFt = DeviceNotSupportedReason("ft") ) +func (e DeviceNotSupportedReason) Values() []DeviceNotSupportedReason { + return []DeviceNotSupportedReason{ + DeviceNotSupportedReasonHost, + DeviceNotSupportedReasonGuest, + DeviceNotSupportedReasonFt, + } +} + +func (e DeviceNotSupportedReason) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DeviceNotSupportedReason"] = reflect.TypeOf((*DeviceNotSupportedReason)(nil)).Elem() - minAPIVersionForType["DeviceNotSupportedReason"] = "2.5" + minAPIVersionForEnumValue["DeviceNotSupportedReason"] = map[string]string{ + "ft": "8.0.3.0", + } } // The list of Device Protocols. 
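The change repeated throughout this file is mechanical: each enum type gains a Values() method listing its defined constants and a Strings() method built on EnumValuesAsStrings, while the blanket minAPIVersionForType registrations are dropped (only per-value entries such as "ft": "8.0.3.0" for DeviceNotSupportedReason remain in minAPIVersionForEnumValue). A minimal usage sketch, assuming these types live in govmomi's vim25/types package:

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	// Values() enumerates every constant defined for the enum type; the
	// receiver's own value is ignored, so a zero value is enough.
	for _, d := range types.DayOfWeek("").Values() {
		fmt.Println(d)
	}

	// Strings() yields the same list as plain strings, convenient for
	// validation messages or CLI help text.
	fmt.Println(types.DayOfWeek("").Strings())
}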
@@ -1447,6 +2179,17 @@ const ( DeviceProtocolSCSI = DeviceProtocol("SCSI") ) +func (e DeviceProtocol) Values() []DeviceProtocol { + return []DeviceProtocol{ + DeviceProtocolNVMe, + DeviceProtocolSCSI, + } +} + +func (e DeviceProtocol) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DeviceProtocol"] = reflect.TypeOf((*DeviceProtocol)(nil)).Elem() minAPIVersionForType["DeviceProtocol"] = "8.0.1.0" @@ -1472,11 +2215,24 @@ const ( DiagnosticManagerLogCreatorRecordLog = DiagnosticManagerLogCreator("recordLog") ) +func (e DiagnosticManagerLogCreator) Values() []DiagnosticManagerLogCreator { + return []DiagnosticManagerLogCreator{ + DiagnosticManagerLogCreatorVpxd, + DiagnosticManagerLogCreatorVpxa, + DiagnosticManagerLogCreatorHostd, + DiagnosticManagerLogCreatorServerd, + DiagnosticManagerLogCreatorInstall, + DiagnosticManagerLogCreatorVpxClient, + DiagnosticManagerLogCreatorRecordLog, + } +} + +func (e DiagnosticManagerLogCreator) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DiagnosticManagerLogCreator"] = reflect.TypeOf((*DiagnosticManagerLogCreator)(nil)).Elem() - minAPIVersionForEnumValue["DiagnosticManagerLogCreator"] = map[string]string{ - "recordLog": "2.5", - } } // Constants for defined formats. @@ -1489,6 +2245,16 @@ const ( DiagnosticManagerLogFormatPlain = DiagnosticManagerLogFormat("plain") ) +func (e DiagnosticManagerLogFormat) Values() []DiagnosticManagerLogFormat { + return []DiagnosticManagerLogFormat{ + DiagnosticManagerLogFormatPlain, + } +} + +func (e DiagnosticManagerLogFormat) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DiagnosticManagerLogFormat"] = reflect.TypeOf((*DiagnosticManagerLogFormat)(nil)).Elem() } @@ -1506,6 +2272,17 @@ const ( DiagnosticPartitionStorageTypeNetworkAttached = DiagnosticPartitionStorageType("networkAttached") ) +func (e DiagnosticPartitionStorageType) Values() []DiagnosticPartitionStorageType { + return []DiagnosticPartitionStorageType{ + DiagnosticPartitionStorageTypeDirectAttached, + DiagnosticPartitionStorageTypeNetworkAttached, + } +} + +func (e DiagnosticPartitionStorageType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DiagnosticPartitionStorageType"] = reflect.TypeOf((*DiagnosticPartitionStorageType)(nil)).Elem() } @@ -1522,10 +2299,22 @@ const ( DiagnosticPartitionTypeMultiHost = DiagnosticPartitionType("multiHost") ) +func (e DiagnosticPartitionType) Values() []DiagnosticPartitionType { + return []DiagnosticPartitionType{ + DiagnosticPartitionTypeSingleHost, + DiagnosticPartitionTypeMultiHost, + } +} + +func (e DiagnosticPartitionType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DiagnosticPartitionType"] = reflect.TypeOf((*DiagnosticPartitionType)(nil)).Elem() } +// The disallowed change type. 
type DisallowedChangeByServiceDisallowedChange string const ( @@ -1533,12 +2322,22 @@ const ( DisallowedChangeByServiceDisallowedChangeHotExtendDisk = DisallowedChangeByServiceDisallowedChange("hotExtendDisk") ) +func (e DisallowedChangeByServiceDisallowedChange) Values() []DisallowedChangeByServiceDisallowedChange { + return []DisallowedChangeByServiceDisallowedChange{ + DisallowedChangeByServiceDisallowedChangeHotExtendDisk, + } +} + +func (e DisallowedChangeByServiceDisallowedChange) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DisallowedChangeByServiceDisallowedChange"] = reflect.TypeOf((*DisallowedChangeByServiceDisallowedChange)(nil)).Elem() - minAPIVersionForType["DisallowedChangeByServiceDisallowedChange"] = "5.0" } // The `DistributedVirtualPortgroupBackingType_enum` enum defines +// the distributed virtual portgroup backing type. type DistributedVirtualPortgroupBackingType string const ( @@ -1554,12 +2353,23 @@ const ( DistributedVirtualPortgroupBackingTypeNsx = DistributedVirtualPortgroupBackingType("nsx") ) +func (e DistributedVirtualPortgroupBackingType) Values() []DistributedVirtualPortgroupBackingType { + return []DistributedVirtualPortgroupBackingType{ + DistributedVirtualPortgroupBackingTypeStandard, + DistributedVirtualPortgroupBackingTypeNsx, + } +} + +func (e DistributedVirtualPortgroupBackingType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DistributedVirtualPortgroupBackingType"] = reflect.TypeOf((*DistributedVirtualPortgroupBackingType)(nil)).Elem() - minAPIVersionForType["DistributedVirtualPortgroupBackingType"] = "7.0" } // The meta tag names recognizable in the +// `DVPortgroupConfigInfo.portNameFormat` string. type DistributedVirtualPortgroupMetaTagName string const ( @@ -1571,9 +2381,20 @@ const ( DistributedVirtualPortgroupMetaTagNamePortIndex = DistributedVirtualPortgroupMetaTagName("portIndex") ) +func (e DistributedVirtualPortgroupMetaTagName) Values() []DistributedVirtualPortgroupMetaTagName { + return []DistributedVirtualPortgroupMetaTagName{ + DistributedVirtualPortgroupMetaTagNameDvsName, + DistributedVirtualPortgroupMetaTagNamePortgroupName, + DistributedVirtualPortgroupMetaTagNamePortIndex, + } +} + +func (e DistributedVirtualPortgroupMetaTagName) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DistributedVirtualPortgroupMetaTagName"] = reflect.TypeOf((*DistributedVirtualPortgroupMetaTagName)(nil)).Elem() - minAPIVersionForType["DistributedVirtualPortgroupMetaTagName"] = "4.0" } // The `DistributedVirtualPortgroupPortgroupType_enum` enum defines @@ -1582,6 +2403,7 @@ func init() { // // Early binding specifies a static set of ports that are created // when you create the distributed virtual portgroup. An ephemeral portgroup uses dynamic +// ports that are created when you power on a virtual machine. type DistributedVirtualPortgroupPortgroupType string const ( @@ -1589,8 +2411,6 @@ const ( // a `VirtualMachine` when the virtual machine is reconfigured to // connect to the portgroup. DistributedVirtualPortgroupPortgroupTypeEarlyBinding = DistributedVirtualPortgroupPortgroupType("earlyBinding") - // - // // Deprecated as of vSphere API 5.0. 
// // A free `DistributedVirtualPort` will be selected and @@ -1609,11 +2429,23 @@ const ( DistributedVirtualPortgroupPortgroupTypeEphemeral = DistributedVirtualPortgroupPortgroupType("ephemeral") ) -func init() { - t["DistributedVirtualPortgroupPortgroupType"] = reflect.TypeOf((*DistributedVirtualPortgroupPortgroupType)(nil)).Elem() - minAPIVersionForType["DistributedVirtualPortgroupPortgroupType"] = "4.0" +func (e DistributedVirtualPortgroupPortgroupType) Values() []DistributedVirtualPortgroupPortgroupType { + return []DistributedVirtualPortgroupPortgroupType{ + DistributedVirtualPortgroupPortgroupTypeEarlyBinding, + DistributedVirtualPortgroupPortgroupTypeLateBinding, + DistributedVirtualPortgroupPortgroupTypeEphemeral, + } } +func (e DistributedVirtualPortgroupPortgroupType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["DistributedVirtualPortgroupPortgroupType"] = reflect.TypeOf((*DistributedVirtualPortgroupPortgroupType)(nil)).Elem() +} + +// List of possible host infrastructure traffic classes type DistributedVirtualSwitchHostInfrastructureTrafficClass string const ( @@ -1641,16 +2473,35 @@ const ( DistributedVirtualSwitchHostInfrastructureTrafficClassNvmetcp = DistributedVirtualSwitchHostInfrastructureTrafficClass("nvmetcp") ) +func (e DistributedVirtualSwitchHostInfrastructureTrafficClass) Values() []DistributedVirtualSwitchHostInfrastructureTrafficClass { + return []DistributedVirtualSwitchHostInfrastructureTrafficClass{ + DistributedVirtualSwitchHostInfrastructureTrafficClassManagement, + DistributedVirtualSwitchHostInfrastructureTrafficClassFaultTolerance, + DistributedVirtualSwitchHostInfrastructureTrafficClassVmotion, + DistributedVirtualSwitchHostInfrastructureTrafficClassVirtualMachine, + DistributedVirtualSwitchHostInfrastructureTrafficClassISCSI, + DistributedVirtualSwitchHostInfrastructureTrafficClassNfs, + DistributedVirtualSwitchHostInfrastructureTrafficClassHbr, + DistributedVirtualSwitchHostInfrastructureTrafficClassVsan, + DistributedVirtualSwitchHostInfrastructureTrafficClassVdp, + DistributedVirtualSwitchHostInfrastructureTrafficClassBackupNfc, + DistributedVirtualSwitchHostInfrastructureTrafficClassNvmetcp, + } +} + +func (e DistributedVirtualSwitchHostInfrastructureTrafficClass) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DistributedVirtualSwitchHostInfrastructureTrafficClass"] = reflect.TypeOf((*DistributedVirtualSwitchHostInfrastructureTrafficClass)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchHostInfrastructureTrafficClass"] = "5.5" minAPIVersionForEnumValue["DistributedVirtualSwitchHostInfrastructureTrafficClass"] = map[string]string{ - "vdp": "6.0", "backupNfc": "7.0.1.0", "nvmetcp": "7.0.3.0", } } +// Describes the state of the host proxy switch. 
type DistributedVirtualSwitchHostMemberHostComponentState string const ( @@ -1669,9 +2520,23 @@ const ( DistributedVirtualSwitchHostMemberHostComponentStateDown = DistributedVirtualSwitchHostMemberHostComponentState("down") ) +func (e DistributedVirtualSwitchHostMemberHostComponentState) Values() []DistributedVirtualSwitchHostMemberHostComponentState { + return []DistributedVirtualSwitchHostMemberHostComponentState{ + DistributedVirtualSwitchHostMemberHostComponentStateUp, + DistributedVirtualSwitchHostMemberHostComponentStatePending, + DistributedVirtualSwitchHostMemberHostComponentStateOutOfSync, + DistributedVirtualSwitchHostMemberHostComponentStateWarning, + DistributedVirtualSwitchHostMemberHostComponentStateDisconnected, + DistributedVirtualSwitchHostMemberHostComponentStateDown, + } +} + +func (e DistributedVirtualSwitchHostMemberHostComponentState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DistributedVirtualSwitchHostMemberHostComponentState"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberHostComponentState)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchHostMemberHostComponentState"] = "4.0" } // Describe the runtime state of the uplink. @@ -1682,10 +2547,22 @@ const ( DistributedVirtualSwitchHostMemberHostUplinkStateStateStandby = DistributedVirtualSwitchHostMemberHostUplinkStateState("standby") ) +func (e DistributedVirtualSwitchHostMemberHostUplinkStateState) Values() []DistributedVirtualSwitchHostMemberHostUplinkStateState { + return []DistributedVirtualSwitchHostMemberHostUplinkStateState{ + DistributedVirtualSwitchHostMemberHostUplinkStateStateActive, + DistributedVirtualSwitchHostMemberHostUplinkStateStateStandby, + } +} + +func (e DistributedVirtualSwitchHostMemberHostUplinkStateState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DistributedVirtualSwitchHostMemberHostUplinkStateState"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberHostUplinkStateState)(nil)).Elem() } +// Transport zone type. type DistributedVirtualSwitchHostMemberTransportZoneType string const ( @@ -1695,11 +2572,22 @@ const ( DistributedVirtualSwitchHostMemberTransportZoneTypeOverlay = DistributedVirtualSwitchHostMemberTransportZoneType("overlay") ) -func init() { - t["DistributedVirtualSwitchHostMemberTransportZoneType"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberTransportZoneType)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchHostMemberTransportZoneType"] = "7.0" +func (e DistributedVirtualSwitchHostMemberTransportZoneType) Values() []DistributedVirtualSwitchHostMemberTransportZoneType { + return []DistributedVirtualSwitchHostMemberTransportZoneType{ + DistributedVirtualSwitchHostMemberTransportZoneTypeVlan, + DistributedVirtualSwitchHostMemberTransportZoneTypeOverlay, + } } +func (e DistributedVirtualSwitchHostMemberTransportZoneType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["DistributedVirtualSwitchHostMemberTransportZoneType"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberTransportZoneType)(nil)).Elem() +} + +// Network resource control version types. 
type DistributedVirtualSwitchNetworkResourceControlVersion string const ( @@ -1709,15 +2597,26 @@ const ( DistributedVirtualSwitchNetworkResourceControlVersionVersion3 = DistributedVirtualSwitchNetworkResourceControlVersion("version3") ) +func (e DistributedVirtualSwitchNetworkResourceControlVersion) Values() []DistributedVirtualSwitchNetworkResourceControlVersion { + return []DistributedVirtualSwitchNetworkResourceControlVersion{ + DistributedVirtualSwitchNetworkResourceControlVersionVersion2, + DistributedVirtualSwitchNetworkResourceControlVersionVersion3, + } +} + +func (e DistributedVirtualSwitchNetworkResourceControlVersion) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DistributedVirtualSwitchNetworkResourceControlVersion"] = reflect.TypeOf((*DistributedVirtualSwitchNetworkResourceControlVersion)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchNetworkResourceControlVersion"] = "6.0" } // List of possible teaming modes supported by the vNetwork Distributed // Switch. // // The different policy modes define the way traffic is routed +// through the different uplink ports in a team. type DistributedVirtualSwitchNicTeamingPolicyMode string const ( @@ -1738,11 +2637,25 @@ const ( DistributedVirtualSwitchNicTeamingPolicyModeLoadbalance_loadbased = DistributedVirtualSwitchNicTeamingPolicyMode("loadbalance_loadbased") ) -func init() { - t["DistributedVirtualSwitchNicTeamingPolicyMode"] = reflect.TypeOf((*DistributedVirtualSwitchNicTeamingPolicyMode)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchNicTeamingPolicyMode"] = "4.1" +func (e DistributedVirtualSwitchNicTeamingPolicyMode) Values() []DistributedVirtualSwitchNicTeamingPolicyMode { + return []DistributedVirtualSwitchNicTeamingPolicyMode{ + DistributedVirtualSwitchNicTeamingPolicyModeLoadbalance_ip, + DistributedVirtualSwitchNicTeamingPolicyModeLoadbalance_srcmac, + DistributedVirtualSwitchNicTeamingPolicyModeLoadbalance_srcid, + DistributedVirtualSwitchNicTeamingPolicyModeFailover_explicit, + DistributedVirtualSwitchNicTeamingPolicyModeLoadbalance_loadbased, + } } +func (e DistributedVirtualSwitchNicTeamingPolicyMode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["DistributedVirtualSwitchNicTeamingPolicyMode"] = reflect.TypeOf((*DistributedVirtualSwitchNicTeamingPolicyMode)(nil)).Elem() +} + +// The connectee types. 
type DistributedVirtualSwitchPortConnecteeConnecteeType string const ( @@ -1758,14 +2671,28 @@ const ( DistributedVirtualSwitchPortConnecteeConnecteeTypeSystemCrxVnic = DistributedVirtualSwitchPortConnecteeConnecteeType("systemCrxVnic") ) +func (e DistributedVirtualSwitchPortConnecteeConnecteeType) Values() []DistributedVirtualSwitchPortConnecteeConnecteeType { + return []DistributedVirtualSwitchPortConnecteeConnecteeType{ + DistributedVirtualSwitchPortConnecteeConnecteeTypePnic, + DistributedVirtualSwitchPortConnecteeConnecteeTypeVmVnic, + DistributedVirtualSwitchPortConnecteeConnecteeTypeHostConsoleVnic, + DistributedVirtualSwitchPortConnecteeConnecteeTypeHostVmkVnic, + DistributedVirtualSwitchPortConnecteeConnecteeTypeSystemCrxVnic, + } +} + +func (e DistributedVirtualSwitchPortConnecteeConnecteeType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DistributedVirtualSwitchPortConnecteeConnecteeType"] = reflect.TypeOf((*DistributedVirtualSwitchPortConnecteeConnecteeType)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchPortConnecteeConnecteeType"] = "4.0" minAPIVersionForEnumValue["DistributedVirtualSwitchPortConnecteeConnecteeType"] = map[string]string{ "systemCrxVnic": "8.0.1.0", } } +// The product spec operation types. type DistributedVirtualSwitchProductSpecOperationType string const ( @@ -1810,9 +2737,22 @@ const ( DistributedVirtualSwitchProductSpecOperationTypeUpdateBundleInfo = DistributedVirtualSwitchProductSpecOperationType("updateBundleInfo") ) +func (e DistributedVirtualSwitchProductSpecOperationType) Values() []DistributedVirtualSwitchProductSpecOperationType { + return []DistributedVirtualSwitchProductSpecOperationType{ + DistributedVirtualSwitchProductSpecOperationTypePreInstall, + DistributedVirtualSwitchProductSpecOperationTypeUpgrade, + DistributedVirtualSwitchProductSpecOperationTypeNotifyAvailableUpgrade, + DistributedVirtualSwitchProductSpecOperationTypeProceedWithUpgrade, + DistributedVirtualSwitchProductSpecOperationTypeUpdateBundleInfo, + } +} + +func (e DistributedVirtualSwitchProductSpecOperationType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DistributedVirtualSwitchProductSpecOperationType"] = reflect.TypeOf((*DistributedVirtualSwitchProductSpecOperationType)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchProductSpecOperationType"] = "4.0" } type DpmBehavior string @@ -1828,9 +2768,19 @@ const ( DpmBehaviorAutomated = DpmBehavior("automated") ) +func (e DpmBehavior) Values() []DpmBehavior { + return []DpmBehavior{ + DpmBehaviorManual, + DpmBehaviorAutomated, + } +} + +func (e DpmBehavior) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DpmBehavior"] = reflect.TypeOf((*DpmBehavior)(nil)).Elem() - minAPIVersionForType["DpmBehavior"] = "2.5" } type DrsBehavior string @@ -1849,11 +2799,24 @@ const ( DrsBehaviorFullyAutomated = DrsBehavior("fullyAutomated") ) +func (e DrsBehavior) Values() []DrsBehavior { + return []DrsBehavior{ + DrsBehaviorManual, + DrsBehaviorPartiallyAutomated, + DrsBehaviorFullyAutomated, + } +} + +func (e DrsBehavior) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DrsBehavior"] = reflect.TypeOf((*DrsBehavior)(nil)).Elem() } // Correlation state as computed by storageRM +// module on host. 
type DrsInjectorWorkloadCorrelationState string const ( @@ -1861,9 +2824,19 @@ const ( DrsInjectorWorkloadCorrelationStateUncorrelated = DrsInjectorWorkloadCorrelationState("Uncorrelated") ) +func (e DrsInjectorWorkloadCorrelationState) Values() []DrsInjectorWorkloadCorrelationState { + return []DrsInjectorWorkloadCorrelationState{ + DrsInjectorWorkloadCorrelationStateCorrelated, + DrsInjectorWorkloadCorrelationStateUncorrelated, + } +} + +func (e DrsInjectorWorkloadCorrelationState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DrsInjectorWorkloadCorrelationState"] = reflect.TypeOf((*DrsInjectorWorkloadCorrelationState)(nil)).Elem() - minAPIVersionForType["DrsInjectorWorkloadCorrelationState"] = "5.1" } // Deprecated as of VI API 2.5 use `RecommendationReasonCode_enum`. @@ -1884,10 +2857,25 @@ const ( DrsRecommendationReasonCodeHostMaint = DrsRecommendationReasonCode("hostMaint") ) +func (e DrsRecommendationReasonCode) Values() []DrsRecommendationReasonCode { + return []DrsRecommendationReasonCode{ + DrsRecommendationReasonCodeFairnessCpuAvg, + DrsRecommendationReasonCodeFairnessMemAvg, + DrsRecommendationReasonCodeJointAffin, + DrsRecommendationReasonCodeAntiAffin, + DrsRecommendationReasonCodeHostMaint, + } +} + +func (e DrsRecommendationReasonCode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DrsRecommendationReasonCode"] = reflect.TypeOf((*DrsRecommendationReasonCode)(nil)).Elem() } +// The port blocked/unblocked state. type DvsEventPortBlockState string const ( @@ -1901,15 +2889,28 @@ const ( DvsEventPortBlockStateUnknown = DvsEventPortBlockState("unknown") ) +func (e DvsEventPortBlockState) Values() []DvsEventPortBlockState { + return []DvsEventPortBlockState{ + DvsEventPortBlockStateUnset, + DvsEventPortBlockStateBlocked, + DvsEventPortBlockStateUnblocked, + DvsEventPortBlockStateUnknown, + } +} + +func (e DvsEventPortBlockState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DvsEventPortBlockState"] = reflect.TypeOf((*DvsEventPortBlockState)(nil)).Elem() - minAPIVersionForType["DvsEventPortBlockState"] = "6.5" } // Network Filter on Failure Type. // // It specifies whether all the // packets will be allowed or all the packets will be denied when +// Filter fails to configure. type DvsFilterOnFailure string const ( @@ -1919,14 +2920,25 @@ const ( DvsFilterOnFailureFailClosed = DvsFilterOnFailure("failClosed") ) +func (e DvsFilterOnFailure) Values() []DvsFilterOnFailure { + return []DvsFilterOnFailure{ + DvsFilterOnFailureFailOpen, + DvsFilterOnFailureFailClosed, + } +} + +func (e DvsFilterOnFailure) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DvsFilterOnFailure"] = reflect.TypeOf((*DvsFilterOnFailure)(nil)).Elem() - minAPIVersionForType["DvsFilterOnFailure"] = "5.5" } // Network Traffic Rule direction types. // // It specifies whether rule +// needs to be applied for packets which are incoming/outgoing or both. 
type DvsNetworkRuleDirectionType string const ( @@ -1941,13 +2953,25 @@ const ( DvsNetworkRuleDirectionTypeBoth = DvsNetworkRuleDirectionType("both") ) +func (e DvsNetworkRuleDirectionType) Values() []DvsNetworkRuleDirectionType { + return []DvsNetworkRuleDirectionType{ + DvsNetworkRuleDirectionTypeIncomingPackets, + DvsNetworkRuleDirectionTypeOutgoingPackets, + DvsNetworkRuleDirectionTypeBoth, + } +} + +func (e DvsNetworkRuleDirectionType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["DvsNetworkRuleDirectionType"] = reflect.TypeOf((*DvsNetworkRuleDirectionType)(nil)).Elem() - minAPIVersionForType["DvsNetworkRuleDirectionType"] = "5.5" } // The `EntityImportType_enum` enum defines the import type for a // `DistributedVirtualSwitchManager*.*DistributedVirtualSwitchManager.DVSManagerImportEntity_Task` +// operation. type EntityImportType string const ( @@ -1994,13 +3018,25 @@ const ( EntityImportTypeApplyToEntitySpecified = EntityImportType("applyToEntitySpecified") ) +func (e EntityImportType) Values() []EntityImportType { + return []EntityImportType{ + EntityImportTypeCreateEntityWithNewIdentifier, + EntityImportTypeCreateEntityWithOriginalIdentifier, + EntityImportTypeApplyToEntitySpecified, + } +} + +func (e EntityImportType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["EntityImportType"] = reflect.TypeOf((*EntityImportType)(nil)).Elem() - minAPIVersionForType["EntityImportType"] = "5.1" } // The `EntityType_enum` enum identifies // the type of entity that was exported +// (`DistributedVirtualSwitchManager.DVSManagerExportEntity_Task`). type EntityType string const ( @@ -2010,11 +3046,22 @@ const ( EntityTypeDistributedVirtualPortgroup = EntityType("distributedVirtualPortgroup") ) -func init() { - t["EntityType"] = reflect.TypeOf((*EntityType)(nil)).Elem() - minAPIVersionForType["EntityType"] = "5.1" +func (e EntityType) Values() []EntityType { + return []EntityType{ + EntityTypeDistributedVirtualSwitch, + EntityTypeDistributedVirtualPortgroup, + } } +func (e EntityType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["EntityType"] = reflect.TypeOf((*EntityType)(nil)).Elem() +} + +// Basic Comparison operators type EventAlarmExpressionComparisonOperator string const ( @@ -2032,9 +3079,23 @@ const ( EventAlarmExpressionComparisonOperatorDoesNotEndWith = EventAlarmExpressionComparisonOperator("doesNotEndWith") ) +func (e EventAlarmExpressionComparisonOperator) Values() []EventAlarmExpressionComparisonOperator { + return []EventAlarmExpressionComparisonOperator{ + EventAlarmExpressionComparisonOperatorEquals, + EventAlarmExpressionComparisonOperatorNotEqualTo, + EventAlarmExpressionComparisonOperatorStartsWith, + EventAlarmExpressionComparisonOperatorDoesNotStartWith, + EventAlarmExpressionComparisonOperatorEndsWith, + EventAlarmExpressionComparisonOperatorDoesNotEndWith, + } +} + +func (e EventAlarmExpressionComparisonOperator) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["EventAlarmExpressionComparisonOperator"] = reflect.TypeOf((*EventAlarmExpressionComparisonOperator)(nil)).Elem() - minAPIVersionForType["EventAlarmExpressionComparisonOperator"] = "4.0" } type EventCategory string @@ -2050,10 +3111,24 @@ const ( EventCategoryUser = EventCategory("user") ) +func (e EventCategory) Values() []EventCategory { + return []EventCategory{ + EventCategoryInfo, + EventCategoryWarning, + EventCategoryError, + EventCategoryUser, + } +} + +func (e 
EventCategory) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["EventCategory"] = reflect.TypeOf((*EventCategory)(nil)).Elem() } +// Severity level constants. type EventEventSeverity string const ( @@ -2067,9 +3142,21 @@ const ( EventEventSeverityUser = EventEventSeverity("user") ) +func (e EventEventSeverity) Values() []EventEventSeverity { + return []EventEventSeverity{ + EventEventSeverityError, + EventEventSeverityWarning, + EventEventSeverityInfo, + EventEventSeverityUser, + } +} + +func (e EventEventSeverity) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["EventEventSeverity"] = reflect.TypeOf((*EventEventSeverity)(nil)).Elem() - minAPIVersionForType["EventEventSeverity"] = "4.0" } // This option specifies how to select events based on child relationships @@ -2093,6 +3180,18 @@ const ( EventFilterSpecRecursionOptionAll = EventFilterSpecRecursionOption("all") ) +func (e EventFilterSpecRecursionOption) Values() []EventFilterSpecRecursionOption { + return []EventFilterSpecRecursionOption{ + EventFilterSpecRecursionOptionSelf, + EventFilterSpecRecursionOptionChildren, + EventFilterSpecRecursionOptionAll, + } +} + +func (e EventFilterSpecRecursionOption) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["EventFilterSpecRecursionOption"] = reflect.TypeOf((*EventFilterSpecRecursionOption)(nil)).Elem() } @@ -2107,6 +3206,19 @@ const ( FibreChannelPortTypeUnknown = FibreChannelPortType("unknown") ) +func (e FibreChannelPortType) Values() []FibreChannelPortType { + return []FibreChannelPortType{ + FibreChannelPortTypeFabric, + FibreChannelPortTypeLoop, + FibreChannelPortTypePointToPoint, + FibreChannelPortTypeUnknown, + } +} + +func (e FibreChannelPortType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["FibreChannelPortType"] = reflect.TypeOf((*FibreChannelPortType)(nil)).Elem() } @@ -2119,6 +3231,7 @@ func init() { // As the ESX host attempts hardware-accelerated operations, // it determines whether the storage device supports hardware // acceleration and sets the `HostFileSystemMountInfo.vStorageSupport` +// property accordingly. 
type FileSystemMountInfoVStorageSupportStatus string const ( @@ -2135,9 +3248,20 @@ const ( FileSystemMountInfoVStorageSupportStatusVStorageUnknown = FileSystemMountInfoVStorageSupportStatus("vStorageUnknown") ) +func (e FileSystemMountInfoVStorageSupportStatus) Values() []FileSystemMountInfoVStorageSupportStatus { + return []FileSystemMountInfoVStorageSupportStatus{ + FileSystemMountInfoVStorageSupportStatusVStorageSupported, + FileSystemMountInfoVStorageSupportStatusVStorageUnsupported, + FileSystemMountInfoVStorageSupportStatusVStorageUnknown, + } +} + +func (e FileSystemMountInfoVStorageSupportStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["FileSystemMountInfoVStorageSupportStatus"] = reflect.TypeOf((*FileSystemMountInfoVStorageSupportStatus)(nil)).Elem() - minAPIVersionForType["FileSystemMountInfoVStorageSupportStatus"] = "4.1" } type FolderDesiredHostState string @@ -2149,11 +3273,22 @@ const ( FolderDesiredHostStateNon_maintenance = FolderDesiredHostState("non_maintenance") ) -func init() { - t["FolderDesiredHostState"] = reflect.TypeOf((*FolderDesiredHostState)(nil)).Elem() - minAPIVersionForType["FolderDesiredHostState"] = "6.7.1" +func (e FolderDesiredHostState) Values() []FolderDesiredHostState { + return []FolderDesiredHostState{ + FolderDesiredHostStateMaintenance, + FolderDesiredHostStateNon_maintenance, + } } +func (e FolderDesiredHostState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["FolderDesiredHostState"] = reflect.TypeOf((*FolderDesiredHostState)(nil)).Elem() +} + +// HostSelectionType defines how the host was selected type FtIssuesOnHostHostSelectionType string const ( @@ -2165,9 +3300,20 @@ const ( FtIssuesOnHostHostSelectionTypeDrs = FtIssuesOnHostHostSelectionType("drs") ) +func (e FtIssuesOnHostHostSelectionType) Values() []FtIssuesOnHostHostSelectionType { + return []FtIssuesOnHostHostSelectionType{ + FtIssuesOnHostHostSelectionTypeUser, + FtIssuesOnHostHostSelectionTypeVc, + FtIssuesOnHostHostSelectionTypeDrs, + } +} + +func (e FtIssuesOnHostHostSelectionType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["FtIssuesOnHostHostSelectionType"] = reflect.TypeOf((*FtIssuesOnHostHostSelectionType)(nil)).Elem() - minAPIVersionForType["FtIssuesOnHostHostSelectionType"] = "4.0" } type GuestFileType string @@ -2182,11 +3328,23 @@ const ( GuestFileTypeSymlink = GuestFileType("symlink") ) -func init() { - t["GuestFileType"] = reflect.TypeOf((*GuestFileType)(nil)).Elem() - minAPIVersionForType["GuestFileType"] = "5.0" +func (e GuestFileType) Values() []GuestFileType { + return []GuestFileType{ + GuestFileTypeFile, + GuestFileTypeDirectory, + GuestFileTypeSymlink, + } } +func (e GuestFileType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["GuestFileType"] = reflect.TypeOf((*GuestFileType)(nil)).Elem() +} + +// Application state type. 
type GuestInfoAppStateType string const ( @@ -2201,9 +3359,20 @@ const ( GuestInfoAppStateTypeAppStateNeedReset = GuestInfoAppStateType("appStateNeedReset") ) +func (e GuestInfoAppStateType) Values() []GuestInfoAppStateType { + return []GuestInfoAppStateType{ + GuestInfoAppStateTypeNone, + GuestInfoAppStateTypeAppStateOk, + GuestInfoAppStateTypeAppStateNeedReset, + } +} + +func (e GuestInfoAppStateType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["GuestInfoAppStateType"] = reflect.TypeOf((*GuestInfoAppStateType)(nil)).Elem() - minAPIVersionForType["GuestInfoAppStateType"] = "5.5" } type GuestInfoCustomizationStatus string @@ -2223,11 +3392,26 @@ const ( GuestInfoCustomizationStatusTOOLSDEPLOYPKG_FAILED = GuestInfoCustomizationStatus("TOOLSDEPLOYPKG_FAILED") ) +func (e GuestInfoCustomizationStatus) Values() []GuestInfoCustomizationStatus { + return []GuestInfoCustomizationStatus{ + GuestInfoCustomizationStatusTOOLSDEPLOYPKG_IDLE, + GuestInfoCustomizationStatusTOOLSDEPLOYPKG_PENDING, + GuestInfoCustomizationStatusTOOLSDEPLOYPKG_RUNNING, + GuestInfoCustomizationStatusTOOLSDEPLOYPKG_SUCCEEDED, + GuestInfoCustomizationStatusTOOLSDEPLOYPKG_FAILED, + } +} + +func (e GuestInfoCustomizationStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["GuestInfoCustomizationStatus"] = reflect.TypeOf((*GuestInfoCustomizationStatus)(nil)).Elem() minAPIVersionForType["GuestInfoCustomizationStatus"] = "7.0.2.0" } +// Firmware types type GuestOsDescriptorFirmwareType string const ( @@ -2237,11 +3421,22 @@ const ( GuestOsDescriptorFirmwareTypeEfi = GuestOsDescriptorFirmwareType("efi") ) -func init() { - t["GuestOsDescriptorFirmwareType"] = reflect.TypeOf((*GuestOsDescriptorFirmwareType)(nil)).Elem() - minAPIVersionForType["GuestOsDescriptorFirmwareType"] = "5.0" +func (e GuestOsDescriptorFirmwareType) Values() []GuestOsDescriptorFirmwareType { + return []GuestOsDescriptorFirmwareType{ + GuestOsDescriptorFirmwareTypeBios, + GuestOsDescriptorFirmwareTypeEfi, + } } +func (e GuestOsDescriptorFirmwareType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["GuestOsDescriptorFirmwareType"] = reflect.TypeOf((*GuestOsDescriptorFirmwareType)(nil)).Elem() +} + +// Guest OS support level type GuestOsDescriptorSupportLevel string const ( @@ -2266,14 +3461,24 @@ const ( GuestOsDescriptorSupportLevelTechPreview = GuestOsDescriptorSupportLevel("techPreview") ) +func (e GuestOsDescriptorSupportLevel) Values() []GuestOsDescriptorSupportLevel { + return []GuestOsDescriptorSupportLevel{ + GuestOsDescriptorSupportLevelExperimental, + GuestOsDescriptorSupportLevelLegacy, + GuestOsDescriptorSupportLevelTerminated, + GuestOsDescriptorSupportLevelSupported, + GuestOsDescriptorSupportLevelUnsupported, + GuestOsDescriptorSupportLevelDeprecated, + GuestOsDescriptorSupportLevelTechPreview, + } +} + +func (e GuestOsDescriptorSupportLevel) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["GuestOsDescriptorSupportLevel"] = reflect.TypeOf((*GuestOsDescriptorSupportLevel)(nil)).Elem() - minAPIVersionForType["GuestOsDescriptorSupportLevel"] = "5.0" - minAPIVersionForEnumValue["GuestOsDescriptorSupportLevel"] = map[string]string{ - "unsupported": "5.1", - "deprecated": "5.1", - "techPreview": "5.1", - } } // End guest quiesce phase error types. 
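Every Strings() method added in these hunks delegates to EnumValuesAsStrings, whose definition is not shown in this diff. As a rough mental model only, a generic conversion over string-backed enums could look like the sketch below; the lowercase names mark it as an illustrative stand-in, not govmomi's actual helper:

package enums // hypothetical package, for illustration only

// enumValuesAsStrings is a stand-in for the helper that the generated
// Strings() methods call; the library's real implementation may differ.
// It converts a slice of string-backed enum values to []string.
func enumValuesAsStrings[T ~string](values []T) []string {
	out := make([]string, len(values))
	for i, v := range values {
		out[i] = string(v)
	}
	return out
}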
@@ -2284,6 +3489,16 @@ const ( GuestQuiesceEndGuestQuiesceErrorFailure = GuestQuiesceEndGuestQuiesceError("failure") ) +func (e GuestQuiesceEndGuestQuiesceError) Values() []GuestQuiesceEndGuestQuiesceError { + return []GuestQuiesceEndGuestQuiesceError{ + GuestQuiesceEndGuestQuiesceErrorFailure, + } +} + +func (e GuestQuiesceEndGuestQuiesceError) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["GuestQuiesceEndGuestQuiesceError"] = reflect.TypeOf((*GuestQuiesceEndGuestQuiesceError)(nil)).Elem() } @@ -2295,6 +3510,7 @@ func init() { // that allows 32-bit Windows-based applications to run seamlessly on // 64-bit Windows. Please refer to these MSDN sites for more details: // http://msdn.microsoft.com/en-us/library/aa384249(v=vs.85).aspx and +// http://msdn.microsoft.com/en-us/library/aa384253(v=vs.85).aspx type GuestRegKeyWowSpec string const ( @@ -2308,9 +3524,20 @@ const ( GuestRegKeyWowSpecWOW64 = GuestRegKeyWowSpec("WOW64") ) +func (e GuestRegKeyWowSpec) Values() []GuestRegKeyWowSpec { + return []GuestRegKeyWowSpec{ + GuestRegKeyWowSpecWOWNative, + GuestRegKeyWowSpecWOW32, + GuestRegKeyWowSpecWOW64, + } +} + +func (e GuestRegKeyWowSpec) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["GuestRegKeyWowSpec"] = reflect.TypeOf((*GuestRegKeyWowSpec)(nil)).Elem() - minAPIVersionForType["GuestRegKeyWowSpec"] = "6.0" } type HealthUpdateInfoComponentType string @@ -2323,9 +3550,22 @@ const ( HealthUpdateInfoComponentTypeStorage = HealthUpdateInfoComponentType("Storage") ) +func (e HealthUpdateInfoComponentType) Values() []HealthUpdateInfoComponentType { + return []HealthUpdateInfoComponentType{ + HealthUpdateInfoComponentTypeMemory, + HealthUpdateInfoComponentTypePower, + HealthUpdateInfoComponentTypeFan, + HealthUpdateInfoComponentTypeNetwork, + HealthUpdateInfoComponentTypeStorage, + } +} + +func (e HealthUpdateInfoComponentType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HealthUpdateInfoComponentType"] = reflect.TypeOf((*HealthUpdateInfoComponentType)(nil)).Elem() - minAPIVersionForType["HealthUpdateInfoComponentType"] = "6.5" } // Defines different access modes that a user may have on the host for @@ -2333,6 +3573,7 @@ func init() { // // The assumption here is that when the host is managed by vCenter, // we don't need fine-grained control on local user permissions like the +// interface provided by `AuthorizationManager`. 
type HostAccessMode string const ( @@ -2371,9 +3612,22 @@ const ( HostAccessModeAccessOther = HostAccessMode("accessOther") ) +func (e HostAccessMode) Values() []HostAccessMode { + return []HostAccessMode{ + HostAccessModeAccessNone, + HostAccessModeAccessAdmin, + HostAccessModeAccessNoAccess, + HostAccessModeAccessReadOnly, + HostAccessModeAccessOther, + } +} + +func (e HostAccessMode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostAccessMode"] = reflect.TypeOf((*HostAccessMode)(nil)).Elem() - minAPIVersionForType["HostAccessMode"] = "6.0" } type HostActiveDirectoryAuthenticationCertificateDigest string @@ -2382,9 +3636,18 @@ const ( HostActiveDirectoryAuthenticationCertificateDigestSHA1 = HostActiveDirectoryAuthenticationCertificateDigest("SHA1") ) +func (e HostActiveDirectoryAuthenticationCertificateDigest) Values() []HostActiveDirectoryAuthenticationCertificateDigest { + return []HostActiveDirectoryAuthenticationCertificateDigest{ + HostActiveDirectoryAuthenticationCertificateDigestSHA1, + } +} + +func (e HostActiveDirectoryAuthenticationCertificateDigest) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostActiveDirectoryAuthenticationCertificateDigest"] = reflect.TypeOf((*HostActiveDirectoryAuthenticationCertificateDigest)(nil)).Elem() - minAPIVersionForType["HostActiveDirectoryAuthenticationCertificateDigest"] = "6.0" } type HostActiveDirectoryInfoDomainMembershipStatus string @@ -2409,9 +3672,24 @@ const ( HostActiveDirectoryInfoDomainMembershipStatusOtherProblem = HostActiveDirectoryInfoDomainMembershipStatus("otherProblem") ) +func (e HostActiveDirectoryInfoDomainMembershipStatus) Values() []HostActiveDirectoryInfoDomainMembershipStatus { + return []HostActiveDirectoryInfoDomainMembershipStatus{ + HostActiveDirectoryInfoDomainMembershipStatusUnknown, + HostActiveDirectoryInfoDomainMembershipStatusOk, + HostActiveDirectoryInfoDomainMembershipStatusNoServers, + HostActiveDirectoryInfoDomainMembershipStatusClientTrustBroken, + HostActiveDirectoryInfoDomainMembershipStatusServerTrustBroken, + HostActiveDirectoryInfoDomainMembershipStatusInconsistentTrust, + HostActiveDirectoryInfoDomainMembershipStatusOtherProblem, + } +} + +func (e HostActiveDirectoryInfoDomainMembershipStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostActiveDirectoryInfoDomainMembershipStatus"] = reflect.TypeOf((*HostActiveDirectoryInfoDomainMembershipStatus)(nil)).Elem() - minAPIVersionForType["HostActiveDirectoryInfoDomainMembershipStatus"] = "4.1" } type HostBIOSInfoFirmwareType string @@ -2421,6 +3699,17 @@ const ( HostBIOSInfoFirmwareTypeUEFI = HostBIOSInfoFirmwareType("UEFI") ) +func (e HostBIOSInfoFirmwareType) Values() []HostBIOSInfoFirmwareType { + return []HostBIOSInfoFirmwareType{ + HostBIOSInfoFirmwareTypeBIOS, + HostBIOSInfoFirmwareTypeUEFI, + } +} + +func (e HostBIOSInfoFirmwareType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostBIOSInfoFirmwareType"] = reflect.TypeOf((*HostBIOSInfoFirmwareType)(nil)).Elem() minAPIVersionForType["HostBIOSInfoFirmwareType"] = "8.0.2.0" @@ -2430,6 +3719,7 @@ func init() { // `VmFaultToleranceConfigIssueReasonForIssue_enum`. 
// // Set of possible values for +// `HostCapability.ftCompatibilityIssues` type HostCapabilityFtUnsupportedReason string const ( @@ -2455,17 +3745,29 @@ const ( HostCapabilityFtUnsupportedReasonCpuHvDisabled = HostCapabilityFtUnsupportedReason("cpuHvDisabled") ) -func init() { - t["HostCapabilityFtUnsupportedReason"] = reflect.TypeOf((*HostCapabilityFtUnsupportedReason)(nil)).Elem() - minAPIVersionForType["HostCapabilityFtUnsupportedReason"] = "4.1" - minAPIVersionForEnumValue["HostCapabilityFtUnsupportedReason"] = map[string]string{ - "unsupportedProduct": "6.0", - "cpuHvUnsupported": "6.0", - "cpuHwmmuUnsupported": "6.0", - "cpuHvDisabled": "6.0", +func (e HostCapabilityFtUnsupportedReason) Values() []HostCapabilityFtUnsupportedReason { + return []HostCapabilityFtUnsupportedReason{ + HostCapabilityFtUnsupportedReasonVMotionNotLicensed, + HostCapabilityFtUnsupportedReasonMissingVMotionNic, + HostCapabilityFtUnsupportedReasonMissingFTLoggingNic, + HostCapabilityFtUnsupportedReasonFtNotLicensed, + HostCapabilityFtUnsupportedReasonHaAgentIssue, + HostCapabilityFtUnsupportedReasonUnsupportedProduct, + HostCapabilityFtUnsupportedReasonCpuHvUnsupported, + HostCapabilityFtUnsupportedReasonCpuHwmmuUnsupported, + HostCapabilityFtUnsupportedReasonCpuHvDisabled, } } +func (e HostCapabilityFtUnsupportedReason) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostCapabilityFtUnsupportedReason"] = reflect.TypeOf((*HostCapabilityFtUnsupportedReason)(nil)).Elem() +} + +// Set of VMFS unmap API version. type HostCapabilityUnmapMethodSupported string const ( @@ -2478,11 +3780,23 @@ const ( HostCapabilityUnmapMethodSupportedDynamic = HostCapabilityUnmapMethodSupported("dynamic") ) -func init() { - t["HostCapabilityUnmapMethodSupported"] = reflect.TypeOf((*HostCapabilityUnmapMethodSupported)(nil)).Elem() - minAPIVersionForType["HostCapabilityUnmapMethodSupported"] = "6.7" +func (e HostCapabilityUnmapMethodSupported) Values() []HostCapabilityUnmapMethodSupported { + return []HostCapabilityUnmapMethodSupported{ + HostCapabilityUnmapMethodSupportedPriority, + HostCapabilityUnmapMethodSupportedFixed, + HostCapabilityUnmapMethodSupportedDynamic, + } } +func (e HostCapabilityUnmapMethodSupported) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostCapabilityUnmapMethodSupported"] = reflect.TypeOf((*HostCapabilityUnmapMethodSupported)(nil)).Elem() +} + +// Set of possible values for `HostCapability.vmDirectPathGen2UnsupportedReason`. 
type HostCapabilityVmDirectPathGen2UnsupportedReason string const ( @@ -2498,9 +3812,20 @@ const ( HostCapabilityVmDirectPathGen2UnsupportedReasonHostNptDisabled = HostCapabilityVmDirectPathGen2UnsupportedReason("hostNptDisabled") ) +func (e HostCapabilityVmDirectPathGen2UnsupportedReason) Values() []HostCapabilityVmDirectPathGen2UnsupportedReason { + return []HostCapabilityVmDirectPathGen2UnsupportedReason{ + HostCapabilityVmDirectPathGen2UnsupportedReasonHostNptIncompatibleProduct, + HostCapabilityVmDirectPathGen2UnsupportedReasonHostNptIncompatibleHardware, + HostCapabilityVmDirectPathGen2UnsupportedReasonHostNptDisabled, + } +} + +func (e HostCapabilityVmDirectPathGen2UnsupportedReason) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostCapabilityVmDirectPathGen2UnsupportedReason"] = reflect.TypeOf((*HostCapabilityVmDirectPathGen2UnsupportedReason)(nil)).Elem() - minAPIVersionForType["HostCapabilityVmDirectPathGen2UnsupportedReason"] = "4.1" } // The status of a given certificate as computed per the soft and the hard @@ -2522,6 +3847,7 @@ func init() { // Hard Threshold: // // vCenter Server will publish an alarm and indicate via the UI that the +// certificate expiration is imminent. type HostCertificateManagerCertificateInfoCertificateStatus string const ( @@ -2545,9 +3871,23 @@ const ( HostCertificateManagerCertificateInfoCertificateStatusGood = HostCertificateManagerCertificateInfoCertificateStatus("good") ) +func (e HostCertificateManagerCertificateInfoCertificateStatus) Values() []HostCertificateManagerCertificateInfoCertificateStatus { + return []HostCertificateManagerCertificateInfoCertificateStatus{ + HostCertificateManagerCertificateInfoCertificateStatusUnknown, + HostCertificateManagerCertificateInfoCertificateStatusExpired, + HostCertificateManagerCertificateInfoCertificateStatusExpiring, + HostCertificateManagerCertificateInfoCertificateStatusExpiringShortly, + HostCertificateManagerCertificateInfoCertificateStatusExpirationImminent, + HostCertificateManagerCertificateInfoCertificateStatusGood, + } +} + +func (e HostCertificateManagerCertificateInfoCertificateStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostCertificateManagerCertificateInfoCertificateStatus"] = reflect.TypeOf((*HostCertificateManagerCertificateInfoCertificateStatus)(nil)).Elem() - minAPIVersionForType["HostCertificateManagerCertificateInfoCertificateStatus"] = "6.0" } type HostCertificateManagerCertificateKind string @@ -2559,6 +3899,17 @@ const ( HostCertificateManagerCertificateKindVASAClient = HostCertificateManagerCertificateKind("VASAClient") ) +func (e HostCertificateManagerCertificateKind) Values() []HostCertificateManagerCertificateKind { + return []HostCertificateManagerCertificateKind{ + HostCertificateManagerCertificateKindMachine, + HostCertificateManagerCertificateKindVASAClient, + } +} + +func (e HostCertificateManagerCertificateKind) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostCertificateManagerCertificateKind"] = reflect.TypeOf((*HostCertificateManagerCertificateKind)(nil)).Elem() minAPIVersionForType["HostCertificateManagerCertificateKind"] = "8.0.1.0" @@ -2578,6 +3929,17 @@ const ( HostConfigChangeModeReplace = HostConfigChangeMode("replace") ) +func (e HostConfigChangeMode) Values() []HostConfigChangeMode { + return []HostConfigChangeMode{ + HostConfigChangeModeModify, + HostConfigChangeModeReplace, + } +} + +func (e HostConfigChangeMode) Strings() []string { + 
return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostConfigChangeMode"] = reflect.TypeOf((*HostConfigChangeMode)(nil)).Elem() } @@ -2601,11 +3963,21 @@ const ( HostConfigChangeOperationIgnore = HostConfigChangeOperation("ignore") ) +func (e HostConfigChangeOperation) Values() []HostConfigChangeOperation { + return []HostConfigChangeOperation{ + HostConfigChangeOperationAdd, + HostConfigChangeOperationRemove, + HostConfigChangeOperationEdit, + HostConfigChangeOperationIgnore, + } +} + +func (e HostConfigChangeOperation) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostConfigChangeOperation"] = reflect.TypeOf((*HostConfigChangeOperation)(nil)).Elem() - minAPIVersionForEnumValue["HostConfigChangeOperation"] = map[string]string{ - "ignore": "5.5", - } } type HostCpuPackageVendor string @@ -2614,17 +3986,27 @@ const ( HostCpuPackageVendorUnknown = HostCpuPackageVendor("unknown") HostCpuPackageVendorIntel = HostCpuPackageVendor("intel") HostCpuPackageVendorAmd = HostCpuPackageVendor("amd") - // `**Since:**` vSphere API Release 6.7.1 - HostCpuPackageVendorHygon = HostCpuPackageVendor("hygon") + HostCpuPackageVendorHygon = HostCpuPackageVendor("hygon") ) -func init() { - t["HostCpuPackageVendor"] = reflect.TypeOf((*HostCpuPackageVendor)(nil)).Elem() - minAPIVersionForEnumValue["HostCpuPackageVendor"] = map[string]string{ - "hygon": "6.7.1", +func (e HostCpuPackageVendor) Values() []HostCpuPackageVendor { + return []HostCpuPackageVendor{ + HostCpuPackageVendorUnknown, + HostCpuPackageVendorIntel, + HostCpuPackageVendorAmd, + HostCpuPackageVendorHygon, } } +func (e HostCpuPackageVendor) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostCpuPackageVendor"] = reflect.TypeOf((*HostCpuPackageVendor)(nil)).Elem() +} + +// Possible values for Current CPU power management policy type HostCpuPowerManagementInfoPolicyType string const ( @@ -2633,11 +4015,54 @@ const ( HostCpuPowerManagementInfoPolicyTypeDynamicPolicy = HostCpuPowerManagementInfoPolicyType("dynamicPolicy") ) -func init() { - t["HostCpuPowerManagementInfoPolicyType"] = reflect.TypeOf((*HostCpuPowerManagementInfoPolicyType)(nil)).Elem() - minAPIVersionForType["HostCpuPowerManagementInfoPolicyType"] = "4.0" +func (e HostCpuPowerManagementInfoPolicyType) Values() []HostCpuPowerManagementInfoPolicyType { + return []HostCpuPowerManagementInfoPolicyType{ + HostCpuPowerManagementInfoPolicyTypeOff, + HostCpuPowerManagementInfoPolicyTypeStaticPolicy, + HostCpuPowerManagementInfoPolicyTypeDynamicPolicy, + } } +func (e HostCpuPowerManagementInfoPolicyType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostCpuPowerManagementInfoPolicyType"] = reflect.TypeOf((*HostCpuPowerManagementInfoPolicyType)(nil)).Elem() +} + +type HostCpuSchedulerInfoCpuSchedulerPolicyInfo string + +const ( + // The CPU scheduler on this host is running without any modifications + // or mitigations. + HostCpuSchedulerInfoCpuSchedulerPolicyInfoSystemDefault = HostCpuSchedulerInfoCpuSchedulerPolicyInfo("systemDefault") + // The CPU scheduler on this host is using only one hyperthread per + // core to mitigate a security vulnerability. + HostCpuSchedulerInfoCpuSchedulerPolicyInfoScav1 = HostCpuSchedulerInfoCpuSchedulerPolicyInfo("scav1") + // The CPU scheduler on this host is using hyperthreads, with + // Side-Channel aware scheduling to mitigate a security vulnerability. 
+ HostCpuSchedulerInfoCpuSchedulerPolicyInfoScav2 = HostCpuSchedulerInfoCpuSchedulerPolicyInfo("scav2") +) + +func (e HostCpuSchedulerInfoCpuSchedulerPolicyInfo) Values() []HostCpuSchedulerInfoCpuSchedulerPolicyInfo { + return []HostCpuSchedulerInfoCpuSchedulerPolicyInfo{ + HostCpuSchedulerInfoCpuSchedulerPolicyInfoSystemDefault, + HostCpuSchedulerInfoCpuSchedulerPolicyInfoScav1, + HostCpuSchedulerInfoCpuSchedulerPolicyInfoScav2, + } +} + +func (e HostCpuSchedulerInfoCpuSchedulerPolicyInfo) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostCpuSchedulerInfoCpuSchedulerPolicyInfo"] = reflect.TypeOf((*HostCpuSchedulerInfoCpuSchedulerPolicyInfo)(nil)).Elem() + minAPIVersionForType["HostCpuSchedulerInfoCpuSchedulerPolicyInfo"] = "8.0.3.0" +} + +// Defines a host's encryption state type HostCryptoState string const ( @@ -2657,12 +4082,21 @@ const ( HostCryptoStatePendingIncapable = HostCryptoState("pendingIncapable") ) +func (e HostCryptoState) Values() []HostCryptoState { + return []HostCryptoState{ + HostCryptoStateIncapable, + HostCryptoStatePrepared, + HostCryptoStateSafe, + HostCryptoStatePendingIncapable, + } +} + +func (e HostCryptoState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostCryptoState"] = reflect.TypeOf((*HostCryptoState)(nil)).Elem() - minAPIVersionForType["HostCryptoState"] = "6.5" - minAPIVersionForEnumValue["HostCryptoState"] = map[string]string{ - "pendingIncapable": "7.0", - } } type HostDVSConfigSpecSwitchMode string @@ -2674,6 +4108,17 @@ const ( HostDVSConfigSpecSwitchModeMux = HostDVSConfigSpecSwitchMode("mux") ) +func (e HostDVSConfigSpecSwitchMode) Values() []HostDVSConfigSpecSwitchMode { + return []HostDVSConfigSpecSwitchMode{ + HostDVSConfigSpecSwitchModeNormal, + HostDVSConfigSpecSwitchModeMux, + } +} + +func (e HostDVSConfigSpecSwitchMode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostDVSConfigSpecSwitchMode"] = reflect.TypeOf((*HostDVSConfigSpecSwitchMode)(nil)).Elem() minAPIVersionForType["HostDVSConfigSpecSwitchMode"] = "8.0.0.1" @@ -2700,14 +4145,28 @@ const ( HostDasErrorEventHostDasErrorReasonOther = HostDasErrorEventHostDasErrorReason("other") ) -func init() { - t["HostDasErrorEventHostDasErrorReason"] = reflect.TypeOf((*HostDasErrorEventHostDasErrorReason)(nil)).Elem() - minAPIVersionForType["HostDasErrorEventHostDasErrorReason"] = "4.0" - minAPIVersionForEnumValue["HostDasErrorEventHostDasErrorReason"] = map[string]string{ - "isolationAddressUnpingable": "4.1", +func (e HostDasErrorEventHostDasErrorReason) Values() []HostDasErrorEventHostDasErrorReason { + return []HostDasErrorEventHostDasErrorReason{ + HostDasErrorEventHostDasErrorReasonConfigFailed, + HostDasErrorEventHostDasErrorReasonTimeout, + HostDasErrorEventHostDasErrorReasonCommunicationInitFailed, + HostDasErrorEventHostDasErrorReasonHealthCheckScriptFailed, + HostDasErrorEventHostDasErrorReasonAgentFailed, + HostDasErrorEventHostDasErrorReasonAgentShutdown, + HostDasErrorEventHostDasErrorReasonIsolationAddressUnpingable, + HostDasErrorEventHostDasErrorReasonOther, } } +func (e HostDasErrorEventHostDasErrorReason) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostDasErrorEventHostDasErrorReason"] = reflect.TypeOf((*HostDasErrorEventHostDasErrorReason)(nil)).Elem() +} + +// Types of time synchronization protocols. 
type HostDateTimeInfoProtocol string const ( @@ -2717,41 +4176,54 @@ const ( HostDateTimeInfoProtocolPtp = HostDateTimeInfoProtocol("ptp") ) +func (e HostDateTimeInfoProtocol) Values() []HostDateTimeInfoProtocol { + return []HostDateTimeInfoProtocol{ + HostDateTimeInfoProtocolNtp, + HostDateTimeInfoProtocolPtp, + } +} + +func (e HostDateTimeInfoProtocol) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostDateTimeInfoProtocol"] = reflect.TypeOf((*HostDateTimeInfoProtocol)(nil)).Elem() - minAPIVersionForType["HostDateTimeInfoProtocol"] = "7.0" } // The set of digest methods that can be used by TPM to calculate the PCR +// values. type HostDigestInfoDigestMethodType string const ( HostDigestInfoDigestMethodTypeSHA1 = HostDigestInfoDigestMethodType("SHA1") - // - // // Deprecated as of vSphere API 6.7. // // MD5. - HostDigestInfoDigestMethodTypeMD5 = HostDigestInfoDigestMethodType("MD5") - // `**Since:**` vSphere API Release 6.7 - HostDigestInfoDigestMethodTypeSHA256 = HostDigestInfoDigestMethodType("SHA256") - // `**Since:**` vSphere API Release 6.7 - HostDigestInfoDigestMethodTypeSHA384 = HostDigestInfoDigestMethodType("SHA384") - // `**Since:**` vSphere API Release 6.7 - HostDigestInfoDigestMethodTypeSHA512 = HostDigestInfoDigestMethodType("SHA512") - // `**Since:**` vSphere API Release 6.7 + HostDigestInfoDigestMethodTypeMD5 = HostDigestInfoDigestMethodType("MD5") + HostDigestInfoDigestMethodTypeSHA256 = HostDigestInfoDigestMethodType("SHA256") + HostDigestInfoDigestMethodTypeSHA384 = HostDigestInfoDigestMethodType("SHA384") + HostDigestInfoDigestMethodTypeSHA512 = HostDigestInfoDigestMethodType("SHA512") HostDigestInfoDigestMethodTypeSM3_256 = HostDigestInfoDigestMethodType("SM3_256") ) +func (e HostDigestInfoDigestMethodType) Values() []HostDigestInfoDigestMethodType { + return []HostDigestInfoDigestMethodType{ + HostDigestInfoDigestMethodTypeSHA1, + HostDigestInfoDigestMethodTypeMD5, + HostDigestInfoDigestMethodTypeSHA256, + HostDigestInfoDigestMethodTypeSHA384, + HostDigestInfoDigestMethodTypeSHA512, + HostDigestInfoDigestMethodTypeSM3_256, + } +} + +func (e HostDigestInfoDigestMethodType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostDigestInfoDigestMethodType"] = reflect.TypeOf((*HostDigestInfoDigestMethodType)(nil)).Elem() - minAPIVersionForType["HostDigestInfoDigestMethodType"] = "4.0" - minAPIVersionForEnumValue["HostDigestInfoDigestMethodType"] = map[string]string{ - "SHA256": "6.7", - "SHA384": "6.7", - "SHA512": "6.7", - "SM3_256": "6.7", - } } // This enum specifies the supported digest verification settings. 
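A practical effect of the new accessors is cheap input validation against an enum's full value set. For example, checking a digest-method string against HostDigestInfoDigestMethodType; the function and package names below are ours, for illustration, and slices is the standard-library package (Go 1.21+):

package enumcheck // hypothetical package, for illustration only

import (
	"slices"

	"github.com/vmware/govmomi/vim25/types"
)

// isValidDigestMethod reports whether s names a defined
// HostDigestInfoDigestMethodType constant, using the Strings() helper
// introduced in this change.
func isValidDigestMethod(s string) bool {
	return slices.Contains(types.HostDigestInfoDigestMethodType("").Strings(), s)
}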
@@ -2773,6 +4245,19 @@ const ( HostDigestVerificationSettingHeaderAndData = HostDigestVerificationSetting("headerAndData") ) +func (e HostDigestVerificationSetting) Values() []HostDigestVerificationSetting { + return []HostDigestVerificationSetting{ + HostDigestVerificationSettingDigestDisabled, + HostDigestVerificationSettingHeaderOnly, + HostDigestVerificationSettingDataOnly, + HostDigestVerificationSettingHeaderAndData, + } +} + +func (e HostDigestVerificationSetting) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostDigestVerificationSetting"] = reflect.TypeOf((*HostDigestVerificationSetting)(nil)).Elem() minAPIVersionForType["HostDigestVerificationSetting"] = "7.0.3.0" @@ -2801,18 +4286,31 @@ const ( HostDisconnectedEventReasonCodeVcVRAMCapacityExceeded = HostDisconnectedEventReasonCode("vcVRAMCapacityExceeded") ) -func init() { - t["HostDisconnectedEventReasonCode"] = reflect.TypeOf((*HostDisconnectedEventReasonCode)(nil)).Elem() - minAPIVersionForType["HostDisconnectedEventReasonCode"] = "4.0" - minAPIVersionForEnumValue["HostDisconnectedEventReasonCode"] = map[string]string{ - "agentOutOfDate": "4.1", - "passwordDecryptFailure": "4.1", - "unknown": "4.1", - "vcVRAMCapacityExceeded": "5.1", +func (e HostDisconnectedEventReasonCode) Values() []HostDisconnectedEventReasonCode { + return []HostDisconnectedEventReasonCode{ + HostDisconnectedEventReasonCodeSslThumbprintVerifyFailed, + HostDisconnectedEventReasonCodeLicenseExpired, + HostDisconnectedEventReasonCodeAgentUpgrade, + HostDisconnectedEventReasonCodeUserRequest, + HostDisconnectedEventReasonCodeInsufficientLicenses, + HostDisconnectedEventReasonCodeAgentOutOfDate, + HostDisconnectedEventReasonCodePasswordDecryptFailure, + HostDisconnectedEventReasonCodeUnknown, + HostDisconnectedEventReasonCodeVcVRAMCapacityExceeded, } } +func (e HostDisconnectedEventReasonCode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostDisconnectedEventReasonCode"] = reflect.TypeOf((*HostDisconnectedEventReasonCode)(nil)).Elem() +} + // List of partition format types. +// +// This denotes the partition table layout. 
type HostDiskPartitionInfoPartitionFormat string const ( @@ -2821,9 +4319,20 @@ const ( HostDiskPartitionInfoPartitionFormatUnknown = HostDiskPartitionInfoPartitionFormat("unknown") ) +func (e HostDiskPartitionInfoPartitionFormat) Values() []HostDiskPartitionInfoPartitionFormat { + return []HostDiskPartitionInfoPartitionFormat{ + HostDiskPartitionInfoPartitionFormatGpt, + HostDiskPartitionInfoPartitionFormatMbr, + HostDiskPartitionInfoPartitionFormatUnknown, + } +} + +func (e HostDiskPartitionInfoPartitionFormat) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostDiskPartitionInfoPartitionFormat"] = reflect.TypeOf((*HostDiskPartitionInfoPartitionFormat)(nil)).Elem() - minAPIVersionForType["HostDiskPartitionInfoPartitionFormat"] = "5.0" } // List of symbol partition types @@ -2837,19 +4346,82 @@ const ( HostDiskPartitionInfoTypeExtended = HostDiskPartitionInfoType("extended") HostDiskPartitionInfoTypeNtfs = HostDiskPartitionInfoType("ntfs") HostDiskPartitionInfoTypeVmkDiagnostic = HostDiskPartitionInfoType("vmkDiagnostic") - // `**Since:**` vSphere API Release 5.5 - HostDiskPartitionInfoTypeVffs = HostDiskPartitionInfoType("vffs") + HostDiskPartitionInfoTypeVffs = HostDiskPartitionInfoType("vffs") ) +func (e HostDiskPartitionInfoType) Values() []HostDiskPartitionInfoType { + return []HostDiskPartitionInfoType{ + HostDiskPartitionInfoTypeNone, + HostDiskPartitionInfoTypeVmfs, + HostDiskPartitionInfoTypeLinuxNative, + HostDiskPartitionInfoTypeLinuxSwap, + HostDiskPartitionInfoTypeExtended, + HostDiskPartitionInfoTypeNtfs, + HostDiskPartitionInfoTypeVmkDiagnostic, + HostDiskPartitionInfoTypeVffs, + } +} + +func (e HostDiskPartitionInfoType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostDiskPartitionInfoType"] = reflect.TypeOf((*HostDiskPartitionInfoType)(nil)).Elem() - minAPIVersionForEnumValue["HostDiskPartitionInfoType"] = map[string]string{ - "vffs": "5.5", +} + +type HostDistributedVirtualSwitchManagerFailoverReason string + +const ( + // The failover is caused by DPU crash. + HostDistributedVirtualSwitchManagerFailoverReasonCrash = HostDistributedVirtualSwitchManagerFailoverReason("crash") + // The failover is caused by DPU's vmnic(s) link down. + HostDistributedVirtualSwitchManagerFailoverReasonLinkDown = HostDistributedVirtualSwitchManagerFailoverReason("linkDown") + // The failover is triggered by the user. 
+ HostDistributedVirtualSwitchManagerFailoverReasonUserInitiated = HostDistributedVirtualSwitchManagerFailoverReason("userInitiated") +) + +func (e HostDistributedVirtualSwitchManagerFailoverReason) Values() []HostDistributedVirtualSwitchManagerFailoverReason { + return []HostDistributedVirtualSwitchManagerFailoverReason{ + HostDistributedVirtualSwitchManagerFailoverReasonCrash, + HostDistributedVirtualSwitchManagerFailoverReasonLinkDown, + HostDistributedVirtualSwitchManagerFailoverReasonUserInitiated, } } +func (e HostDistributedVirtualSwitchManagerFailoverReason) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostDistributedVirtualSwitchManagerFailoverReason"] = reflect.TypeOf((*HostDistributedVirtualSwitchManagerFailoverReason)(nil)).Elem() + minAPIVersionForType["HostDistributedVirtualSwitchManagerFailoverReason"] = "8.0.3.0" +} + +type HostDistributedVirtualSwitchManagerFailoverStage string + +const ( + HostDistributedVirtualSwitchManagerFailoverStageSTAGE_1 = HostDistributedVirtualSwitchManagerFailoverStage("STAGE_1") +) + +func (e HostDistributedVirtualSwitchManagerFailoverStage) Values() []HostDistributedVirtualSwitchManagerFailoverStage { + return []HostDistributedVirtualSwitchManagerFailoverStage{ + HostDistributedVirtualSwitchManagerFailoverStageSTAGE_1, + } +} + +func (e HostDistributedVirtualSwitchManagerFailoverStage) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostDistributedVirtualSwitchManagerFailoverStage"] = reflect.TypeOf((*HostDistributedVirtualSwitchManagerFailoverStage)(nil)).Elem() + minAPIVersionForType["HostDistributedVirtualSwitchManagerFailoverStage"] = "8.0.3.0" +} + // Set of possible values for // `HostFeatureVersionInfo.key`, which +// is a unique key that identifies a feature. type HostFeatureVersionKey string const ( @@ -2866,11 +4438,21 @@ const ( HostFeatureVersionKeyFaultTolerance = HostFeatureVersionKey("faultTolerance") ) -func init() { - t["HostFeatureVersionKey"] = reflect.TypeOf((*HostFeatureVersionKey)(nil)).Elem() - minAPIVersionForType["HostFeatureVersionKey"] = "4.1" +func (e HostFeatureVersionKey) Values() []HostFeatureVersionKey { + return []HostFeatureVersionKey{ + HostFeatureVersionKeyFaultTolerance, + } } +func (e HostFeatureVersionKey) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostFeatureVersionKey"] = reflect.TypeOf((*HostFeatureVersionKey)(nil)).Elem() +} + +// Type of file system volume. 
type HostFileSystemVolumeFileSystemType string const ( @@ -2913,15 +4495,28 @@ const ( HostFileSystemVolumeFileSystemTypeOTHER = HostFileSystemVolumeFileSystemType("OTHER") ) +func (e HostFileSystemVolumeFileSystemType) Values() []HostFileSystemVolumeFileSystemType { + return []HostFileSystemVolumeFileSystemType{ + HostFileSystemVolumeFileSystemTypeVMFS, + HostFileSystemVolumeFileSystemTypeNFS, + HostFileSystemVolumeFileSystemTypeNFS41, + HostFileSystemVolumeFileSystemTypeCIFS, + HostFileSystemVolumeFileSystemTypeVsan, + HostFileSystemVolumeFileSystemTypeVFFS, + HostFileSystemVolumeFileSystemTypeVVOL, + HostFileSystemVolumeFileSystemTypePMEM, + HostFileSystemVolumeFileSystemTypeVsanD, + HostFileSystemVolumeFileSystemTypeOTHER, + } +} + +func (e HostFileSystemVolumeFileSystemType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostFileSystemVolumeFileSystemType"] = reflect.TypeOf((*HostFileSystemVolumeFileSystemType)(nil)).Elem() - minAPIVersionForType["HostFileSystemVolumeFileSystemType"] = "6.0" minAPIVersionForEnumValue["HostFileSystemVolumeFileSystemType"] = map[string]string{ - "NFS41": "6.0", - "vsan": "6.0", - "VFFS": "6.0", - "VVOL": "6.0", - "PMEM": "6.7", "vsanD": "7.0.1.0", } } @@ -2934,10 +4529,22 @@ const ( HostFirewallRuleDirectionOutbound = HostFirewallRuleDirection("outbound") ) +func (e HostFirewallRuleDirection) Values() []HostFirewallRuleDirection { + return []HostFirewallRuleDirection{ + HostFirewallRuleDirectionInbound, + HostFirewallRuleDirectionOutbound, + } +} + +func (e HostFirewallRuleDirection) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostFirewallRuleDirection"] = reflect.TypeOf((*HostFirewallRuleDirection)(nil)).Elem() } +// Enumeration of port types. type HostFirewallRulePortType string const ( @@ -2945,9 +4552,19 @@ const ( HostFirewallRulePortTypeDst = HostFirewallRulePortType("dst") ) +func (e HostFirewallRulePortType) Values() []HostFirewallRulePortType { + return []HostFirewallRulePortType{ + HostFirewallRulePortTypeSrc, + HostFirewallRulePortTypeDst, + } +} + +func (e HostFirewallRulePortType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostFirewallRulePortType"] = reflect.TypeOf((*HostFirewallRulePortType)(nil)).Elem() - minAPIVersionForType["HostFirewallRulePortType"] = "5.0" } // Set of valid port protocols. 
@@ -2958,6 +4575,17 @@ const ( HostFirewallRuleProtocolUdp = HostFirewallRuleProtocol("udp") ) +func (e HostFirewallRuleProtocol) Values() []HostFirewallRuleProtocol { + return []HostFirewallRuleProtocol{ + HostFirewallRuleProtocolTcp, + HostFirewallRuleProtocolUdp, + } +} + +func (e HostFirewallRuleProtocol) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostFirewallRuleProtocol"] = reflect.TypeOf((*HostFirewallRuleProtocol)(nil)).Elem() } @@ -2971,6 +4599,19 @@ const ( HostFirewallSystemRuleSetIdVpxHeartbeats = HostFirewallSystemRuleSetId("vpxHeartbeats") ) +func (e HostFirewallSystemRuleSetId) Values() []HostFirewallSystemRuleSetId { + return []HostFirewallSystemRuleSetId{ + HostFirewallSystemRuleSetIdFaultTolerance, + HostFirewallSystemRuleSetIdFdm, + HostFirewallSystemRuleSetIdUpdateManager, + HostFirewallSystemRuleSetIdVpxHeartbeats, + } +} + +func (e HostFirewallSystemRuleSetId) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostFirewallSystemRuleSetId"] = reflect.TypeOf((*HostFirewallSystemRuleSetId)(nil)).Elem() minAPIVersionForType["HostFirewallSystemRuleSetId"] = "8.0.2.0" @@ -2982,6 +4623,16 @@ const ( HostFirewallSystemServiceNameVpxa = HostFirewallSystemServiceName("vpxa") ) +func (e HostFirewallSystemServiceName) Values() []HostFirewallSystemServiceName { + return []HostFirewallSystemServiceName{ + HostFirewallSystemServiceNameVpxa, + } +} + +func (e HostFirewallSystemServiceName) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostFirewallSystemServiceName"] = reflect.TypeOf((*HostFirewallSystemServiceName)(nil)).Elem() minAPIVersionForType["HostFirewallSystemServiceName"] = "8.0.2.0" @@ -2996,10 +4647,23 @@ const ( HostFruFruTypeProduct = HostFruFruType("product") ) +func (e HostFruFruType) Values() []HostFruFruType { + return []HostFruFruType{ + HostFruFruTypeUndefined, + HostFruFruTypeBoard, + HostFruFruTypeProduct, + } +} + +func (e HostFruFruType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostFruFruType"] = reflect.TypeOf((*HostFruFruType)(nil)).Elem() } +// Supported values for graphics type. 
type HostGraphicsConfigGraphicsType string const ( @@ -3013,11 +4677,22 @@ const ( HostGraphicsConfigGraphicsTypeSharedDirect = HostGraphicsConfigGraphicsType("sharedDirect") ) -func init() { - t["HostGraphicsConfigGraphicsType"] = reflect.TypeOf((*HostGraphicsConfigGraphicsType)(nil)).Elem() - minAPIVersionForType["HostGraphicsConfigGraphicsType"] = "6.5" +func (e HostGraphicsConfigGraphicsType) Values() []HostGraphicsConfigGraphicsType { + return []HostGraphicsConfigGraphicsType{ + HostGraphicsConfigGraphicsTypeShared, + HostGraphicsConfigGraphicsTypeSharedDirect, + } } +func (e HostGraphicsConfigGraphicsType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostGraphicsConfigGraphicsType"] = reflect.TypeOf((*HostGraphicsConfigGraphicsType)(nil)).Elem() +} + +// Supported values for shared passthrough assignment policy type HostGraphicsConfigSharedPassthruAssignmentPolicy string const ( @@ -3027,11 +4702,47 @@ const ( HostGraphicsConfigSharedPassthruAssignmentPolicyConsolidation = HostGraphicsConfigSharedPassthruAssignmentPolicy("consolidation") ) -func init() { - t["HostGraphicsConfigSharedPassthruAssignmentPolicy"] = reflect.TypeOf((*HostGraphicsConfigSharedPassthruAssignmentPolicy)(nil)).Elem() - minAPIVersionForType["HostGraphicsConfigSharedPassthruAssignmentPolicy"] = "6.5" +func (e HostGraphicsConfigSharedPassthruAssignmentPolicy) Values() []HostGraphicsConfigSharedPassthruAssignmentPolicy { + return []HostGraphicsConfigSharedPassthruAssignmentPolicy{ + HostGraphicsConfigSharedPassthruAssignmentPolicyPerformance, + HostGraphicsConfigSharedPassthruAssignmentPolicyConsolidation, + } } +func (e HostGraphicsConfigSharedPassthruAssignmentPolicy) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostGraphicsConfigSharedPassthruAssignmentPolicy"] = reflect.TypeOf((*HostGraphicsConfigSharedPassthruAssignmentPolicy)(nil)).Elem() +} + +type HostGraphicsConfigVgpuMode string + +const ( + // vGPU time-sliced same size. + HostGraphicsConfigVgpuModeSameSize = HostGraphicsConfigVgpuMode("sameSize") + // vGPU time-sliced mixed size. + HostGraphicsConfigVgpuModeMixedSize = HostGraphicsConfigVgpuMode("mixedSize") +) + +func (e HostGraphicsConfigVgpuMode) Values() []HostGraphicsConfigVgpuMode { + return []HostGraphicsConfigVgpuMode{ + HostGraphicsConfigVgpuModeSameSize, + HostGraphicsConfigVgpuModeMixedSize, + } +} + +func (e HostGraphicsConfigVgpuMode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostGraphicsConfigVgpuMode"] = reflect.TypeOf((*HostGraphicsConfigVgpuMode)(nil)).Elem() + minAPIVersionForType["HostGraphicsConfigVgpuMode"] = "8.0.3.0" +} + +// Possible values for graphics type. 
type HostGraphicsInfoGraphicsType string const ( @@ -3051,14 +4762,55 @@ const ( HostGraphicsInfoGraphicsTypeSharedDirect = HostGraphicsInfoGraphicsType("sharedDirect") ) -func init() { - t["HostGraphicsInfoGraphicsType"] = reflect.TypeOf((*HostGraphicsInfoGraphicsType)(nil)).Elem() - minAPIVersionForType["HostGraphicsInfoGraphicsType"] = "5.5" - minAPIVersionForEnumValue["HostGraphicsInfoGraphicsType"] = map[string]string{ - "sharedDirect": "6.5", +func (e HostGraphicsInfoGraphicsType) Values() []HostGraphicsInfoGraphicsType { + return []HostGraphicsInfoGraphicsType{ + HostGraphicsInfoGraphicsTypeBasic, + HostGraphicsInfoGraphicsTypeShared, + HostGraphicsInfoGraphicsTypeDirect, + HostGraphicsInfoGraphicsTypeSharedDirect, } } +func (e HostGraphicsInfoGraphicsType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostGraphicsInfoGraphicsType"] = reflect.TypeOf((*HostGraphicsInfoGraphicsType)(nil)).Elem() +} + +type HostGraphicsInfoVgpuMode string + +const ( + // vGPU mode not applicable. + HostGraphicsInfoVgpuModeNone = HostGraphicsInfoVgpuMode("none") + // vGPU time-sliced same size. + HostGraphicsInfoVgpuModeSameSize = HostGraphicsInfoVgpuMode("sameSize") + // vGPU time-sliced mixed size. + HostGraphicsInfoVgpuModeMixedSize = HostGraphicsInfoVgpuMode("mixedSize") + // vGPU multi-instance GPU. + HostGraphicsInfoVgpuModeMultiInstanceGpu = HostGraphicsInfoVgpuMode("multiInstanceGpu") +) + +func (e HostGraphicsInfoVgpuMode) Values() []HostGraphicsInfoVgpuMode { + return []HostGraphicsInfoVgpuMode{ + HostGraphicsInfoVgpuModeNone, + HostGraphicsInfoVgpuModeSameSize, + HostGraphicsInfoVgpuModeMixedSize, + HostGraphicsInfoVgpuModeMultiInstanceGpu, + } +} + +func (e HostGraphicsInfoVgpuMode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostGraphicsInfoVgpuMode"] = reflect.TypeOf((*HostGraphicsInfoVgpuMode)(nil)).Elem() + minAPIVersionForType["HostGraphicsInfoVgpuMode"] = "8.0.3.0" +} + +// The current status of the hardware type HostHardwareElementStatus string const ( @@ -3076,9 +4828,21 @@ const ( HostHardwareElementStatusRed = HostHardwareElementStatus("Red") ) +func (e HostHardwareElementStatus) Values() []HostHardwareElementStatus { + return []HostHardwareElementStatus{ + HostHardwareElementStatusUnknown, + HostHardwareElementStatusGreen, + HostHardwareElementStatusYellow, + HostHardwareElementStatusRed, + } +} + +func (e HostHardwareElementStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostHardwareElementStatus"] = reflect.TypeOf((*HostHardwareElementStatus)(nil)).Elem() - minAPIVersionForType["HostHardwareElementStatus"] = "2.5" } type HostHasComponentFailureHostComponentType string @@ -3087,11 +4851,21 @@ const ( HostHasComponentFailureHostComponentTypeDatastore = HostHasComponentFailureHostComponentType("Datastore") ) -func init() { - t["HostHasComponentFailureHostComponentType"] = reflect.TypeOf((*HostHasComponentFailureHostComponentType)(nil)).Elem() - minAPIVersionForType["HostHasComponentFailureHostComponentType"] = "6.0" +func (e HostHasComponentFailureHostComponentType) Values() []HostHasComponentFailureHostComponentType { + return []HostHasComponentFailureHostComponentType{ + HostHasComponentFailureHostComponentTypeDatastore, + } } +func (e HostHasComponentFailureHostComponentType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostHasComponentFailureHostComponentType"] = 
reflect.TypeOf((*HostHasComponentFailureHostComponentType)(nil)).Elem() +} + +// Acceptance level definitions type HostImageAcceptanceLevel string const ( @@ -3105,11 +4879,24 @@ const ( HostImageAcceptanceLevelCommunity = HostImageAcceptanceLevel("community") ) -func init() { - t["HostImageAcceptanceLevel"] = reflect.TypeOf((*HostImageAcceptanceLevel)(nil)).Elem() - minAPIVersionForType["HostImageAcceptanceLevel"] = "5.0" +func (e HostImageAcceptanceLevel) Values() []HostImageAcceptanceLevel { + return []HostImageAcceptanceLevel{ + HostImageAcceptanceLevelVmware_certified, + HostImageAcceptanceLevelVmware_accepted, + HostImageAcceptanceLevelPartner, + HostImageAcceptanceLevelCommunity, + } } +func (e HostImageAcceptanceLevel) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostImageAcceptanceLevel"] = reflect.TypeOf((*HostImageAcceptanceLevel)(nil)).Elem() +} + +// Reasons why fault tolerance is not supported on the host. type HostIncompatibleForFaultToleranceReason string const ( @@ -3119,11 +4906,22 @@ const ( HostIncompatibleForFaultToleranceReasonProcessor = HostIncompatibleForFaultToleranceReason("processor") ) -func init() { - t["HostIncompatibleForFaultToleranceReason"] = reflect.TypeOf((*HostIncompatibleForFaultToleranceReason)(nil)).Elem() - minAPIVersionForType["HostIncompatibleForFaultToleranceReason"] = "4.0" +func (e HostIncompatibleForFaultToleranceReason) Values() []HostIncompatibleForFaultToleranceReason { + return []HostIncompatibleForFaultToleranceReason{ + HostIncompatibleForFaultToleranceReasonProduct, + HostIncompatibleForFaultToleranceReasonProcessor, + } } +func (e HostIncompatibleForFaultToleranceReason) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostIncompatibleForFaultToleranceReason"] = reflect.TypeOf((*HostIncompatibleForFaultToleranceReason)(nil)).Elem() +} + +// Reasons why record/replay is not supported on a host. type HostIncompatibleForRecordReplayReason string const ( @@ -3133,9 +4931,19 @@ const ( HostIncompatibleForRecordReplayReasonProcessor = HostIncompatibleForRecordReplayReason("processor") ) +func (e HostIncompatibleForRecordReplayReason) Values() []HostIncompatibleForRecordReplayReason { + return []HostIncompatibleForRecordReplayReason{ + HostIncompatibleForRecordReplayReasonProduct, + HostIncompatibleForRecordReplayReasonProcessor, + } +} + +func (e HostIncompatibleForRecordReplayReason) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostIncompatibleForRecordReplayReason"] = reflect.TypeOf((*HostIncompatibleForRecordReplayReason)(nil)).Elem() - minAPIVersionForType["HostIncompatibleForRecordReplayReason"] = "4.0" } // The type of CHAP authentication setting to use. @@ -3146,6 +4954,7 @@ func init() { // discouraged : use non-CHAP, but allow CHAP connectsion as fallback // required : use CHAP for connection strictly, and fail if CHAP // negotiation fails. +// Defaults to preferred on first configuration if unspecified. 
type HostInternetScsiHbaChapAuthenticationType string const ( @@ -3155,9 +4964,21 @@ const ( HostInternetScsiHbaChapAuthenticationTypeChapRequired = HostInternetScsiHbaChapAuthenticationType("chapRequired") ) +func (e HostInternetScsiHbaChapAuthenticationType) Values() []HostInternetScsiHbaChapAuthenticationType { + return []HostInternetScsiHbaChapAuthenticationType{ + HostInternetScsiHbaChapAuthenticationTypeChapProhibited, + HostInternetScsiHbaChapAuthenticationTypeChapDiscouraged, + HostInternetScsiHbaChapAuthenticationTypeChapPreferred, + HostInternetScsiHbaChapAuthenticationTypeChapRequired, + } +} + +func (e HostInternetScsiHbaChapAuthenticationType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostInternetScsiHbaChapAuthenticationType"] = reflect.TypeOf((*HostInternetScsiHbaChapAuthenticationType)(nil)).Elem() - minAPIVersionForType["HostInternetScsiHbaChapAuthenticationType"] = "4.0" } // The type of integrity checks to use. @@ -3170,6 +4991,7 @@ func init() { // discouraged : do not use digest if target allows, otherwise use digest. // required : use digest strictly, and fail if target does not support // digest. +// Defaults to preferred on first configuration if unspecified. type HostInternetScsiHbaDigestType string const ( @@ -3179,11 +5001,24 @@ const ( HostInternetScsiHbaDigestTypeDigestRequired = HostInternetScsiHbaDigestType("digestRequired") ) -func init() { - t["HostInternetScsiHbaDigestType"] = reflect.TypeOf((*HostInternetScsiHbaDigestType)(nil)).Elem() - minAPIVersionForType["HostInternetScsiHbaDigestType"] = "4.0" +func (e HostInternetScsiHbaDigestType) Values() []HostInternetScsiHbaDigestType { + return []HostInternetScsiHbaDigestType{ + HostInternetScsiHbaDigestTypeDigestProhibited, + HostInternetScsiHbaDigestTypeDigestDiscouraged, + HostInternetScsiHbaDigestTypeDigestPreferred, + HostInternetScsiHbaDigestTypeDigestRequired, + } } +func (e HostInternetScsiHbaDigestType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostInternetScsiHbaDigestType"] = reflect.TypeOf((*HostInternetScsiHbaDigestType)(nil)).Elem() +} + +// enum listing possible IPv6 address configuration methods. 
type HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType string const ( @@ -3203,11 +5038,24 @@ const ( HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationTypeOther = HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType("Other") ) -func init() { - t["HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType"] = reflect.TypeOf((*HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType)(nil)).Elem() - minAPIVersionForType["HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType"] = "6.0" +func (e HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType) Values() []HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType { + return []HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType{ + HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationTypeDHCP, + HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationTypeAutoConfigured, + HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationTypeStatic, + HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationTypeOther, + } } +func (e HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType"] = reflect.TypeOf((*HostInternetScsiHbaIscsiIpv6AddressAddressConfigurationType)(nil)).Elem() +} + +// enum listing IPv6 address operations. type HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperation string const ( @@ -3215,11 +5063,22 @@ const ( HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperationRemove = HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperation("remove") ) -func init() { - t["HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperation"] = reflect.TypeOf((*HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperation)(nil)).Elem() - minAPIVersionForType["HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperation"] = "6.0" +func (e HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperation) Values() []HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperation { + return []HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperation{ + HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperationAdd, + HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperationRemove, + } } +func (e HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperation) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperation"] = reflect.TypeOf((*HostInternetScsiHbaIscsiIpv6AddressIPv6AddressOperation)(nil)).Elem() +} + +// The binding mode of the adapter. type HostInternetScsiHbaNetworkBindingSupportType string const ( @@ -3228,9 +5087,20 @@ const ( HostInternetScsiHbaNetworkBindingSupportTypeRequired = HostInternetScsiHbaNetworkBindingSupportType("required") ) +func (e HostInternetScsiHbaNetworkBindingSupportType) Values() []HostInternetScsiHbaNetworkBindingSupportType { + return []HostInternetScsiHbaNetworkBindingSupportType{ + HostInternetScsiHbaNetworkBindingSupportTypeNotsupported, + HostInternetScsiHbaNetworkBindingSupportTypeOptional, + HostInternetScsiHbaNetworkBindingSupportTypeRequired, + } +} + +func (e HostInternetScsiHbaNetworkBindingSupportType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostInternetScsiHbaNetworkBindingSupportType"] = reflect.TypeOf((*HostInternetScsiHbaNetworkBindingSupportType)(nil)).Elem() - minAPIVersionForType["HostInternetScsiHbaNetworkBindingSupportType"] = "5.0" } // The method of discovery of an iScsi target. 
@@ -3239,6 +5109,7 @@ func init() { // sendTargetsMethod: sendtarget discovery // slpMethod: Service Location Protocol discovery // isnsMethod: Internet Storage Name Service discovery +// unknownMethod: discovery method not identified by iscsi stack type HostInternetScsiHbaStaticTargetTargetDiscoveryMethod string const ( @@ -3249,12 +5120,27 @@ const ( HostInternetScsiHbaStaticTargetTargetDiscoveryMethodUnknownMethod = HostInternetScsiHbaStaticTargetTargetDiscoveryMethod("unknownMethod") ) +func (e HostInternetScsiHbaStaticTargetTargetDiscoveryMethod) Values() []HostInternetScsiHbaStaticTargetTargetDiscoveryMethod { + return []HostInternetScsiHbaStaticTargetTargetDiscoveryMethod{ + HostInternetScsiHbaStaticTargetTargetDiscoveryMethodStaticMethod, + HostInternetScsiHbaStaticTargetTargetDiscoveryMethodSendTargetMethod, + HostInternetScsiHbaStaticTargetTargetDiscoveryMethodSlpMethod, + HostInternetScsiHbaStaticTargetTargetDiscoveryMethodIsnsMethod, + HostInternetScsiHbaStaticTargetTargetDiscoveryMethodUnknownMethod, + } +} + +func (e HostInternetScsiHbaStaticTargetTargetDiscoveryMethod) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostInternetScsiHbaStaticTargetTargetDiscoveryMethod"] = reflect.TypeOf((*HostInternetScsiHbaStaticTargetTargetDiscoveryMethod)(nil)).Elem() - minAPIVersionForType["HostInternetScsiHbaStaticTargetTargetDiscoveryMethod"] = "5.1" } // This specifies how the ipv6 address is configured for the interface. +// +// We follow rfc4293 in defining the values for the configType. type HostIpConfigIpV6AddressConfigType string const ( @@ -3277,9 +5163,22 @@ const ( HostIpConfigIpV6AddressConfigTypeRandom = HostIpConfigIpV6AddressConfigType("random") ) +func (e HostIpConfigIpV6AddressConfigType) Values() []HostIpConfigIpV6AddressConfigType { + return []HostIpConfigIpV6AddressConfigType{ + HostIpConfigIpV6AddressConfigTypeOther, + HostIpConfigIpV6AddressConfigTypeManual, + HostIpConfigIpV6AddressConfigTypeDhcp, + HostIpConfigIpV6AddressConfigTypeLinklayer, + HostIpConfigIpV6AddressConfigTypeRandom, + } +} + +func (e HostIpConfigIpV6AddressConfigType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostIpConfigIpV6AddressConfigType"] = reflect.TypeOf((*HostIpConfigIpV6AddressConfigType)(nil)).Elem() - minAPIVersionForType["HostIpConfigIpV6AddressConfigType"] = "4.0" } type HostIpConfigIpV6AddressStatus string @@ -3305,11 +5204,27 @@ const ( HostIpConfigIpV6AddressStatusDuplicate = HostIpConfigIpV6AddressStatus("duplicate") ) -func init() { - t["HostIpConfigIpV6AddressStatus"] = reflect.TypeOf((*HostIpConfigIpV6AddressStatus)(nil)).Elem() - minAPIVersionForType["HostIpConfigIpV6AddressStatus"] = "4.0" +func (e HostIpConfigIpV6AddressStatus) Values() []HostIpConfigIpV6AddressStatus { + return []HostIpConfigIpV6AddressStatus{ + HostIpConfigIpV6AddressStatusPreferred, + HostIpConfigIpV6AddressStatusDeprecated, + HostIpConfigIpV6AddressStatusInvalid, + HostIpConfigIpV6AddressStatusInaccessible, + HostIpConfigIpV6AddressStatusUnknown, + HostIpConfigIpV6AddressStatusTentative, + HostIpConfigIpV6AddressStatusDuplicate, + } } +func (e HostIpConfigIpV6AddressStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostIpConfigIpV6AddressStatus"] = reflect.TypeOf((*HostIpConfigIpV6AddressStatus)(nil)).Elem() +} + +// Identifiers of currently supported resources. 
type HostLicensableResourceKey string const ( @@ -3325,13 +5240,34 @@ const ( HostLicensableResourceKeyNumVmsStarted = HostLicensableResourceKey("numVmsStarted") // Number of VMs that are currently powering-on, immigrating, etc. HostLicensableResourceKeyNumVmsStarting = HostLicensableResourceKey("numVmsStarting") + // vSAN capacity in TiB on this host. + HostLicensableResourceKeyVsanCapacity = HostLicensableResourceKey("vsanCapacity") ) +func (e HostLicensableResourceKey) Values() []HostLicensableResourceKey { + return []HostLicensableResourceKey{ + HostLicensableResourceKeyNumCpuPackages, + HostLicensableResourceKeyNumCpuCores, + HostLicensableResourceKeyMemorySize, + HostLicensableResourceKeyMemoryForVms, + HostLicensableResourceKeyNumVmsStarted, + HostLicensableResourceKeyNumVmsStarting, + HostLicensableResourceKeyVsanCapacity, + } +} + +func (e HostLicensableResourceKey) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostLicensableResourceKey"] = reflect.TypeOf((*HostLicensableResourceKey)(nil)).Elem() - minAPIVersionForType["HostLicensableResourceKey"] = "5.0" + minAPIVersionForEnumValue["HostLicensableResourceKey"] = map[string]string{ + "vsanCapacity": "8.0.3.0", + } } +// Defines the possible states of lockdown mode. type HostLockdownMode string const ( @@ -3348,12 +5284,24 @@ const ( HostLockdownModeLockdownStrict = HostLockdownMode("lockdownStrict") ) +func (e HostLockdownMode) Values() []HostLockdownMode { + return []HostLockdownMode{ + HostLockdownModeLockdownDisabled, + HostLockdownModeLockdownNormal, + HostLockdownModeLockdownStrict, + } +} + +func (e HostLockdownMode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostLockdownMode"] = reflect.TypeOf((*HostLockdownMode)(nil)).Elem() - minAPIVersionForType["HostLockdownMode"] = "6.0" } // This enum defines the possible types of file types that can be reserved +// or deleted type HostLowLevelProvisioningManagerFileType string const ( @@ -3362,11 +5310,23 @@ const ( HostLowLevelProvisioningManagerFileTypeDirectory = HostLowLevelProvisioningManagerFileType("Directory") ) -func init() { - t["HostLowLevelProvisioningManagerFileType"] = reflect.TypeOf((*HostLowLevelProvisioningManagerFileType)(nil)).Elem() - minAPIVersionForType["HostLowLevelProvisioningManagerFileType"] = "6.0" +func (e HostLowLevelProvisioningManagerFileType) Values() []HostLowLevelProvisioningManagerFileType { + return []HostLowLevelProvisioningManagerFileType{ + HostLowLevelProvisioningManagerFileTypeFile, + HostLowLevelProvisioningManagerFileTypeVirtualDisk, + HostLowLevelProvisioningManagerFileTypeDirectory, + } } +func (e HostLowLevelProvisioningManagerFileType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostLowLevelProvisioningManagerFileType"] = reflect.TypeOf((*HostLowLevelProvisioningManagerFileType)(nil)).Elem() +} + +// The target of the disk reload. 
type HostLowLevelProvisioningManagerReloadTarget string const ( @@ -3379,9 +5339,19 @@ const ( HostLowLevelProvisioningManagerReloadTargetSnapshotConfig = HostLowLevelProvisioningManagerReloadTarget("snapshotConfig") ) +func (e HostLowLevelProvisioningManagerReloadTarget) Values() []HostLowLevelProvisioningManagerReloadTarget { + return []HostLowLevelProvisioningManagerReloadTarget{ + HostLowLevelProvisioningManagerReloadTargetCurrentConfig, + HostLowLevelProvisioningManagerReloadTargetSnapshotConfig, + } +} + +func (e HostLowLevelProvisioningManagerReloadTarget) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostLowLevelProvisioningManagerReloadTarget"] = reflect.TypeOf((*HostLowLevelProvisioningManagerReloadTarget)(nil)).Elem() - minAPIVersionForType["HostLowLevelProvisioningManagerReloadTarget"] = "4.0" } type HostMaintenanceSpecPurpose string @@ -3390,9 +5360,18 @@ const ( HostMaintenanceSpecPurposeHostUpgrade = HostMaintenanceSpecPurpose("hostUpgrade") ) +func (e HostMaintenanceSpecPurpose) Values() []HostMaintenanceSpecPurpose { + return []HostMaintenanceSpecPurpose{ + HostMaintenanceSpecPurposeHostUpgrade, + } +} + +func (e HostMaintenanceSpecPurpose) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostMaintenanceSpecPurpose"] = reflect.TypeOf((*HostMaintenanceSpecPurpose)(nil)).Elem() - minAPIVersionForType["HostMaintenanceSpecPurpose"] = "7.0" } // Enumeration of flags pertaining to a memory tier. @@ -3428,11 +5407,29 @@ const ( HostMemoryTierFlagsPersistentTier = HostMemoryTierFlags("persistentTier") // Flag indicating that the tier is a cache for main memory. HostMemoryTierFlagsCachingTier = HostMemoryTierFlags("cachingTier") + // `**Since:**` vSphere API Release 8.0.3.0 + HostMemoryTierFlagsUnmappableTier = HostMemoryTierFlags("unmappableTier") ) +func (e HostMemoryTierFlags) Values() []HostMemoryTierFlags { + return []HostMemoryTierFlags{ + HostMemoryTierFlagsMemoryTier, + HostMemoryTierFlagsPersistentTier, + HostMemoryTierFlagsCachingTier, + HostMemoryTierFlagsUnmappableTier, + } +} + +func (e HostMemoryTierFlags) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostMemoryTierFlags"] = reflect.TypeOf((*HostMemoryTierFlags)(nil)).Elem() minAPIVersionForType["HostMemoryTierFlags"] = "7.0.3.0" + minAPIVersionForEnumValue["HostMemoryTierFlags"] = map[string]string{ + "unmappableTier": "8.0.3.0", + } } type HostMemoryTierType string @@ -3442,11 +5439,28 @@ const ( HostMemoryTierTypeDRAM = HostMemoryTierType("DRAM") // Persistent memory. HostMemoryTierTypePMem = HostMemoryTierType("PMem") + // NVMe memory. + HostMemoryTierTypeNVMe = HostMemoryTierType("NVMe") ) +func (e HostMemoryTierType) Values() []HostMemoryTierType { + return []HostMemoryTierType{ + HostMemoryTierTypeDRAM, + HostMemoryTierTypePMem, + HostMemoryTierTypeNVMe, + } +} + +func (e HostMemoryTierType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostMemoryTierType"] = reflect.TypeOf((*HostMemoryTierType)(nil)).Elem() minAPIVersionForType["HostMemoryTierType"] = "7.0.3.0" + minAPIVersionForEnumValue["HostMemoryTierType"] = map[string]string{ + "NVMe": "8.0.3.0", + } } type HostMemoryTieringType string @@ -3459,11 +5473,28 @@ const ( // // Intel's Memory Mode. HostMemoryTieringTypeHardwareTiering = HostMemoryTieringType("hardwareTiering") + // The memory configuration where all memory tiers are managed by software (ESX). 
+ HostMemoryTieringTypeSoftwareTiering = HostMemoryTieringType("softwareTiering") ) +func (e HostMemoryTieringType) Values() []HostMemoryTieringType { + return []HostMemoryTieringType{ + HostMemoryTieringTypeNoTiering, + HostMemoryTieringTypeHardwareTiering, + HostMemoryTieringTypeSoftwareTiering, + } +} + +func (e HostMemoryTieringType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostMemoryTieringType"] = reflect.TypeOf((*HostMemoryTieringType)(nil)).Elem() minAPIVersionForType["HostMemoryTieringType"] = "7.0.3.0" + minAPIVersionForEnumValue["HostMemoryTieringType"] = map[string]string{ + "softwareTiering": "8.0.3.0", + } } // A datastore can become inaccessible due to a number of reasons as @@ -3485,6 +5516,7 @@ func init() { // PDL is not linked to the APD and can happen at any time with or without APD // preceding. If APD and PDL occur at the same time, APD will be reported first. // Once (and if) the APD condition clears, PermanentDataLoss will be reported if +// PDL condition still exists. type HostMountInfoInaccessibleReason string const ( @@ -3503,9 +5535,20 @@ const ( HostMountInfoInaccessibleReasonPermanentDeviceLoss = HostMountInfoInaccessibleReason("PermanentDeviceLoss") ) +func (e HostMountInfoInaccessibleReason) Values() []HostMountInfoInaccessibleReason { + return []HostMountInfoInaccessibleReason{ + HostMountInfoInaccessibleReasonAllPathsDown_Start, + HostMountInfoInaccessibleReasonAllPathsDown_Timeout, + HostMountInfoInaccessibleReasonPermanentDeviceLoss, + } +} + +func (e HostMountInfoInaccessibleReason) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostMountInfoInaccessibleReason"] = reflect.TypeOf((*HostMountInfoInaccessibleReason)(nil)).Elem() - minAPIVersionForType["HostMountInfoInaccessibleReason"] = "5.1" } // NFS mount request can be failed due to a number of reasons as @@ -3538,6 +5581,24 @@ const ( HostMountInfoMountFailedReasonOTHERS = HostMountInfoMountFailedReason("OTHERS") ) +func (e HostMountInfoMountFailedReason) Values() []HostMountInfoMountFailedReason { + return []HostMountInfoMountFailedReason{ + HostMountInfoMountFailedReasonCONNECT_FAILURE, + HostMountInfoMountFailedReasonMOUNT_NOT_SUPPORTED, + HostMountInfoMountFailedReasonNFS_NOT_SUPPORTED, + HostMountInfoMountFailedReasonMOUNT_DENIED, + HostMountInfoMountFailedReasonMOUNT_NOT_DIR, + HostMountInfoMountFailedReasonVOLUME_LIMIT_EXCEEDED, + HostMountInfoMountFailedReasonCONN_LIMIT_EXCEEDED, + HostMountInfoMountFailedReasonMOUNT_EXISTS, + HostMountInfoMountFailedReasonOTHERS, + } +} + +func (e HostMountInfoMountFailedReason) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostMountInfoMountFailedReason"] = reflect.TypeOf((*HostMountInfoMountFailedReason)(nil)).Elem() minAPIVersionForType["HostMountInfoMountFailedReason"] = "8.0.0.1" @@ -3553,10 +5614,22 @@ const ( HostMountModeReadOnly = HostMountMode("readOnly") ) +func (e HostMountMode) Values() []HostMountMode { + return []HostMountMode{ + HostMountModeReadWrite, + HostMountModeReadOnly, + } +} + +func (e HostMountMode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostMountMode"] = reflect.TypeOf((*HostMountMode)(nil)).Elem() } +// Security type supported. 
type HostNasVolumeSecurityType string const ( @@ -3583,14 +5656,23 @@ const ( HostNasVolumeSecurityTypeSEC_KRB5I = HostNasVolumeSecurityType("SEC_KRB5I") ) -func init() { - t["HostNasVolumeSecurityType"] = reflect.TypeOf((*HostNasVolumeSecurityType)(nil)).Elem() - minAPIVersionForType["HostNasVolumeSecurityType"] = "6.0" - minAPIVersionForEnumValue["HostNasVolumeSecurityType"] = map[string]string{ - "SEC_KRB5I": "6.5", +func (e HostNasVolumeSecurityType) Values() []HostNasVolumeSecurityType { + return []HostNasVolumeSecurityType{ + HostNasVolumeSecurityTypeAUTH_SYS, + HostNasVolumeSecurityTypeSEC_KRB5, + HostNasVolumeSecurityTypeSEC_KRB5I, } } +func (e HostNasVolumeSecurityType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostNasVolumeSecurityType"] = reflect.TypeOf((*HostNasVolumeSecurityType)(nil)).Elem() +} + +// Define TCP congestion control algorithm used by an instance type HostNetStackInstanceCongestionControlAlgorithmType string const ( @@ -3604,11 +5686,22 @@ const ( HostNetStackInstanceCongestionControlAlgorithmTypeCubic = HostNetStackInstanceCongestionControlAlgorithmType("cubic") ) -func init() { - t["HostNetStackInstanceCongestionControlAlgorithmType"] = reflect.TypeOf((*HostNetStackInstanceCongestionControlAlgorithmType)(nil)).Elem() - minAPIVersionForType["HostNetStackInstanceCongestionControlAlgorithmType"] = "5.5" +func (e HostNetStackInstanceCongestionControlAlgorithmType) Values() []HostNetStackInstanceCongestionControlAlgorithmType { + return []HostNetStackInstanceCongestionControlAlgorithmType{ + HostNetStackInstanceCongestionControlAlgorithmTypeNewreno, + HostNetStackInstanceCongestionControlAlgorithmTypeCubic, + } } +func (e HostNetStackInstanceCongestionControlAlgorithmType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostNetStackInstanceCongestionControlAlgorithmType"] = reflect.TypeOf((*HostNetStackInstanceCongestionControlAlgorithmType)(nil)).Elem() +} + +// Define the instance identifier for different traffic type type HostNetStackInstanceSystemStackKey string const ( @@ -3624,18 +5717,31 @@ const ( HostNetStackInstanceSystemStackKeyOps = HostNetStackInstanceSystemStackKey("ops") ) +func (e HostNetStackInstanceSystemStackKey) Values() []HostNetStackInstanceSystemStackKey { + return []HostNetStackInstanceSystemStackKey{ + HostNetStackInstanceSystemStackKeyDefaultTcpipStack, + HostNetStackInstanceSystemStackKeyVmotion, + HostNetStackInstanceSystemStackKeyVSphereProvisioning, + HostNetStackInstanceSystemStackKeyMirror, + HostNetStackInstanceSystemStackKeyOps, + } +} + +func (e HostNetStackInstanceSystemStackKey) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostNetStackInstanceSystemStackKey"] = reflect.TypeOf((*HostNetStackInstanceSystemStackKey)(nil)).Elem() - minAPIVersionForType["HostNetStackInstanceSystemStackKey"] = "5.5" minAPIVersionForEnumValue["HostNetStackInstanceSystemStackKey"] = map[string]string{ - "vmotion": "6.0", - "vSphereProvisioning": "6.0", - "mirror": "8.0.0.1", - "ops": "8.0.0.1", + "mirror": "8.0.0.1", + "ops": "8.0.0.1", } } // Health state of the numeric sensor as reported by the sensor probes. 
+// +// Same data reported using command line: esxcli hardware ipmi sdr list type HostNumericSensorHealthState string const ( @@ -3654,12 +5760,25 @@ const ( HostNumericSensorHealthStateRed = HostNumericSensorHealthState("red") ) +func (e HostNumericSensorHealthState) Values() []HostNumericSensorHealthState { + return []HostNumericSensorHealthState{ + HostNumericSensorHealthStateUnknown, + HostNumericSensorHealthStateGreen, + HostNumericSensorHealthStateYellow, + HostNumericSensorHealthStateRed, + } +} + +func (e HostNumericSensorHealthState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostNumericSensorHealthState"] = reflect.TypeOf((*HostNumericSensorHealthState)(nil)).Elem() - minAPIVersionForType["HostNumericSensorHealthState"] = "2.5" } // Sensor Types for specific hardware component are either based on +// class of sensor or what the sensor monitors to allow for grouping type HostNumericSensorType string const ( @@ -3691,19 +5810,30 @@ const ( HostNumericSensorTypeWatchdog = HostNumericSensorType("watchdog") ) +func (e HostNumericSensorType) Values() []HostNumericSensorType { + return []HostNumericSensorType{ + HostNumericSensorTypeFan, + HostNumericSensorTypePower, + HostNumericSensorTypeTemperature, + HostNumericSensorTypeVoltage, + HostNumericSensorTypeOther, + HostNumericSensorTypeProcessor, + HostNumericSensorTypeMemory, + HostNumericSensorTypeStorage, + HostNumericSensorTypeSystemBoard, + HostNumericSensorTypeBattery, + HostNumericSensorTypeBios, + HostNumericSensorTypeCable, + HostNumericSensorTypeWatchdog, + } +} + +func (e HostNumericSensorType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostNumericSensorType"] = reflect.TypeOf((*HostNumericSensorType)(nil)).Elem() - minAPIVersionForType["HostNumericSensorType"] = "2.5" - minAPIVersionForEnumValue["HostNumericSensorType"] = map[string]string{ - "processor": "6.5", - "memory": "6.5", - "storage": "6.5", - "systemBoard": "6.5", - "battery": "6.5", - "bios": "6.5", - "cable": "6.5", - "watchdog": "6.5", - } } // This enum represents the supported NVM subsystem types. 
@@ -3716,6 +5846,17 @@ const ( HostNvmeDiscoveryLogSubsystemTypeNvm = HostNvmeDiscoveryLogSubsystemType("nvm") ) +func (e HostNvmeDiscoveryLogSubsystemType) Values() []HostNvmeDiscoveryLogSubsystemType { + return []HostNvmeDiscoveryLogSubsystemType{ + HostNvmeDiscoveryLogSubsystemTypeDiscovery, + HostNvmeDiscoveryLogSubsystemTypeNvm, + } +} + +func (e HostNvmeDiscoveryLogSubsystemType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostNvmeDiscoveryLogSubsystemType"] = reflect.TypeOf((*HostNvmeDiscoveryLogSubsystemType)(nil)).Elem() } @@ -3732,6 +5873,18 @@ const ( HostNvmeDiscoveryLogTransportRequirementsRequirementsNotSpecified = HostNvmeDiscoveryLogTransportRequirements("requirementsNotSpecified") ) +func (e HostNvmeDiscoveryLogTransportRequirements) Values() []HostNvmeDiscoveryLogTransportRequirements { + return []HostNvmeDiscoveryLogTransportRequirements{ + HostNvmeDiscoveryLogTransportRequirementsSecureChannelRequired, + HostNvmeDiscoveryLogTransportRequirementsSecureChannelNotRequired, + HostNvmeDiscoveryLogTransportRequirementsRequirementsNotSpecified, + } +} + +func (e HostNvmeDiscoveryLogTransportRequirements) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostNvmeDiscoveryLogTransportRequirements"] = reflect.TypeOf((*HostNvmeDiscoveryLogTransportRequirements)(nil)).Elem() } @@ -3741,6 +5894,7 @@ func init() { // // For details, see: // - "NVM Express over Fabrics 1.0", Section 5.3, Figure 34, +// "Discovery Log Page Entry" type HostNvmeTransportParametersNvmeAddressFamily string const ( @@ -3758,15 +5912,30 @@ const ( HostNvmeTransportParametersNvmeAddressFamilyUnknown = HostNvmeTransportParametersNvmeAddressFamily("unknown") ) +func (e HostNvmeTransportParametersNvmeAddressFamily) Values() []HostNvmeTransportParametersNvmeAddressFamily { + return []HostNvmeTransportParametersNvmeAddressFamily{ + HostNvmeTransportParametersNvmeAddressFamilyIpv4, + HostNvmeTransportParametersNvmeAddressFamilyIpv6, + HostNvmeTransportParametersNvmeAddressFamilyInfiniBand, + HostNvmeTransportParametersNvmeAddressFamilyFc, + HostNvmeTransportParametersNvmeAddressFamilyLoopback, + HostNvmeTransportParametersNvmeAddressFamilyUnknown, + } +} + +func (e HostNvmeTransportParametersNvmeAddressFamily) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostNvmeTransportParametersNvmeAddressFamily"] = reflect.TypeOf((*HostNvmeTransportParametersNvmeAddressFamily)(nil)).Elem() - minAPIVersionForType["HostNvmeTransportParametersNvmeAddressFamily"] = "7.0" } // The set of NVM Express over Fabrics transport types. // // For details, see: // - "NVM Express over Fabrics 1.0", Section 1.5.1, +// "Fabrics and Transports". 
type HostNvmeTransportType string const ( @@ -3784,9 +5953,23 @@ const ( HostNvmeTransportTypeUnsupported = HostNvmeTransportType("unsupported") ) +func (e HostNvmeTransportType) Values() []HostNvmeTransportType { + return []HostNvmeTransportType{ + HostNvmeTransportTypePcie, + HostNvmeTransportTypeFibreChannel, + HostNvmeTransportTypeRdma, + HostNvmeTransportTypeTcp, + HostNvmeTransportTypeLoopback, + HostNvmeTransportTypeUnsupported, + } +} + +func (e HostNvmeTransportType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostNvmeTransportType"] = reflect.TypeOf((*HostNvmeTransportType)(nil)).Elem() - minAPIVersionForType["HostNvmeTransportType"] = "7.0" minAPIVersionForEnumValue["HostNvmeTransportType"] = map[string]string{ "tcp": "7.0.3.0", } @@ -3805,14 +5988,90 @@ const ( HostOpaqueSwitchOpaqueSwitchStateMaintenance = HostOpaqueSwitchOpaqueSwitchState("maintenance") ) +func (e HostOpaqueSwitchOpaqueSwitchState) Values() []HostOpaqueSwitchOpaqueSwitchState { + return []HostOpaqueSwitchOpaqueSwitchState{ + HostOpaqueSwitchOpaqueSwitchStateUp, + HostOpaqueSwitchOpaqueSwitchStateWarning, + HostOpaqueSwitchOpaqueSwitchStateDown, + HostOpaqueSwitchOpaqueSwitchStateMaintenance, + } +} + +func (e HostOpaqueSwitchOpaqueSwitchState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostOpaqueSwitchOpaqueSwitchState"] = reflect.TypeOf((*HostOpaqueSwitchOpaqueSwitchState)(nil)).Elem() - minAPIVersionForType["HostOpaqueSwitchOpaqueSwitchState"] = "6.0" - minAPIVersionForEnumValue["HostOpaqueSwitchOpaqueSwitchState"] = map[string]string{ - "maintenance": "7.0", +} + +// The following enum describes some common kinds of partial maintenance modes, +type HostPartialMaintenanceModeId string + +const ( + // When the host is in the quick patch partial maintenance mode, it is safe to + // perform a quick patch. + // + // When the host is in this partial maintenance mode, any virtual machines + // and/or pods placed on it will continue to run but operations which may + // lead to new workloads starting on the host such as power on or incoming + // vmotions may be blocked. + // It is generally unsafe to reboot the host in this state. + HostPartialMaintenanceModeIdQuickPatchPartialMM = HostPartialMaintenanceModeId("quickPatchPartialMM") +) + +func (e HostPartialMaintenanceModeId) Values() []HostPartialMaintenanceModeId { + return []HostPartialMaintenanceModeId{ + HostPartialMaintenanceModeIdQuickPatchPartialMM, } } +func (e HostPartialMaintenanceModeId) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostPartialMaintenanceModeId"] = reflect.TypeOf((*HostPartialMaintenanceModeId)(nil)).Elem() + minAPIVersionForType["HostPartialMaintenanceModeId"] = "8.0.3.0" + minAPIVersionForEnumValue["HostPartialMaintenanceModeId"] = map[string]string{ + "quickPatchPartialMM": "8.0.3.0", + } +} + +// The following enum contains the list of possible statuses associated +type HostPartialMaintenanceModeStatus string + +const ( + // The host is not in the particular partial maintenance mode. + HostPartialMaintenanceModeStatusNotInPartialMM = HostPartialMaintenanceModeStatus("notInPartialMM") + // The host is in the process of entering the particular partial maintenance + // mode. + HostPartialMaintenanceModeStatusEnteringPartialMM = HostPartialMaintenanceModeStatus("enteringPartialMM") + // The host is in the process of exiting the particular partial maintenance + // mode. 
+ HostPartialMaintenanceModeStatusExitingPartialMM = HostPartialMaintenanceModeStatus("exitingPartialMM") + // The host is in the particular partial maintenance mode. + HostPartialMaintenanceModeStatusInPartialMM = HostPartialMaintenanceModeStatus("inPartialMM") +) + +func (e HostPartialMaintenanceModeStatus) Values() []HostPartialMaintenanceModeStatus { + return []HostPartialMaintenanceModeStatus{ + HostPartialMaintenanceModeStatusNotInPartialMM, + HostPartialMaintenanceModeStatusEnteringPartialMM, + HostPartialMaintenanceModeStatusExitingPartialMM, + HostPartialMaintenanceModeStatusInPartialMM, + } +} + +func (e HostPartialMaintenanceModeStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostPartialMaintenanceModeStatus"] = reflect.TypeOf((*HostPartialMaintenanceModeStatus)(nil)).Elem() + minAPIVersionForType["HostPartialMaintenanceModeStatus"] = "8.0.3.0" +} + // The installation state if the update is installed on the server. type HostPatchManagerInstallState string @@ -3823,6 +6082,17 @@ const ( HostPatchManagerInstallStateImageActive = HostPatchManagerInstallState("imageActive") ) +func (e HostPatchManagerInstallState) Values() []HostPatchManagerInstallState { + return []HostPatchManagerInstallState{ + HostPatchManagerInstallStateHostRestarted, + HostPatchManagerInstallStateImageActive, + } +} + +func (e HostPatchManagerInstallState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostPatchManagerInstallState"] = reflect.TypeOf((*HostPatchManagerInstallState)(nil)).Elem() } @@ -3848,6 +6118,22 @@ const ( HostPatchManagerIntegrityStatusValidationError = HostPatchManagerIntegrityStatus("validationError") ) +func (e HostPatchManagerIntegrityStatus) Values() []HostPatchManagerIntegrityStatus { + return []HostPatchManagerIntegrityStatus{ + HostPatchManagerIntegrityStatusValidated, + HostPatchManagerIntegrityStatusKeyNotFound, + HostPatchManagerIntegrityStatusKeyRevoked, + HostPatchManagerIntegrityStatusKeyExpired, + HostPatchManagerIntegrityStatusDigestMismatch, + HostPatchManagerIntegrityStatusNotEnoughSignatures, + HostPatchManagerIntegrityStatusValidationError, + } +} + +func (e HostPatchManagerIntegrityStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostPatchManagerIntegrityStatus"] = reflect.TypeOf((*HostPatchManagerIntegrityStatus)(nil)).Elem() } @@ -3875,6 +6161,21 @@ const ( HostPatchManagerReasonConflictLib = HostPatchManagerReason("conflictLib") ) +func (e HostPatchManagerReason) Values() []HostPatchManagerReason { + return []HostPatchManagerReason{ + HostPatchManagerReasonObsoleted, + HostPatchManagerReasonMissingPatch, + HostPatchManagerReasonMissingLib, + HostPatchManagerReasonHasDependentPatch, + HostPatchManagerReasonConflictPatch, + HostPatchManagerReasonConflictLib, + } +} + +func (e HostPatchManagerReason) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostPatchManagerReason"] = reflect.TypeOf((*HostPatchManagerReason)(nil)).Elem() } @@ -3891,12 +6192,23 @@ const ( HostPowerOperationTypePowerOff = HostPowerOperationType("powerOff") ) +func (e HostPowerOperationType) Values() []HostPowerOperationType { + return []HostPowerOperationType{ + HostPowerOperationTypePowerOn, + HostPowerOperationTypePowerOff, + } +} + +func (e HostPowerOperationType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostPowerOperationType"] = reflect.TypeOf((*HostPowerOperationType)(nil)).Elem() - 
minAPIVersionForType["HostPowerOperationType"] = "2.5" } // The `HostProfileManagerAnswerFileStatus_enum` enum +// defines possible values for answer file status. type HostProfileManagerAnswerFileStatus string const ( @@ -3905,25 +6217,37 @@ const ( // Answer file is not valid. // // The file is either missing or incomplete. - // - To produce an answer file, pass host-specific data (user input) to the - // `HostProfileManager*.*HostProfileManager.ApplyHostConfig_Task` - // method. - // - To produce a complete answer file, call the - // `HostProfile*.*HostProfile.ExecuteHostProfile` - // method and fill in any missing parameters in the returned - // `ProfileExecuteResult*.*ProfileExecuteResult.requireInput` - // list. After you execute the profile successfully, you can pass the complete required - // input list to the apply method. + // - To produce an answer file, pass host-specific data (user input) to the + // `HostProfileManager*.*HostProfileManager.ApplyHostConfig_Task` + // method. + // - To produce a complete answer file, call the + // `HostProfile*.*HostProfile.ExecuteHostProfile` + // method and fill in any missing parameters in the returned + // `ProfileExecuteResult*.*ProfileExecuteResult.requireInput` + // list. After you execute the profile successfully, you can pass the complete required + // input list to the apply method. HostProfileManagerAnswerFileStatusInvalid = HostProfileManagerAnswerFileStatus("invalid") // Answer file status is not known. HostProfileManagerAnswerFileStatusUnknown = HostProfileManagerAnswerFileStatus("unknown") ) -func init() { - t["HostProfileManagerAnswerFileStatus"] = reflect.TypeOf((*HostProfileManagerAnswerFileStatus)(nil)).Elem() - minAPIVersionForType["HostProfileManagerAnswerFileStatus"] = "5.0" +func (e HostProfileManagerAnswerFileStatus) Values() []HostProfileManagerAnswerFileStatus { + return []HostProfileManagerAnswerFileStatus{ + HostProfileManagerAnswerFileStatusValid, + HostProfileManagerAnswerFileStatusInvalid, + HostProfileManagerAnswerFileStatusUnknown, + } } +func (e HostProfileManagerAnswerFileStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostProfileManagerAnswerFileStatus"] = reflect.TypeOf((*HostProfileManagerAnswerFileStatus)(nil)).Elem() +} + +// The composition status class. type HostProfileManagerCompositionResultResultElementStatus string const ( @@ -3931,11 +6255,22 @@ const ( HostProfileManagerCompositionResultResultElementStatusError = HostProfileManagerCompositionResultResultElementStatus("error") ) -func init() { - t["HostProfileManagerCompositionResultResultElementStatus"] = reflect.TypeOf((*HostProfileManagerCompositionResultResultElementStatus)(nil)).Elem() - minAPIVersionForType["HostProfileManagerCompositionResultResultElementStatus"] = "6.5" +func (e HostProfileManagerCompositionResultResultElementStatus) Values() []HostProfileManagerCompositionResultResultElementStatus { + return []HostProfileManagerCompositionResultResultElementStatus{ + HostProfileManagerCompositionResultResultElementStatusSuccess, + HostProfileManagerCompositionResultResultElementStatusError, + } } +func (e HostProfileManagerCompositionResultResultElementStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostProfileManagerCompositionResultResultElementStatus"] = reflect.TypeOf((*HostProfileManagerCompositionResultResultElementStatus)(nil)).Elem() +} + +// The composition validation status class. 
type HostProfileManagerCompositionValidationResultResultElementStatus string const ( @@ -3943,14 +6278,25 @@ const ( HostProfileManagerCompositionValidationResultResultElementStatusError = HostProfileManagerCompositionValidationResultResultElementStatus("error") ) +func (e HostProfileManagerCompositionValidationResultResultElementStatus) Values() []HostProfileManagerCompositionValidationResultResultElementStatus { + return []HostProfileManagerCompositionValidationResultResultElementStatus{ + HostProfileManagerCompositionValidationResultResultElementStatusSuccess, + HostProfileManagerCompositionValidationResultResultElementStatusError, + } +} + +func (e HostProfileManagerCompositionValidationResultResultElementStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostProfileManagerCompositionValidationResultResultElementStatus"] = reflect.TypeOf((*HostProfileManagerCompositionValidationResultResultElementStatus)(nil)).Elem() - minAPIVersionForType["HostProfileManagerCompositionValidationResultResultElementStatus"] = "6.5" } // The `HostProfileManagerTaskListRequirement_enum` enum // defines possible values for requirements when applying a `HostConfigSpec` // object returned as part of a generateConfigTaskList +// operation. type HostProfileManagerTaskListRequirement string const ( @@ -3963,11 +6309,22 @@ const ( HostProfileManagerTaskListRequirementRebootRequired = HostProfileManagerTaskListRequirement("rebootRequired") ) -func init() { - t["HostProfileManagerTaskListRequirement"] = reflect.TypeOf((*HostProfileManagerTaskListRequirement)(nil)).Elem() - minAPIVersionForType["HostProfileManagerTaskListRequirement"] = "6.0" +func (e HostProfileManagerTaskListRequirement) Values() []HostProfileManagerTaskListRequirement { + return []HostProfileManagerTaskListRequirement{ + HostProfileManagerTaskListRequirementMaintenanceModeRequired, + HostProfileManagerTaskListRequirementRebootRequired, + } } +func (e HostProfileManagerTaskListRequirement) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostProfileManagerTaskListRequirement"] = reflect.TypeOf((*HostProfileManagerTaskListRequirement)(nil)).Elem() +} + +// Types of host profile update. type HostProfileValidationFailureInfoUpdateType string const ( @@ -3981,11 +6338,24 @@ const ( HostProfileValidationFailureInfoUpdateTypeCompose = HostProfileValidationFailureInfoUpdateType("Compose") ) -func init() { - t["HostProfileValidationFailureInfoUpdateType"] = reflect.TypeOf((*HostProfileValidationFailureInfoUpdateType)(nil)).Elem() - minAPIVersionForType["HostProfileValidationFailureInfoUpdateType"] = "6.7" +func (e HostProfileValidationFailureInfoUpdateType) Values() []HostProfileValidationFailureInfoUpdateType { + return []HostProfileValidationFailureInfoUpdateType{ + HostProfileValidationFailureInfoUpdateTypeHostBased, + HostProfileValidationFailureInfoUpdateTypeImport, + HostProfileValidationFailureInfoUpdateTypeEdit, + HostProfileValidationFailureInfoUpdateTypeCompose, + } } +func (e HostProfileValidationFailureInfoUpdateType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostProfileValidationFailureInfoUpdateType"] = reflect.TypeOf((*HostProfileValidationFailureInfoUpdateType)(nil)).Elem() +} + +// This defines validation state values for host profile. 
type HostProfileValidationState string const ( @@ -3994,12 +6364,25 @@ const ( HostProfileValidationStateFailed = HostProfileValidationState("Failed") ) +func (e HostProfileValidationState) Values() []HostProfileValidationState { + return []HostProfileValidationState{ + HostProfileValidationStateReady, + HostProfileValidationStateRunning, + HostProfileValidationStateFailed, + } +} + +func (e HostProfileValidationState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostProfileValidationState"] = reflect.TypeOf((*HostProfileValidationState)(nil)).Elem() - minAPIVersionForType["HostProfileValidationState"] = "6.7" } // Deprecated from all vmodl version above @released("6.0"). +// +// ProtocolEndpoint Type. type HostProtocolEndpointPEType string const ( @@ -4007,11 +6390,22 @@ const ( HostProtocolEndpointPETypeNas = HostProtocolEndpointPEType("nas") ) -func init() { - t["HostProtocolEndpointPEType"] = reflect.TypeOf((*HostProtocolEndpointPEType)(nil)).Elem() - minAPIVersionForType["HostProtocolEndpointPEType"] = "6.0" +func (e HostProtocolEndpointPEType) Values() []HostProtocolEndpointPEType { + return []HostProtocolEndpointPEType{ + HostProtocolEndpointPETypeBlock, + HostProtocolEndpointPETypeNas, + } } +func (e HostProtocolEndpointPEType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostProtocolEndpointPEType"] = reflect.TypeOf((*HostProtocolEndpointPEType)(nil)).Elem() +} + +// ProtocolEndpoint type. type HostProtocolEndpointProtocolEndpointType string const ( @@ -4020,9 +6414,20 @@ const ( HostProtocolEndpointProtocolEndpointTypeNfs4x = HostProtocolEndpointProtocolEndpointType("nfs4x") ) +func (e HostProtocolEndpointProtocolEndpointType) Values() []HostProtocolEndpointProtocolEndpointType { + return []HostProtocolEndpointProtocolEndpointType{ + HostProtocolEndpointProtocolEndpointTypeScsi, + HostProtocolEndpointProtocolEndpointTypeNfs, + HostProtocolEndpointProtocolEndpointTypeNfs4x, + } +} + +func (e HostProtocolEndpointProtocolEndpointType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostProtocolEndpointProtocolEndpointType"] = reflect.TypeOf((*HostProtocolEndpointProtocolEndpointType)(nil)).Elem() - minAPIVersionForType["HostProtocolEndpointProtocolEndpointType"] = "6.5" } type HostPtpConfigDeviceType string @@ -4041,6 +6446,18 @@ const ( HostPtpConfigDeviceTypePciPassthruNic = HostPtpConfigDeviceType("pciPassthruNic") ) +func (e HostPtpConfigDeviceType) Values() []HostPtpConfigDeviceType { + return []HostPtpConfigDeviceType{ + HostPtpConfigDeviceTypeNone, + HostPtpConfigDeviceTypeVirtualNic, + HostPtpConfigDeviceTypePciPassthruNic, + } +} + +func (e HostPtpConfigDeviceType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostPtpConfigDeviceType"] = reflect.TypeOf((*HostPtpConfigDeviceType)(nil)).Elem() minAPIVersionForType["HostPtpConfigDeviceType"] = "7.0.3.0" @@ -4055,6 +6472,17 @@ const ( HostQualifiedNameTypeVvolNvmeQualifiedName = HostQualifiedNameType("vvolNvmeQualifiedName") ) +func (e HostQualifiedNameType) Values() []HostQualifiedNameType { + return []HostQualifiedNameType{ + HostQualifiedNameTypeNvmeQualifiedName, + HostQualifiedNameTypeVvolNvmeQualifiedName, + } +} + +func (e HostQualifiedNameType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostQualifiedNameType"] = reflect.TypeOf((*HostQualifiedNameType)(nil)).Elem() minAPIVersionForType["HostQualifiedNameType"] = "7.0.3.0" @@ 
-4071,6 +6499,7 @@ func init() { // // Further details can be found in: // - "Infiniband (TM) Architecture Specification, Volume 1" +// section 7.2 "Link states" type HostRdmaDeviceConnectionState string const ( @@ -4113,15 +6542,30 @@ const ( HostRdmaDeviceConnectionStateActiveDefer = HostRdmaDeviceConnectionState("activeDefer") ) +func (e HostRdmaDeviceConnectionState) Values() []HostRdmaDeviceConnectionState { + return []HostRdmaDeviceConnectionState{ + HostRdmaDeviceConnectionStateUnknown, + HostRdmaDeviceConnectionStateDown, + HostRdmaDeviceConnectionStateInit, + HostRdmaDeviceConnectionStateArmed, + HostRdmaDeviceConnectionStateActive, + HostRdmaDeviceConnectionStateActiveDefer, + } +} + +func (e HostRdmaDeviceConnectionState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostRdmaDeviceConnectionState"] = reflect.TypeOf((*HostRdmaDeviceConnectionState)(nil)).Elem() - minAPIVersionForType["HostRdmaDeviceConnectionState"] = "7.0" } // Deprecated as of vSphere API 6.0. // // Set of possible values for // `HostCapability.replayUnsupportedReason` and +// `HostCapability.replayCompatibilityIssues`. type HostReplayUnsupportedReason string const ( @@ -4133,11 +6577,26 @@ const ( HostReplayUnsupportedReasonUnknown = HostReplayUnsupportedReason("unknown") ) -func init() { - t["HostReplayUnsupportedReason"] = reflect.TypeOf((*HostReplayUnsupportedReason)(nil)).Elem() - minAPIVersionForType["HostReplayUnsupportedReason"] = "4.0" +func (e HostReplayUnsupportedReason) Values() []HostReplayUnsupportedReason { + return []HostReplayUnsupportedReason{ + HostReplayUnsupportedReasonIncompatibleProduct, + HostReplayUnsupportedReasonIncompatibleCpu, + HostReplayUnsupportedReasonHvDisabled, + HostReplayUnsupportedReasonCpuidLimitSet, + HostReplayUnsupportedReasonOldBIOS, + HostReplayUnsupportedReasonUnknown, + } } +func (e HostReplayUnsupportedReason) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostReplayUnsupportedReason"] = reflect.TypeOf((*HostReplayUnsupportedReason)(nil)).Elem() +} + +// Define the instance state type type HostRuntimeInfoNetStackInstanceRuntimeInfoState string const ( @@ -4151,9 +6610,21 @@ const ( HostRuntimeInfoNetStackInstanceRuntimeInfoStateActivating = HostRuntimeInfoNetStackInstanceRuntimeInfoState("activating") ) +func (e HostRuntimeInfoNetStackInstanceRuntimeInfoState) Values() []HostRuntimeInfoNetStackInstanceRuntimeInfoState { + return []HostRuntimeInfoNetStackInstanceRuntimeInfoState{ + HostRuntimeInfoNetStackInstanceRuntimeInfoStateInactive, + HostRuntimeInfoNetStackInstanceRuntimeInfoStateActive, + HostRuntimeInfoNetStackInstanceRuntimeInfoStateDeactivating, + HostRuntimeInfoNetStackInstanceRuntimeInfoStateActivating, + } +} + +func (e HostRuntimeInfoNetStackInstanceRuntimeInfoState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostRuntimeInfoNetStackInstanceRuntimeInfoState"] = reflect.TypeOf((*HostRuntimeInfoNetStackInstanceRuntimeInfoState)(nil)).Elem() - minAPIVersionForType["HostRuntimeInfoNetStackInstanceRuntimeInfoState"] = "5.5" } type HostRuntimeInfoStateEncryptionInfoProtectionMode string @@ -4165,6 +6636,17 @@ const ( HostRuntimeInfoStateEncryptionInfoProtectionModeTpm = HostRuntimeInfoStateEncryptionInfoProtectionMode("tpm") ) +func (e HostRuntimeInfoStateEncryptionInfoProtectionMode) Values() []HostRuntimeInfoStateEncryptionInfoProtectionMode { + return []HostRuntimeInfoStateEncryptionInfoProtectionMode{ + 
HostRuntimeInfoStateEncryptionInfoProtectionModeNone, + HostRuntimeInfoStateEncryptionInfoProtectionModeTpm, + } +} + +func (e HostRuntimeInfoStateEncryptionInfoProtectionMode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostRuntimeInfoStateEncryptionInfoProtectionMode"] = reflect.TypeOf((*HostRuntimeInfoStateEncryptionInfoProtectionMode)(nil)).Elem() minAPIVersionForType["HostRuntimeInfoStateEncryptionInfoProtectionMode"] = "7.0.3.0" @@ -4181,6 +6663,18 @@ const ( HostRuntimeInfoStatelessNvdsMigrationStateUnknown = HostRuntimeInfoStatelessNvdsMigrationState("unknown") ) +func (e HostRuntimeInfoStatelessNvdsMigrationState) Values() []HostRuntimeInfoStatelessNvdsMigrationState { + return []HostRuntimeInfoStatelessNvdsMigrationState{ + HostRuntimeInfoStatelessNvdsMigrationStateReady, + HostRuntimeInfoStatelessNvdsMigrationStateNotNeeded, + HostRuntimeInfoStatelessNvdsMigrationStateUnknown, + } +} + +func (e HostRuntimeInfoStatelessNvdsMigrationState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostRuntimeInfoStatelessNvdsMigrationState"] = reflect.TypeOf((*HostRuntimeInfoStatelessNvdsMigrationState)(nil)).Elem() minAPIVersionForType["HostRuntimeInfoStatelessNvdsMigrationState"] = "7.0.2.0" @@ -4198,6 +6692,18 @@ const ( HostServicePolicyOff = HostServicePolicy("off") ) +func (e HostServicePolicy) Values() []HostServicePolicy { + return []HostServicePolicy{ + HostServicePolicyOn, + HostServicePolicyAutomatic, + HostServicePolicyOff, + } +} + +func (e HostServicePolicy) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostServicePolicy"] = reflect.TypeOf((*HostServicePolicy)(nil)).Elem() } @@ -4210,11 +6716,24 @@ const ( HostSevInfoSevStateWorking = HostSevInfoSevState("working") ) +func (e HostSevInfoSevState) Values() []HostSevInfoSevState { + return []HostSevInfoSevState{ + HostSevInfoSevStateUninitialized, + HostSevInfoSevStateInitialized, + HostSevInfoSevStateWorking, + } +} + +func (e HostSevInfoSevState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostSevInfoSevState"] = reflect.TypeOf((*HostSevInfoSevState)(nil)).Elem() minAPIVersionForType["HostSevInfoSevState"] = "7.0.1.0" } +// Flexible Launch Enclave (FLC) modes. type HostSgxInfoFlcModes string const ( @@ -4231,11 +6750,23 @@ const ( HostSgxInfoFlcModesUnlocked = HostSgxInfoFlcModes("unlocked") ) -func init() { - t["HostSgxInfoFlcModes"] = reflect.TypeOf((*HostSgxInfoFlcModes)(nil)).Elem() - minAPIVersionForType["HostSgxInfoFlcModes"] = "7.0" +func (e HostSgxInfoFlcModes) Values() []HostSgxInfoFlcModes { + return []HostSgxInfoFlcModes{ + HostSgxInfoFlcModesOff, + HostSgxInfoFlcModesLocked, + HostSgxInfoFlcModesUnlocked, + } } +func (e HostSgxInfoFlcModes) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostSgxInfoFlcModes"] = reflect.TypeOf((*HostSgxInfoFlcModes)(nil)).Elem() +} + +// Host SGX states. 
type HostSgxInfoSgxStates string const ( @@ -4260,9 +6791,25 @@ const ( HostSgxInfoSgxStatesEnabled = HostSgxInfoSgxStates("enabled") ) +func (e HostSgxInfoSgxStates) Values() []HostSgxInfoSgxStates { + return []HostSgxInfoSgxStates{ + HostSgxInfoSgxStatesNotPresent, + HostSgxInfoSgxStatesDisabledBIOS, + HostSgxInfoSgxStatesDisabledCFW101, + HostSgxInfoSgxStatesDisabledCPUMismatch, + HostSgxInfoSgxStatesDisabledNoFLC, + HostSgxInfoSgxStatesDisabledNUMAUnsup, + HostSgxInfoSgxStatesDisabledMaxEPCRegs, + HostSgxInfoSgxStatesEnabled, + } +} + +func (e HostSgxInfoSgxStates) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostSgxInfoSgxStates"] = reflect.TypeOf((*HostSgxInfoSgxStates)(nil)).Elem() - minAPIVersionForType["HostSgxInfoSgxStates"] = "7.0" } type HostSgxRegistrationInfoRegistrationStatus string @@ -4276,6 +6823,18 @@ const ( HostSgxRegistrationInfoRegistrationStatusComplete = HostSgxRegistrationInfoRegistrationStatus("complete") ) +func (e HostSgxRegistrationInfoRegistrationStatus) Values() []HostSgxRegistrationInfoRegistrationStatus { + return []HostSgxRegistrationInfoRegistrationStatus{ + HostSgxRegistrationInfoRegistrationStatusNotApplicable, + HostSgxRegistrationInfoRegistrationStatusIncomplete, + HostSgxRegistrationInfoRegistrationStatusComplete, + } +} + +func (e HostSgxRegistrationInfoRegistrationStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostSgxRegistrationInfoRegistrationStatus"] = reflect.TypeOf((*HostSgxRegistrationInfoRegistrationStatus)(nil)).Elem() minAPIVersionForType["HostSgxRegistrationInfoRegistrationStatus"] = "8.0.0.1" @@ -4291,11 +6850,23 @@ const ( HostSgxRegistrationInfoRegistrationTypeAddPackage = HostSgxRegistrationInfoRegistrationType("addPackage") ) +func (e HostSgxRegistrationInfoRegistrationType) Values() []HostSgxRegistrationInfoRegistrationType { + return []HostSgxRegistrationInfoRegistrationType{ + HostSgxRegistrationInfoRegistrationTypeManifest, + HostSgxRegistrationInfoRegistrationTypeAddPackage, + } +} + +func (e HostSgxRegistrationInfoRegistrationType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostSgxRegistrationInfoRegistrationType"] = reflect.TypeOf((*HostSgxRegistrationInfoRegistrationType)(nil)).Elem() minAPIVersionForType["HostSgxRegistrationInfoRegistrationType"] = "8.0.0.1" } +// SNMP Agent supported capabilities enum type HostSnmpAgentCapability string const ( @@ -4307,11 +6878,23 @@ const ( HostSnmpAgentCapabilityCONFIGURATION = HostSnmpAgentCapability("CONFIGURATION") ) -func init() { - t["HostSnmpAgentCapability"] = reflect.TypeOf((*HostSnmpAgentCapability)(nil)).Elem() - minAPIVersionForType["HostSnmpAgentCapability"] = "4.0" +func (e HostSnmpAgentCapability) Values() []HostSnmpAgentCapability { + return []HostSnmpAgentCapability{ + HostSnmpAgentCapabilityCOMPLETE, + HostSnmpAgentCapabilityDIAGNOSTICS, + HostSnmpAgentCapabilityCONFIGURATION, + } } +func (e HostSnmpAgentCapability) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostSnmpAgentCapability"] = reflect.TypeOf((*HostSnmpAgentCapability)(nil)).Elem() +} + +// Defines a host's standby mode. 
type HostStandbyMode string const ( @@ -4326,11 +6909,24 @@ const ( HostStandbyModeNone = HostStandbyMode("none") ) -func init() { - t["HostStandbyMode"] = reflect.TypeOf((*HostStandbyMode)(nil)).Elem() - minAPIVersionForType["HostStandbyMode"] = "4.1" +func (e HostStandbyMode) Values() []HostStandbyMode { + return []HostStandbyMode{ + HostStandbyModeEntering, + HostStandbyModeExiting, + HostStandbyModeIn, + HostStandbyModeNone, + } } +func (e HostStandbyMode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostStandbyMode"] = reflect.TypeOf((*HostStandbyMode)(nil)).Elem() +} + +// The set of supported host bus adapter protocols. type HostStorageProtocol string const ( @@ -4340,9 +6936,19 @@ const ( HostStorageProtocolNvme = HostStorageProtocol("nvme") ) +func (e HostStorageProtocol) Values() []HostStorageProtocol { + return []HostStorageProtocol{ + HostStorageProtocolScsi, + HostStorageProtocolNvme, + } +} + +func (e HostStorageProtocol) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostStorageProtocol"] = reflect.TypeOf((*HostStorageProtocol)(nil)).Elem() - minAPIVersionForType["HostStorageProtocol"] = "7.0" } // Defines a host's connection state. @@ -4367,6 +6973,18 @@ const ( HostSystemConnectionStateDisconnected = HostSystemConnectionState("disconnected") ) +func (e HostSystemConnectionState) Values() []HostSystemConnectionState { + return []HostSystemConnectionState{ + HostSystemConnectionStateConnected, + HostSystemConnectionStateNotResponding, + HostSystemConnectionStateDisconnected, + } +} + +func (e HostSystemConnectionState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostSystemConnectionState"] = reflect.TypeOf((*HostSystemConnectionState)(nil)).Elem() } @@ -4386,16 +7004,25 @@ const ( HostSystemIdentificationInfoIdentifierSerialNumberTag = HostSystemIdentificationInfoIdentifier("SerialNumberTag") ) -func init() { - t["HostSystemIdentificationInfoIdentifier"] = reflect.TypeOf((*HostSystemIdentificationInfoIdentifier)(nil)).Elem() - minAPIVersionForType["HostSystemIdentificationInfoIdentifier"] = "2.5" - minAPIVersionForEnumValue["HostSystemIdentificationInfoIdentifier"] = map[string]string{ - "OemSpecificString": "5.0", - "EnclosureSerialNumberTag": "6.0", - "SerialNumberTag": "6.0", +func (e HostSystemIdentificationInfoIdentifier) Values() []HostSystemIdentificationInfoIdentifier { + return []HostSystemIdentificationInfoIdentifier{ + HostSystemIdentificationInfoIdentifierAssetTag, + HostSystemIdentificationInfoIdentifierServiceTag, + HostSystemIdentificationInfoIdentifierOemSpecificString, + HostSystemIdentificationInfoIdentifierEnclosureSerialNumberTag, + HostSystemIdentificationInfoIdentifierSerialNumberTag, } } +func (e HostSystemIdentificationInfoIdentifier) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostSystemIdentificationInfoIdentifier"] = reflect.TypeOf((*HostSystemIdentificationInfoIdentifier)(nil)).Elem() +} + +// Defines a host's power state. 
type HostSystemPowerState string const ( @@ -4430,11 +7057,24 @@ const ( HostSystemPowerStateUnknown = HostSystemPowerState("unknown") ) -func init() { - t["HostSystemPowerState"] = reflect.TypeOf((*HostSystemPowerState)(nil)).Elem() - minAPIVersionForType["HostSystemPowerState"] = "2.5" +func (e HostSystemPowerState) Values() []HostSystemPowerState { + return []HostSystemPowerState{ + HostSystemPowerStatePoweredOn, + HostSystemPowerStatePoweredOff, + HostSystemPowerStateStandBy, + HostSystemPowerStateUnknown, + } } +func (e HostSystemPowerState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostSystemPowerState"] = reflect.TypeOf((*HostSystemPowerState)(nil)).Elem() +} + +// Valid state for host profile remediation. type HostSystemRemediationStateState string const ( @@ -4452,11 +7092,26 @@ const ( HostSystemRemediationStateStateRemediationFailed = HostSystemRemediationStateState("remediationFailed") ) -func init() { - t["HostSystemRemediationStateState"] = reflect.TypeOf((*HostSystemRemediationStateState)(nil)).Elem() - minAPIVersionForType["HostSystemRemediationStateState"] = "6.7" +func (e HostSystemRemediationStateState) Values() []HostSystemRemediationStateState { + return []HostSystemRemediationStateState{ + HostSystemRemediationStateStateRemediationReady, + HostSystemRemediationStateStatePrecheckRemediationRunning, + HostSystemRemediationStateStatePrecheckRemediationComplete, + HostSystemRemediationStateStatePrecheckRemediationFailed, + HostSystemRemediationStateStateRemediationRunning, + HostSystemRemediationStateStateRemediationFailed, + } } +func (e HostSystemRemediationStateState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostSystemRemediationStateState"] = reflect.TypeOf((*HostSystemRemediationStateState)(nil)).Elem() +} + +// Status constants of TPM attestation. 
type HostTpmAttestationInfoAcceptanceStatus string const ( @@ -4466,9 +7121,19 @@ const ( HostTpmAttestationInfoAcceptanceStatusAccepted = HostTpmAttestationInfoAcceptanceStatus("accepted") ) +func (e HostTpmAttestationInfoAcceptanceStatus) Values() []HostTpmAttestationInfoAcceptanceStatus { + return []HostTpmAttestationInfoAcceptanceStatus{ + HostTpmAttestationInfoAcceptanceStatusNotAccepted, + HostTpmAttestationInfoAcceptanceStatusAccepted, + } +} + +func (e HostTpmAttestationInfoAcceptanceStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostTpmAttestationInfoAcceptanceStatus"] = reflect.TypeOf((*HostTpmAttestationInfoAcceptanceStatus)(nil)).Elem() - minAPIVersionForType["HostTpmAttestationInfoAcceptanceStatus"] = "6.7" } type HostTrustAuthorityAttestationInfoAttestationStatus string @@ -4482,12 +7147,25 @@ const ( HostTrustAuthorityAttestationInfoAttestationStatusUnknown = HostTrustAuthorityAttestationInfoAttestationStatus("unknown") ) +func (e HostTrustAuthorityAttestationInfoAttestationStatus) Values() []HostTrustAuthorityAttestationInfoAttestationStatus { + return []HostTrustAuthorityAttestationInfoAttestationStatus{ + HostTrustAuthorityAttestationInfoAttestationStatusAttested, + HostTrustAuthorityAttestationInfoAttestationStatusNotAttested, + HostTrustAuthorityAttestationInfoAttestationStatusUnknown, + } +} + +func (e HostTrustAuthorityAttestationInfoAttestationStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostTrustAuthorityAttestationInfoAttestationStatus"] = reflect.TypeOf((*HostTrustAuthorityAttestationInfoAttestationStatus)(nil)).Elem() minAPIVersionForType["HostTrustAuthorityAttestationInfoAttestationStatus"] = "7.0.1.0" } // Reasons for identifying the disk extent +// as copy of VMFS volume extent. 
type HostUnresolvedVmfsExtentUnresolvedReason string const ( @@ -4498,9 +7176,19 @@ const ( HostUnresolvedVmfsExtentUnresolvedReasonUuidConflict = HostUnresolvedVmfsExtentUnresolvedReason("uuidConflict") ) +func (e HostUnresolvedVmfsExtentUnresolvedReason) Values() []HostUnresolvedVmfsExtentUnresolvedReason { + return []HostUnresolvedVmfsExtentUnresolvedReason{ + HostUnresolvedVmfsExtentUnresolvedReasonDiskIdMismatch, + HostUnresolvedVmfsExtentUnresolvedReasonUuidConflict, + } +} + +func (e HostUnresolvedVmfsExtentUnresolvedReason) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostUnresolvedVmfsExtentUnresolvedReason"] = reflect.TypeOf((*HostUnresolvedVmfsExtentUnresolvedReason)(nil)).Elem() - minAPIVersionForType["HostUnresolvedVmfsExtentUnresolvedReason"] = "4.0" } type HostUnresolvedVmfsResolutionSpecVmfsUuidResolution string @@ -4520,9 +7208,19 @@ const ( HostUnresolvedVmfsResolutionSpecVmfsUuidResolutionForceMount = HostUnresolvedVmfsResolutionSpecVmfsUuidResolution("forceMount") ) +func (e HostUnresolvedVmfsResolutionSpecVmfsUuidResolution) Values() []HostUnresolvedVmfsResolutionSpecVmfsUuidResolution { + return []HostUnresolvedVmfsResolutionSpecVmfsUuidResolution{ + HostUnresolvedVmfsResolutionSpecVmfsUuidResolutionResignature, + HostUnresolvedVmfsResolutionSpecVmfsUuidResolutionForceMount, + } +} + +func (e HostUnresolvedVmfsResolutionSpecVmfsUuidResolution) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostUnresolvedVmfsResolutionSpecVmfsUuidResolution"] = reflect.TypeOf((*HostUnresolvedVmfsResolutionSpecVmfsUuidResolution)(nil)).Elem() - minAPIVersionForType["HostUnresolvedVmfsResolutionSpecVmfsUuidResolution"] = "4.0" } type HostVirtualNicManagerNicType string @@ -4584,22 +7282,36 @@ const ( HostVirtualNicManagerNicTypeNvmeRdma = HostVirtualNicManagerNicType("nvmeRdma") ) -func init() { - t["HostVirtualNicManagerNicType"] = reflect.TypeOf((*HostVirtualNicManagerNicType)(nil)).Elem() - minAPIVersionForType["HostVirtualNicManagerNicType"] = "4.0" - minAPIVersionForEnumValue["HostVirtualNicManagerNicType"] = map[string]string{ - "vSphereReplication": "5.1", - "vSphereReplicationNFC": "6.0", - "vsan": "5.5", - "vSphereProvisioning": "6.0", - "vsanWitness": "6.5", - "vSphereBackupNFC": "7.0", - "ptp": "7.0", - "nvmeTcp": "7.0.3.0", - "nvmeRdma": "7.0.3.0", +func (e HostVirtualNicManagerNicType) Values() []HostVirtualNicManagerNicType { + return []HostVirtualNicManagerNicType{ + HostVirtualNicManagerNicTypeVmotion, + HostVirtualNicManagerNicTypeFaultToleranceLogging, + HostVirtualNicManagerNicTypeVSphereReplication, + HostVirtualNicManagerNicTypeVSphereReplicationNFC, + HostVirtualNicManagerNicTypeManagement, + HostVirtualNicManagerNicTypeVsan, + HostVirtualNicManagerNicTypeVSphereProvisioning, + HostVirtualNicManagerNicTypeVsanWitness, + HostVirtualNicManagerNicTypeVSphereBackupNFC, + HostVirtualNicManagerNicTypePtp, + HostVirtualNicManagerNicTypeNvmeTcp, + HostVirtualNicManagerNicTypeNvmeRdma, } } +func (e HostVirtualNicManagerNicType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostVirtualNicManagerNicType"] = reflect.TypeOf((*HostVirtualNicManagerNicType)(nil)).Elem() + minAPIVersionForEnumValue["HostVirtualNicManagerNicType"] = map[string]string{ + "nvmeTcp": "7.0.3.0", + "nvmeRdma": "7.0.3.0", + } +} + +// Set of possible values for mode field in AccessSpec. 
type HostVmciAccessManagerMode string const ( @@ -4611,14 +7323,26 @@ const ( HostVmciAccessManagerModeRevoke = HostVmciAccessManagerMode("revoke") ) +func (e HostVmciAccessManagerMode) Values() []HostVmciAccessManagerMode { + return []HostVmciAccessManagerMode{ + HostVmciAccessManagerModeGrant, + HostVmciAccessManagerModeReplace, + HostVmciAccessManagerModeRevoke, + } +} + +func (e HostVmciAccessManagerMode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostVmciAccessManagerMode"] = reflect.TypeOf((*HostVmciAccessManagerMode)(nil)).Elem() - minAPIVersionForType["HostVmciAccessManagerMode"] = "5.0" } // VMFS unmap bandwidth policy. // // VMFS unmap reclaims unused storage space. +// This specifies the bandwidth policy option of unmaps. type HostVmfsVolumeUnmapBandwidthPolicy string const ( @@ -4628,14 +7352,25 @@ const ( HostVmfsVolumeUnmapBandwidthPolicyDynamic = HostVmfsVolumeUnmapBandwidthPolicy("dynamic") ) +func (e HostVmfsVolumeUnmapBandwidthPolicy) Values() []HostVmfsVolumeUnmapBandwidthPolicy { + return []HostVmfsVolumeUnmapBandwidthPolicy{ + HostVmfsVolumeUnmapBandwidthPolicyFixed, + HostVmfsVolumeUnmapBandwidthPolicyDynamic, + } +} + +func (e HostVmfsVolumeUnmapBandwidthPolicy) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HostVmfsVolumeUnmapBandwidthPolicy"] = reflect.TypeOf((*HostVmfsVolumeUnmapBandwidthPolicy)(nil)).Elem() - minAPIVersionForType["HostVmfsVolumeUnmapBandwidthPolicy"] = "6.7" } // VMFS unmap priority. // // VMFS unmap reclaims unused storage space. +// This specifies the processing rate of unmaps. type HostVmfsVolumeUnmapPriority string const ( @@ -4645,11 +7380,22 @@ const ( HostVmfsVolumeUnmapPriorityLow = HostVmfsVolumeUnmapPriority("low") ) -func init() { - t["HostVmfsVolumeUnmapPriority"] = reflect.TypeOf((*HostVmfsVolumeUnmapPriority)(nil)).Elem() - minAPIVersionForType["HostVmfsVolumeUnmapPriority"] = "6.5" +func (e HostVmfsVolumeUnmapPriority) Values() []HostVmfsVolumeUnmapPriority { + return []HostVmfsVolumeUnmapPriority{ + HostVmfsVolumeUnmapPriorityNone, + HostVmfsVolumeUnmapPriorityLow, + } } +func (e HostVmfsVolumeUnmapPriority) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HostVmfsVolumeUnmapPriority"] = reflect.TypeOf((*HostVmfsVolumeUnmapPriority)(nil)).Elem() +} + +// List of supported algorithms for checksum calculation. 
type HttpNfcLeaseManifestEntryChecksumType string const ( @@ -4657,11 +7403,22 @@ const ( HttpNfcLeaseManifestEntryChecksumTypeSha256 = HttpNfcLeaseManifestEntryChecksumType("sha256") ) -func init() { - t["HttpNfcLeaseManifestEntryChecksumType"] = reflect.TypeOf((*HttpNfcLeaseManifestEntryChecksumType)(nil)).Elem() - minAPIVersionForType["HttpNfcLeaseManifestEntryChecksumType"] = "6.7" +func (e HttpNfcLeaseManifestEntryChecksumType) Values() []HttpNfcLeaseManifestEntryChecksumType { + return []HttpNfcLeaseManifestEntryChecksumType{ + HttpNfcLeaseManifestEntryChecksumTypeSha1, + HttpNfcLeaseManifestEntryChecksumTypeSha256, + } } +func (e HttpNfcLeaseManifestEntryChecksumType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HttpNfcLeaseManifestEntryChecksumType"] = reflect.TypeOf((*HttpNfcLeaseManifestEntryChecksumType)(nil)).Elem() +} + +// List of supported modes by HttpNfcLease type HttpNfcLeaseMode string const ( @@ -4674,11 +7431,22 @@ const ( HttpNfcLeaseModePull = HttpNfcLeaseMode("pull") ) -func init() { - t["HttpNfcLeaseMode"] = reflect.TypeOf((*HttpNfcLeaseMode)(nil)).Elem() - minAPIVersionForType["HttpNfcLeaseMode"] = "6.7" +func (e HttpNfcLeaseMode) Values() []HttpNfcLeaseMode { + return []HttpNfcLeaseMode{ + HttpNfcLeaseModePushOrGet, + HttpNfcLeaseModePull, + } } +func (e HttpNfcLeaseMode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["HttpNfcLeaseMode"] = reflect.TypeOf((*HttpNfcLeaseMode)(nil)).Elem() +} + +// List of possible states of a lease. type HttpNfcLeaseState string const ( @@ -4693,9 +7461,21 @@ const ( HttpNfcLeaseStateError = HttpNfcLeaseState("error") ) +func (e HttpNfcLeaseState) Values() []HttpNfcLeaseState { + return []HttpNfcLeaseState{ + HttpNfcLeaseStateInitializing, + HttpNfcLeaseStateReady, + HttpNfcLeaseStateDone, + HttpNfcLeaseStateError, + } +} + +func (e HttpNfcLeaseState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["HttpNfcLeaseState"] = reflect.TypeOf((*HttpNfcLeaseState)(nil)).Elem() - minAPIVersionForType["HttpNfcLeaseState"] = "4.0" } type IncompatibleHostForVmReplicationIncompatibleReason string @@ -4708,9 +7488,19 @@ const ( IncompatibleHostForVmReplicationIncompatibleReasonNetCompression = IncompatibleHostForVmReplicationIncompatibleReason("netCompression") ) +func (e IncompatibleHostForVmReplicationIncompatibleReason) Values() []IncompatibleHostForVmReplicationIncompatibleReason { + return []IncompatibleHostForVmReplicationIncompatibleReason{ + IncompatibleHostForVmReplicationIncompatibleReasonRpo, + IncompatibleHostForVmReplicationIncompatibleReasonNetCompression, + } +} + +func (e IncompatibleHostForVmReplicationIncompatibleReason) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["IncompatibleHostForVmReplicationIncompatibleReason"] = reflect.TypeOf((*IncompatibleHostForVmReplicationIncompatibleReason)(nil)).Elem() - minAPIVersionForType["IncompatibleHostForVmReplicationIncompatibleReason"] = "6.0" } // The available iSNS discovery methods. 
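Illustrative sketch (not taken from the patch) of how downstream code can consume the helpers these hunks add: it lists the valid HttpNfcLeaseState names through the new Strings() method instead of hard-coding them, assuming the usual govmomi import path github.com/vmware/govmomi/vim25/types.

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	// Strings() is the generated helper added in this bump; it wraps
	// Values() via EnumValuesAsStrings, so callers no longer need to
	// maintain their own list of lease state names.
	for _, s := range types.HttpNfcLeaseState("").Strings() {
		fmt.Println(s)
	}
}

Because the methods use value receivers, calling them on a zero value of the enum type is enough; no populated object is required.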
@@ -4722,6 +7512,18 @@ const ( InternetScsiSnsDiscoveryMethodIsnsSlp = InternetScsiSnsDiscoveryMethod("isnsSlp") ) +func (e InternetScsiSnsDiscoveryMethod) Values() []InternetScsiSnsDiscoveryMethod { + return []InternetScsiSnsDiscoveryMethod{ + InternetScsiSnsDiscoveryMethodIsnsStatic, + InternetScsiSnsDiscoveryMethodIsnsDhcp, + InternetScsiSnsDiscoveryMethodIsnsSlp, + } +} + +func (e InternetScsiSnsDiscoveryMethod) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["InternetScsiSnsDiscoveryMethod"] = reflect.TypeOf((*InternetScsiSnsDiscoveryMethod)(nil)).Elem() } @@ -4737,9 +7539,20 @@ const ( InvalidDasConfigArgumentEntryForInvalidArgumentVmConfig = InvalidDasConfigArgumentEntryForInvalidArgument("vmConfig") ) +func (e InvalidDasConfigArgumentEntryForInvalidArgument) Values() []InvalidDasConfigArgumentEntryForInvalidArgument { + return []InvalidDasConfigArgumentEntryForInvalidArgument{ + InvalidDasConfigArgumentEntryForInvalidArgumentAdmissionControl, + InvalidDasConfigArgumentEntryForInvalidArgumentUserHeartbeatDs, + InvalidDasConfigArgumentEntryForInvalidArgumentVmConfig, + } +} + +func (e InvalidDasConfigArgumentEntryForInvalidArgument) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["InvalidDasConfigArgumentEntryForInvalidArgument"] = reflect.TypeOf((*InvalidDasConfigArgumentEntryForInvalidArgument)(nil)).Elem() - minAPIVersionForType["InvalidDasConfigArgumentEntryForInvalidArgument"] = "5.1" } type InvalidProfileReferenceHostReason string @@ -4751,11 +7564,22 @@ const ( InvalidProfileReferenceHostReasonMissingReferenceHost = InvalidProfileReferenceHostReason("missingReferenceHost") ) -func init() { - t["InvalidProfileReferenceHostReason"] = reflect.TypeOf((*InvalidProfileReferenceHostReason)(nil)).Elem() - minAPIVersionForType["InvalidProfileReferenceHostReason"] = "5.0" +func (e InvalidProfileReferenceHostReason) Values() []InvalidProfileReferenceHostReason { + return []InvalidProfileReferenceHostReason{ + InvalidProfileReferenceHostReasonIncompatibleVersion, + InvalidProfileReferenceHostReasonMissingReferenceHost, + } } +func (e InvalidProfileReferenceHostReason) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["InvalidProfileReferenceHostReason"] = reflect.TypeOf((*InvalidProfileReferenceHostReason)(nil)).Elem() +} + +// Defines the type of operation for an IO Filter. type IoFilterOperation string const ( @@ -4767,11 +7591,23 @@ const ( IoFilterOperationUpgrade = IoFilterOperation("upgrade") ) -func init() { - t["IoFilterOperation"] = reflect.TypeOf((*IoFilterOperation)(nil)).Elem() - minAPIVersionForType["IoFilterOperation"] = "6.0" +func (e IoFilterOperation) Values() []IoFilterOperation { + return []IoFilterOperation{ + IoFilterOperationInstall, + IoFilterOperationUninstall, + IoFilterOperationUpgrade, + } } +func (e IoFilterOperation) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["IoFilterOperation"] = reflect.TypeOf((*IoFilterOperation)(nil)).Elem() +} + +// Defines the type of an IO Filter. 
type IoFilterType string const ( @@ -4793,9 +7629,25 @@ const ( IoFilterTypeDataCapture = IoFilterType("dataCapture") ) +func (e IoFilterType) Values() []IoFilterType { + return []IoFilterType{ + IoFilterTypeCache, + IoFilterTypeReplication, + IoFilterTypeEncryption, + IoFilterTypeCompression, + IoFilterTypeInspection, + IoFilterTypeDatastoreIoControl, + IoFilterTypeDataProvider, + IoFilterTypeDataCapture, + } +} + +func (e IoFilterType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["IoFilterType"] = reflect.TypeOf((*IoFilterType)(nil)).Elem() - minAPIVersionForType["IoFilterType"] = "6.5" minAPIVersionForEnumValue["IoFilterType"] = map[string]string{ "dataCapture": "7.0.2.1", } @@ -4820,11 +7672,24 @@ const ( IscsiPortInfoPathStatusLastActive = IscsiPortInfoPathStatus("lastActive") ) -func init() { - t["IscsiPortInfoPathStatus"] = reflect.TypeOf((*IscsiPortInfoPathStatus)(nil)).Elem() - minAPIVersionForType["IscsiPortInfoPathStatus"] = "5.0" +func (e IscsiPortInfoPathStatus) Values() []IscsiPortInfoPathStatus { + return []IscsiPortInfoPathStatus{ + IscsiPortInfoPathStatusNotUsed, + IscsiPortInfoPathStatusActive, + IscsiPortInfoPathStatusStandBy, + IscsiPortInfoPathStatusLastActive, + } } +func (e IscsiPortInfoPathStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["IscsiPortInfoPathStatus"] = reflect.TypeOf((*IscsiPortInfoPathStatus)(nil)).Elem() +} + +// Key provider management type. type KmipClusterInfoKmsManagementType string const ( @@ -4835,9 +7700,21 @@ const ( KmipClusterInfoKmsManagementTypeNativeProvider = KmipClusterInfoKmsManagementType("nativeProvider") ) +func (e KmipClusterInfoKmsManagementType) Values() []KmipClusterInfoKmsManagementType { + return []KmipClusterInfoKmsManagementType{ + KmipClusterInfoKmsManagementTypeUnknown, + KmipClusterInfoKmsManagementTypeVCenter, + KmipClusterInfoKmsManagementTypeTrustAuthority, + KmipClusterInfoKmsManagementTypeNativeProvider, + } +} + +func (e KmipClusterInfoKmsManagementType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["KmipClusterInfoKmsManagementType"] = reflect.TypeOf((*KmipClusterInfoKmsManagementType)(nil)).Elem() - minAPIVersionForType["KmipClusterInfoKmsManagementType"] = "7.0" minAPIVersionForEnumValue["KmipClusterInfoKmsManagementType"] = map[string]string{ "nativeProvider": "7.0.2.0", } @@ -4847,6 +7724,7 @@ func init() { // used to specify the latency-sensitivity level of the application. // // In terms of latency-sensitivity the values relate: +// high>medium>normal>low. type LatencySensitivitySensitivityLevel string const ( @@ -4860,8 +7738,6 @@ const ( LatencySensitivitySensitivityLevelMedium = LatencySensitivitySensitivityLevel("medium") // The relative latency-sensitivity high value. LatencySensitivitySensitivityLevelHigh = LatencySensitivitySensitivityLevel("high") - // - // // Deprecated as of vSphere API Ver 6.0. Value will be ignored and // treated as "normal" latency sensitivity. 
// @@ -4875,9 +7751,22 @@ const ( LatencySensitivitySensitivityLevelCustom = LatencySensitivitySensitivityLevel("custom") ) +func (e LatencySensitivitySensitivityLevel) Values() []LatencySensitivitySensitivityLevel { + return []LatencySensitivitySensitivityLevel{ + LatencySensitivitySensitivityLevelLow, + LatencySensitivitySensitivityLevelNormal, + LatencySensitivitySensitivityLevelMedium, + LatencySensitivitySensitivityLevelHigh, + LatencySensitivitySensitivityLevelCustom, + } +} + +func (e LatencySensitivitySensitivityLevel) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["LatencySensitivitySensitivityLevel"] = reflect.TypeOf((*LatencySensitivitySensitivityLevel)(nil)).Elem() - minAPIVersionForType["LatencySensitivitySensitivityLevel"] = "5.1" } type LicenseAssignmentFailedReason string @@ -4893,12 +7782,26 @@ const ( LicenseAssignmentFailedReasonHostsUnmanageableByVirtualCenterWithoutLicenseServer = LicenseAssignmentFailedReason("hostsUnmanageableByVirtualCenterWithoutLicenseServer") ) +func (e LicenseAssignmentFailedReason) Values() []LicenseAssignmentFailedReason { + return []LicenseAssignmentFailedReason{ + LicenseAssignmentFailedReasonKeyEntityMismatch, + LicenseAssignmentFailedReasonDowngradeDisallowed, + LicenseAssignmentFailedReasonInventoryNotManageableByVirtualCenter, + LicenseAssignmentFailedReasonHostsUnmanageableByVirtualCenterWithoutLicenseServer, + } +} + +func (e LicenseAssignmentFailedReason) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["LicenseAssignmentFailedReason"] = reflect.TypeOf((*LicenseAssignmentFailedReason)(nil)).Elem() - minAPIVersionForType["LicenseAssignmentFailedReason"] = "4.0" } // Some licenses may only be allowed to load from a specified source. +// +// This enum indicates what restrictions exist for this license if any. type LicenseFeatureInfoSourceRestriction string const ( @@ -4910,9 +7813,20 @@ const ( LicenseFeatureInfoSourceRestrictionFile = LicenseFeatureInfoSourceRestriction("file") ) +func (e LicenseFeatureInfoSourceRestriction) Values() []LicenseFeatureInfoSourceRestriction { + return []LicenseFeatureInfoSourceRestriction{ + LicenseFeatureInfoSourceRestrictionUnrestricted, + LicenseFeatureInfoSourceRestrictionServed, + LicenseFeatureInfoSourceRestrictionFile, + } +} + +func (e LicenseFeatureInfoSourceRestriction) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["LicenseFeatureInfoSourceRestriction"] = reflect.TypeOf((*LicenseFeatureInfoSourceRestriction)(nil)).Elem() - minAPIVersionForType["LicenseFeatureInfoSourceRestriction"] = "2.5" } // Describes the state of the feature. 
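A small hedged sketch of input validation built on the generated Values() helper, using LatencySensitivitySensitivityLevel from the hunk above; isValidSensitivity is a hypothetical helper name, not part of govmomi.

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

// isValidSensitivity accepts a string only if it matches one of the
// generated enum constants (low, normal, medium, high, custom).
func isValidSensitivity(s string) bool {
	for _, v := range types.LatencySensitivitySensitivityLevel("").Values() {
		if string(v) == s {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isValidSensitivity("high"))  // true
	fmt.Println(isValidSensitivity("bogus")) // false
}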
@@ -4931,6 +7845,18 @@ const ( LicenseFeatureInfoStateOptional = LicenseFeatureInfoState("optional") ) +func (e LicenseFeatureInfoState) Values() []LicenseFeatureInfoState { + return []LicenseFeatureInfoState{ + LicenseFeatureInfoStateEnabled, + LicenseFeatureInfoStateDisabled, + LicenseFeatureInfoStateOptional, + } +} + +func (e LicenseFeatureInfoState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["LicenseFeatureInfoState"] = reflect.TypeOf((*LicenseFeatureInfoState)(nil)).Elem() } @@ -4952,6 +7878,20 @@ const ( LicenseFeatureInfoUnitVm = LicenseFeatureInfoUnit("vm") ) +func (e LicenseFeatureInfoUnit) Values() []LicenseFeatureInfoUnit { + return []LicenseFeatureInfoUnit{ + LicenseFeatureInfoUnitHost, + LicenseFeatureInfoUnitCpuCore, + LicenseFeatureInfoUnitCpuPackage, + LicenseFeatureInfoUnitServer, + LicenseFeatureInfoUnitVm, + } +} + +func (e LicenseFeatureInfoUnit) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["LicenseFeatureInfoUnit"] = reflect.TypeOf((*LicenseFeatureInfoUnit)(nil)).Elem() } @@ -5045,16 +7985,39 @@ const ( LicenseManagerLicenseKeyDas = LicenseManagerLicenseKey("das") ) -func init() { - t["LicenseManagerLicenseKey"] = reflect.TypeOf((*LicenseManagerLicenseKey)(nil)).Elem() - minAPIVersionForEnumValue["LicenseManagerLicenseKey"] = map[string]string{ - "vcExpress": "2.5", - "serverHost": "2.5", - "drsPower": "2.5", +func (e LicenseManagerLicenseKey) Values() []LicenseManagerLicenseKey { + return []LicenseManagerLicenseKey{ + LicenseManagerLicenseKeyEsxFull, + LicenseManagerLicenseKeyEsxVmtn, + LicenseManagerLicenseKeyEsxExpress, + LicenseManagerLicenseKeySan, + LicenseManagerLicenseKeyIscsi, + LicenseManagerLicenseKeyNas, + LicenseManagerLicenseKeyVsmp, + LicenseManagerLicenseKeyBackup, + LicenseManagerLicenseKeyVc, + LicenseManagerLicenseKeyVcExpress, + LicenseManagerLicenseKeyEsxHost, + LicenseManagerLicenseKeyGsxHost, + LicenseManagerLicenseKeyServerHost, + LicenseManagerLicenseKeyDrsPower, + LicenseManagerLicenseKeyVmotion, + LicenseManagerLicenseKeyDrs, + LicenseManagerLicenseKeyDas, } } +func (e LicenseManagerLicenseKey) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["LicenseManagerLicenseKey"] = reflect.TypeOf((*LicenseManagerLicenseKey)(nil)).Elem() +} + // Deprecated as of vSphere API 4.0, this is not used by the system. +// +// State of licensing subsystem. type LicenseManagerState string const ( @@ -5068,9 +8031,21 @@ const ( LicenseManagerStateFault = LicenseManagerState("fault") ) +func (e LicenseManagerState) Values() []LicenseManagerState { + return []LicenseManagerState{ + LicenseManagerStateInitializing, + LicenseManagerStateNormal, + LicenseManagerStateMarginal, + LicenseManagerStateFault, + } +} + +func (e LicenseManagerState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["LicenseManagerState"] = reflect.TypeOf((*LicenseManagerState)(nil)).Elem() - minAPIVersionForType["LicenseManagerState"] = "2.5" } // Describes the reservation state of a license. 
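Another illustrative use, assuming only the vendored types package: joining the generated Strings() output for help text or error messages, here with LicenseManagerState from the hunk above.

package main

import (
	"fmt"
	"strings"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	// The accepted values come from the generated helper rather than a
	// hand-maintained list, so they stay in sync with the vendored SDK.
	fmt.Println("license manager states:",
		strings.Join(types.LicenseManagerState("").Strings(), ", "))
}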
@@ -5097,10 +8072,24 @@ const ( LicenseReservationInfoStateLicensed = LicenseReservationInfoState("licensed") ) +func (e LicenseReservationInfoState) Values() []LicenseReservationInfoState { + return []LicenseReservationInfoState{ + LicenseReservationInfoStateNotUsed, + LicenseReservationInfoStateNoLicense, + LicenseReservationInfoStateUnlicensedUse, + LicenseReservationInfoStateLicensed, + } +} + +func (e LicenseReservationInfoState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["LicenseReservationInfoState"] = reflect.TypeOf((*LicenseReservationInfoState)(nil)).Elem() } +// The Discovery Protocol operation. type LinkDiscoveryProtocolConfigOperationType string const ( @@ -5118,11 +8107,24 @@ const ( LinkDiscoveryProtocolConfigOperationTypeBoth = LinkDiscoveryProtocolConfigOperationType("both") ) -func init() { - t["LinkDiscoveryProtocolConfigOperationType"] = reflect.TypeOf((*LinkDiscoveryProtocolConfigOperationType)(nil)).Elem() - minAPIVersionForType["LinkDiscoveryProtocolConfigOperationType"] = "4.0" +func (e LinkDiscoveryProtocolConfigOperationType) Values() []LinkDiscoveryProtocolConfigOperationType { + return []LinkDiscoveryProtocolConfigOperationType{ + LinkDiscoveryProtocolConfigOperationTypeNone, + LinkDiscoveryProtocolConfigOperationTypeListen, + LinkDiscoveryProtocolConfigOperationTypeAdvertise, + LinkDiscoveryProtocolConfigOperationTypeBoth, + } } +func (e LinkDiscoveryProtocolConfigOperationType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["LinkDiscoveryProtocolConfigOperationType"] = reflect.TypeOf((*LinkDiscoveryProtocolConfigOperationType)(nil)).Elem() +} + +// The Discovery Protocol types. type LinkDiscoveryProtocolConfigProtocolType string const ( @@ -5132,9 +8134,19 @@ const ( LinkDiscoveryProtocolConfigProtocolTypeLldp = LinkDiscoveryProtocolConfigProtocolType("lldp") ) +func (e LinkDiscoveryProtocolConfigProtocolType) Values() []LinkDiscoveryProtocolConfigProtocolType { + return []LinkDiscoveryProtocolConfigProtocolType{ + LinkDiscoveryProtocolConfigProtocolTypeCdp, + LinkDiscoveryProtocolConfigProtocolTypeLldp, + } +} + +func (e LinkDiscoveryProtocolConfigProtocolType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["LinkDiscoveryProtocolConfigProtocolType"] = reflect.TypeOf((*LinkDiscoveryProtocolConfigProtocolType)(nil)).Elem() - minAPIVersionForType["LinkDiscoveryProtocolConfigProtocolType"] = "4.0" } // The Status enumeration defines a general "health" value for a managed entity. 
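The next hunks cover ManagedEntityStatus (gray, green, yellow, red). A minimal sketch of mapping those colors to a numeric level; severity is a hypothetical helper, and the ManagedEntityStatus constants are the ones defined in the hunk that follows.

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

// severity maps the four status colors to a level; gray (unknown) and any
// unrecognized value fall through to -1.
func severity(s types.ManagedEntityStatus) int {
	switch s {
	case types.ManagedEntityStatusGreen:
		return 0
	case types.ManagedEntityStatusYellow:
		return 1
	case types.ManagedEntityStatusRed:
		return 2
	default:
		return -1
	}
}

func main() {
	fmt.Println(severity(types.ManagedEntityStatusYellow)) // 1
}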
@@ -5151,6 +8163,19 @@ const ( ManagedEntityStatusRed = ManagedEntityStatus("red") ) +func (e ManagedEntityStatus) Values() []ManagedEntityStatus { + return []ManagedEntityStatus{ + ManagedEntityStatusGray, + ManagedEntityStatusGreen, + ManagedEntityStatusYellow, + ManagedEntityStatusRed, + } +} + +func (e ManagedEntityStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ManagedEntityStatus"] = reflect.TypeOf((*ManagedEntityStatus)(nil)).Elem() } @@ -5165,6 +8190,17 @@ const ( MetricAlarmOperatorIsBelow = MetricAlarmOperator("isBelow") ) +func (e MetricAlarmOperator) Values() []MetricAlarmOperator { + return []MetricAlarmOperator{ + MetricAlarmOperatorIsAbove, + MetricAlarmOperatorIsBelow, + } +} + +func (e MetricAlarmOperator) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["MetricAlarmOperator"] = reflect.TypeOf((*MetricAlarmOperator)(nil)).Elem() } @@ -5180,10 +8216,25 @@ const ( MultipathStateUnknown = MultipathState("unknown") ) +func (e MultipathState) Values() []MultipathState { + return []MultipathState{ + MultipathStateStandby, + MultipathStateActive, + MultipathStateDisabled, + MultipathStateDead, + MultipathStateUnknown, + } +} + +func (e MultipathState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["MultipathState"] = reflect.TypeOf((*MultipathState)(nil)).Elem() } +// NetBIOS configuration mode. type NetBIOSConfigInfoMode string const ( @@ -5197,12 +8248,26 @@ const ( NetBIOSConfigInfoModeEnabledViaDHCP = NetBIOSConfigInfoMode("enabledViaDHCP") ) +func (e NetBIOSConfigInfoMode) Values() []NetBIOSConfigInfoMode { + return []NetBIOSConfigInfoMode{ + NetBIOSConfigInfoModeUnknown, + NetBIOSConfigInfoModeEnabled, + NetBIOSConfigInfoModeDisabled, + NetBIOSConfigInfoModeEnabledViaDHCP, + } +} + +func (e NetBIOSConfigInfoMode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["NetBIOSConfigInfoMode"] = reflect.TypeOf((*NetBIOSConfigInfoMode)(nil)).Elem() - minAPIVersionForType["NetBIOSConfigInfoMode"] = "4.1" } // This specifies how an IP address was obtained for a given interface. +// +// See RFC 4293 IpAddressOriginTC. 
type NetIpConfigInfoIpAddressOrigin string const ( @@ -5228,9 +8293,22 @@ const ( NetIpConfigInfoIpAddressOriginRandom = NetIpConfigInfoIpAddressOrigin("random") ) +func (e NetIpConfigInfoIpAddressOrigin) Values() []NetIpConfigInfoIpAddressOrigin { + return []NetIpConfigInfoIpAddressOrigin{ + NetIpConfigInfoIpAddressOriginOther, + NetIpConfigInfoIpAddressOriginManual, + NetIpConfigInfoIpAddressOriginDhcp, + NetIpConfigInfoIpAddressOriginLinklayer, + NetIpConfigInfoIpAddressOriginRandom, + } +} + +func (e NetIpConfigInfoIpAddressOrigin) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["NetIpConfigInfoIpAddressOrigin"] = reflect.TypeOf((*NetIpConfigInfoIpAddressOrigin)(nil)).Elem() - minAPIVersionForType["NetIpConfigInfoIpAddressOrigin"] = "4.1" } type NetIpConfigInfoIpAddressStatus string @@ -5256,15 +8334,31 @@ const ( NetIpConfigInfoIpAddressStatusDuplicate = NetIpConfigInfoIpAddressStatus("duplicate") ) +func (e NetIpConfigInfoIpAddressStatus) Values() []NetIpConfigInfoIpAddressStatus { + return []NetIpConfigInfoIpAddressStatus{ + NetIpConfigInfoIpAddressStatusPreferred, + NetIpConfigInfoIpAddressStatusDeprecated, + NetIpConfigInfoIpAddressStatusInvalid, + NetIpConfigInfoIpAddressStatusInaccessible, + NetIpConfigInfoIpAddressStatusUnknown, + NetIpConfigInfoIpAddressStatusTentative, + NetIpConfigInfoIpAddressStatusDuplicate, + } +} + +func (e NetIpConfigInfoIpAddressStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["NetIpConfigInfoIpAddressStatus"] = reflect.TypeOf((*NetIpConfigInfoIpAddressStatus)(nil)).Elem() - minAPIVersionForType["NetIpConfigInfoIpAddressStatus"] = "4.1" } // IP Stack keeps state on entries in IpNetToMedia table to perform // physical address lookups for IP addresses. // // Here are the standard +// states per @see RFC 4293 ipNetToMediaType. type NetIpStackInfoEntryType string const ( @@ -5279,12 +8373,26 @@ const ( NetIpStackInfoEntryTypeManual = NetIpStackInfoEntryType("manual") ) +func (e NetIpStackInfoEntryType) Values() []NetIpStackInfoEntryType { + return []NetIpStackInfoEntryType{ + NetIpStackInfoEntryTypeOther, + NetIpStackInfoEntryTypeInvalid, + NetIpStackInfoEntryTypeDynamic, + NetIpStackInfoEntryTypeManual, + } +} + +func (e NetIpStackInfoEntryType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["NetIpStackInfoEntryType"] = reflect.TypeOf((*NetIpStackInfoEntryType)(nil)).Elem() - minAPIVersionForType["NetIpStackInfoEntryType"] = "4.1" } // The set of values used to determine ordering of default routers. +// +// See RFC 4293 ipDefaultRouterPreference. 
type NetIpStackInfoPreference string const ( @@ -5294,9 +8402,21 @@ const ( NetIpStackInfoPreferenceHigh = NetIpStackInfoPreference("high") ) +func (e NetIpStackInfoPreference) Values() []NetIpStackInfoPreference { + return []NetIpStackInfoPreference{ + NetIpStackInfoPreferenceReserved, + NetIpStackInfoPreferenceLow, + NetIpStackInfoPreferenceMedium, + NetIpStackInfoPreferenceHigh, + } +} + +func (e NetIpStackInfoPreference) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["NetIpStackInfoPreference"] = reflect.TypeOf((*NetIpStackInfoPreference)(nil)).Elem() - minAPIVersionForType["NetIpStackInfoPreference"] = "4.1" } type NotSupportedDeviceForFTDeviceType string @@ -5308,16 +8428,25 @@ const ( NotSupportedDeviceForFTDeviceTypeParaVirtualSCSIController = NotSupportedDeviceForFTDeviceType("paraVirtualSCSIController") ) -func init() { - t["NotSupportedDeviceForFTDeviceType"] = reflect.TypeOf((*NotSupportedDeviceForFTDeviceType)(nil)).Elem() - minAPIVersionForType["NotSupportedDeviceForFTDeviceType"] = "4.1" +func (e NotSupportedDeviceForFTDeviceType) Values() []NotSupportedDeviceForFTDeviceType { + return []NotSupportedDeviceForFTDeviceType{ + NotSupportedDeviceForFTDeviceTypeVirtualVmxnet3, + NotSupportedDeviceForFTDeviceTypeParaVirtualSCSIController, + } } +func (e NotSupportedDeviceForFTDeviceType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["NotSupportedDeviceForFTDeviceType"] = reflect.TypeOf((*NotSupportedDeviceForFTDeviceType)(nil)).Elem() +} + +// Reasons why the number of virtual CPUs is incompatible. type NumVirtualCpusIncompatibleReason string const ( - // - // // Deprecated as of vSphere API 6.0. // // The virtual machine needs to support record/replay functionality. 
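Because every enum in these hunks now exposes the same generated Strings() method, callers can treat them uniformly. A hedged sketch: stringer is a hypothetical local interface (not a govmomi type) satisfied by any of the enums touched here.

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

// stringer is satisfied by any govmomi enum carrying the generated
// Strings() method.
type stringer interface {
	Strings() []string
}

func describe(name string, e stringer) {
	fmt.Printf("%s: %v\n", name, e.Strings())
}

func main() {
	describe("NetIpConfigInfoIpAddressOrigin", types.NetIpConfigInfoIpAddressOrigin(""))
	describe("NotSupportedDeviceForFTDeviceType", types.NotSupportedDeviceForFTDeviceType(""))
}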
@@ -5326,11 +8455,22 @@ const ( NumVirtualCpusIncompatibleReasonFaultTolerance = NumVirtualCpusIncompatibleReason("faultTolerance") ) -func init() { - t["NumVirtualCpusIncompatibleReason"] = reflect.TypeOf((*NumVirtualCpusIncompatibleReason)(nil)).Elem() - minAPIVersionForType["NumVirtualCpusIncompatibleReason"] = "4.0" +func (e NumVirtualCpusIncompatibleReason) Values() []NumVirtualCpusIncompatibleReason { + return []NumVirtualCpusIncompatibleReason{ + NumVirtualCpusIncompatibleReasonRecordReplay, + NumVirtualCpusIncompatibleReasonFaultTolerance, + } } +func (e NumVirtualCpusIncompatibleReason) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["NumVirtualCpusIncompatibleReason"] = reflect.TypeOf((*NumVirtualCpusIncompatibleReason)(nil)).Elem() +} + +// State of interleave set type NvdimmInterleaveSetState string const ( @@ -5340,11 +8480,22 @@ const ( NvdimmInterleaveSetStateActive = NvdimmInterleaveSetState("active") ) -func init() { - t["NvdimmInterleaveSetState"] = reflect.TypeOf((*NvdimmInterleaveSetState)(nil)).Elem() - minAPIVersionForType["NvdimmInterleaveSetState"] = "6.7" +func (e NvdimmInterleaveSetState) Values() []NvdimmInterleaveSetState { + return []NvdimmInterleaveSetState{ + NvdimmInterleaveSetStateInvalid, + NvdimmInterleaveSetStateActive, + } } +func (e NvdimmInterleaveSetState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["NvdimmInterleaveSetState"] = reflect.TypeOf((*NvdimmInterleaveSetState)(nil)).Elem() +} + +// Overall health state for a namespace type NvdimmNamespaceDetailsHealthStatus string const ( @@ -5360,11 +8511,25 @@ const ( NvdimmNamespaceDetailsHealthStatusLabelInconsistent = NvdimmNamespaceDetailsHealthStatus("labelInconsistent") ) -func init() { - t["NvdimmNamespaceDetailsHealthStatus"] = reflect.TypeOf((*NvdimmNamespaceDetailsHealthStatus)(nil)).Elem() - minAPIVersionForType["NvdimmNamespaceDetailsHealthStatus"] = "6.7.1" +func (e NvdimmNamespaceDetailsHealthStatus) Values() []NvdimmNamespaceDetailsHealthStatus { + return []NvdimmNamespaceDetailsHealthStatus{ + NvdimmNamespaceDetailsHealthStatusNormal, + NvdimmNamespaceDetailsHealthStatusMissing, + NvdimmNamespaceDetailsHealthStatusLabelMissing, + NvdimmNamespaceDetailsHealthStatusInterleaveBroken, + NvdimmNamespaceDetailsHealthStatusLabelInconsistent, + } } +func (e NvdimmNamespaceDetailsHealthStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["NvdimmNamespaceDetailsHealthStatus"] = reflect.TypeOf((*NvdimmNamespaceDetailsHealthStatus)(nil)).Elem() +} + +// State of Namespace type NvdimmNamespaceDetailsState string const ( @@ -5376,11 +8541,23 @@ const ( NvdimmNamespaceDetailsStateInUse = NvdimmNamespaceDetailsState("inUse") ) -func init() { - t["NvdimmNamespaceDetailsState"] = reflect.TypeOf((*NvdimmNamespaceDetailsState)(nil)).Elem() - minAPIVersionForType["NvdimmNamespaceDetailsState"] = "6.7.1" +func (e NvdimmNamespaceDetailsState) Values() []NvdimmNamespaceDetailsState { + return []NvdimmNamespaceDetailsState{ + NvdimmNamespaceDetailsStateInvalid, + NvdimmNamespaceDetailsStateNotInUse, + NvdimmNamespaceDetailsStateInUse, + } } +func (e NvdimmNamespaceDetailsState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["NvdimmNamespaceDetailsState"] = reflect.TypeOf((*NvdimmNamespaceDetailsState)(nil)).Elem() +} + +// Overall health state for a namespace type NvdimmNamespaceHealthStatus string const ( @@ -5400,11 +8577,27 @@ const ( 
NvdimmNamespaceHealthStatusBadBlockSize = NvdimmNamespaceHealthStatus("badBlockSize") ) -func init() { - t["NvdimmNamespaceHealthStatus"] = reflect.TypeOf((*NvdimmNamespaceHealthStatus)(nil)).Elem() - minAPIVersionForType["NvdimmNamespaceHealthStatus"] = "6.7" +func (e NvdimmNamespaceHealthStatus) Values() []NvdimmNamespaceHealthStatus { + return []NvdimmNamespaceHealthStatus{ + NvdimmNamespaceHealthStatusNormal, + NvdimmNamespaceHealthStatusMissing, + NvdimmNamespaceHealthStatusLabelMissing, + NvdimmNamespaceHealthStatusInterleaveBroken, + NvdimmNamespaceHealthStatusLabelInconsistent, + NvdimmNamespaceHealthStatusBttCorrupt, + NvdimmNamespaceHealthStatusBadBlockSize, + } } +func (e NvdimmNamespaceHealthStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["NvdimmNamespaceHealthStatus"] = reflect.TypeOf((*NvdimmNamespaceHealthStatus)(nil)).Elem() +} + +// State of Namespace type NvdimmNamespaceState string const ( @@ -5416,11 +8609,23 @@ const ( NvdimmNamespaceStateInUse = NvdimmNamespaceState("inUse") ) -func init() { - t["NvdimmNamespaceState"] = reflect.TypeOf((*NvdimmNamespaceState)(nil)).Elem() - minAPIVersionForType["NvdimmNamespaceState"] = "6.7" +func (e NvdimmNamespaceState) Values() []NvdimmNamespaceState { + return []NvdimmNamespaceState{ + NvdimmNamespaceStateInvalid, + NvdimmNamespaceStateNotInUse, + NvdimmNamespaceStateInUse, + } } +func (e NvdimmNamespaceState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["NvdimmNamespaceState"] = reflect.TypeOf((*NvdimmNamespaceState)(nil)).Elem() +} + +// Type of namespace. type NvdimmNamespaceType string const ( @@ -5430,11 +8635,22 @@ const ( NvdimmNamespaceTypePersistentNamespace = NvdimmNamespaceType("persistentNamespace") ) -func init() { - t["NvdimmNamespaceType"] = reflect.TypeOf((*NvdimmNamespaceType)(nil)).Elem() - minAPIVersionForType["NvdimmNamespaceType"] = "6.7" +func (e NvdimmNamespaceType) Values() []NvdimmNamespaceType { + return []NvdimmNamespaceType{ + NvdimmNamespaceTypeBlockNamespace, + NvdimmNamespaceTypePersistentNamespace, + } } +func (e NvdimmNamespaceType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["NvdimmNamespaceType"] = reflect.TypeOf((*NvdimmNamespaceType)(nil)).Elem() +} + +// Overall state of NVDIMM type NvdimmNvdimmHealthInfoState string const ( @@ -5446,11 +8662,22 @@ const ( NvdimmNvdimmHealthInfoStateError = NvdimmNvdimmHealthInfoState("error") ) -func init() { - t["NvdimmNvdimmHealthInfoState"] = reflect.TypeOf((*NvdimmNvdimmHealthInfoState)(nil)).Elem() - minAPIVersionForType["NvdimmNvdimmHealthInfoState"] = "6.7" +func (e NvdimmNvdimmHealthInfoState) Values() []NvdimmNvdimmHealthInfoState { + return []NvdimmNvdimmHealthInfoState{ + NvdimmNvdimmHealthInfoStateNormal, + NvdimmNvdimmHealthInfoStateError, + } } +func (e NvdimmNvdimmHealthInfoState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["NvdimmNvdimmHealthInfoState"] = reflect.TypeOf((*NvdimmNvdimmHealthInfoState)(nil)).Elem() +} + +// An indicator of how a memory range is being used type NvdimmRangeType string const ( @@ -5472,9 +8699,25 @@ const ( NvdimmRangeTypePersistentVirtualCDRange = NvdimmRangeType("persistentVirtualCDRange") ) +func (e NvdimmRangeType) Values() []NvdimmRangeType { + return []NvdimmRangeType{ + NvdimmRangeTypeVolatileRange, + NvdimmRangeTypePersistentRange, + NvdimmRangeTypeControlRange, + NvdimmRangeTypeBlockRange, + NvdimmRangeTypeVolatileVirtualDiskRange, + 
NvdimmRangeTypeVolatileVirtualCDRange, + NvdimmRangeTypePersistentVirtualDiskRange, + NvdimmRangeTypePersistentVirtualCDRange, + } +} + +func (e NvdimmRangeType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["NvdimmRangeType"] = reflect.TypeOf((*NvdimmRangeType)(nil)).Elem() - minAPIVersionForType["NvdimmRangeType"] = "6.7" } // Enumeration of different kinds of updates. @@ -5495,6 +8738,18 @@ const ( ObjectUpdateKindLeave = ObjectUpdateKind("leave") ) +func (e ObjectUpdateKind) Values() []ObjectUpdateKind { + return []ObjectUpdateKind{ + ObjectUpdateKindModify, + ObjectUpdateKindEnter, + ObjectUpdateKindLeave, + } +} + +func (e ObjectUpdateKind) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ObjectUpdateKind"] = reflect.TypeOf((*ObjectUpdateKind)(nil)).Elem() } @@ -5502,6 +8757,7 @@ func init() { // The type of an OST node. // // Each OST node corresponds to an element in the OVF descriptor. See `OvfConsumerOstNode` +// for a description of the different node types. type OvfConsumerOstNodeType string const ( @@ -5510,12 +8766,24 @@ const ( OvfConsumerOstNodeTypeVirtualSystemCollection = OvfConsumerOstNodeType("virtualSystemCollection") ) +func (e OvfConsumerOstNodeType) Values() []OvfConsumerOstNodeType { + return []OvfConsumerOstNodeType{ + OvfConsumerOstNodeTypeEnvelope, + OvfConsumerOstNodeTypeVirtualSystem, + OvfConsumerOstNodeTypeVirtualSystemCollection, + } +} + +func (e OvfConsumerOstNodeType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["OvfConsumerOstNodeType"] = reflect.TypeOf((*OvfConsumerOstNodeType)(nil)).Elem() - minAPIVersionForType["OvfConsumerOstNodeType"] = "5.0" } // Types of disk provisioning that can be set for the disk in the deployed OVF +// package. type OvfCreateImportSpecParamsDiskProvisioningType string const ( @@ -5565,13 +8833,27 @@ const ( OvfCreateImportSpecParamsDiskProvisioningTypeFlat = OvfCreateImportSpecParamsDiskProvisioningType("flat") ) +func (e OvfCreateImportSpecParamsDiskProvisioningType) Values() []OvfCreateImportSpecParamsDiskProvisioningType { + return []OvfCreateImportSpecParamsDiskProvisioningType{ + OvfCreateImportSpecParamsDiskProvisioningTypeMonolithicSparse, + OvfCreateImportSpecParamsDiskProvisioningTypeMonolithicFlat, + OvfCreateImportSpecParamsDiskProvisioningTypeTwoGbMaxExtentSparse, + OvfCreateImportSpecParamsDiskProvisioningTypeTwoGbMaxExtentFlat, + OvfCreateImportSpecParamsDiskProvisioningTypeThin, + OvfCreateImportSpecParamsDiskProvisioningTypeThick, + OvfCreateImportSpecParamsDiskProvisioningTypeSeSparse, + OvfCreateImportSpecParamsDiskProvisioningTypeEagerZeroedThick, + OvfCreateImportSpecParamsDiskProvisioningTypeSparse, + OvfCreateImportSpecParamsDiskProvisioningTypeFlat, + } +} + +func (e OvfCreateImportSpecParamsDiskProvisioningType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["OvfCreateImportSpecParamsDiskProvisioningType"] = reflect.TypeOf((*OvfCreateImportSpecParamsDiskProvisioningType)(nil)).Elem() - minAPIVersionForType["OvfCreateImportSpecParamsDiskProvisioningType"] = "4.1" - minAPIVersionForEnumValue["OvfCreateImportSpecParamsDiskProvisioningType"] = map[string]string{ - "seSparse": "5.1", - "eagerZeroedThick": "5.0", - } } // The format in which performance counter data is returned. 
@@ -5584,6 +8866,17 @@ const ( PerfFormatCsv = PerfFormat("csv") ) +func (e PerfFormat) Values() []PerfFormat { + return []PerfFormat{ + PerfFormatNormal, + PerfFormatCsv, + } +} + +func (e PerfFormat) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["PerfFormat"] = reflect.TypeOf((*PerfFormat)(nil)).Elem() } @@ -5619,6 +8912,18 @@ const ( PerfStatsTypeRate = PerfStatsType("rate") ) +func (e PerfStatsType) Values() []PerfStatsType { + return []PerfStatsType{ + PerfStatsTypeAbsolute, + PerfStatsTypeDelta, + PerfStatsTypeRate, + } +} + +func (e PerfStatsType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["PerfStatsType"] = reflect.TypeOf((*PerfStatsType)(nil)).Elem() } @@ -5647,6 +8952,21 @@ const ( PerfSummaryTypeNone = PerfSummaryType("none") ) +func (e PerfSummaryType) Values() []PerfSummaryType { + return []PerfSummaryType{ + PerfSummaryTypeAverage, + PerfSummaryTypeMaximum, + PerfSummaryTypeMinimum, + PerfSummaryTypeLatest, + PerfSummaryTypeSummation, + PerfSummaryTypeNone, + } +} + +func (e PerfSummaryType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["PerfSummaryType"] = reflect.TypeOf((*PerfSummaryType)(nil)).Elem() } @@ -5691,15 +9011,34 @@ const ( PerformanceManagerUnitNanosecond = PerformanceManagerUnit("nanosecond") ) +func (e PerformanceManagerUnit) Values() []PerformanceManagerUnit { + return []PerformanceManagerUnit{ + PerformanceManagerUnitPercent, + PerformanceManagerUnitKiloBytes, + PerformanceManagerUnitMegaBytes, + PerformanceManagerUnitMegaHertz, + PerformanceManagerUnitNumber, + PerformanceManagerUnitMicrosecond, + PerformanceManagerUnitMillisecond, + PerformanceManagerUnitSecond, + PerformanceManagerUnitKiloBytesPerSecond, + PerformanceManagerUnitMegaBytesPerSecond, + PerformanceManagerUnitWatt, + PerformanceManagerUnitJoule, + PerformanceManagerUnitTeraBytes, + PerformanceManagerUnitCelsius, + PerformanceManagerUnitNanosecond, + } +} + +func (e PerformanceManagerUnit) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["PerformanceManagerUnit"] = reflect.TypeOf((*PerformanceManagerUnit)(nil)).Elem() minAPIVersionForEnumValue["PerformanceManagerUnit"] = map[string]string{ - "microsecond": "4.0", - "watt": "4.1", - "joule": "4.1", - "teraBytes": "6.0", - "celsius": "6.5", - "nanosecond": "8.0.0.1", + "nanosecond": "8.0.0.1", } } @@ -5714,20 +9053,40 @@ const ( PhysicalNicResourcePoolSchedulerDisallowedReasonHardwareUnsupported = PhysicalNicResourcePoolSchedulerDisallowedReason("hardwareUnsupported") ) -func init() { - t["PhysicalNicResourcePoolSchedulerDisallowedReason"] = reflect.TypeOf((*PhysicalNicResourcePoolSchedulerDisallowedReason)(nil)).Elem() - minAPIVersionForType["PhysicalNicResourcePoolSchedulerDisallowedReason"] = "4.1" +func (e PhysicalNicResourcePoolSchedulerDisallowedReason) Values() []PhysicalNicResourcePoolSchedulerDisallowedReason { + return []PhysicalNicResourcePoolSchedulerDisallowedReason{ + PhysicalNicResourcePoolSchedulerDisallowedReasonUserOptOut, + PhysicalNicResourcePoolSchedulerDisallowedReasonHardwareUnsupported, + } } +func (e PhysicalNicResourcePoolSchedulerDisallowedReason) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["PhysicalNicResourcePoolSchedulerDisallowedReason"] = reflect.TypeOf((*PhysicalNicResourcePoolSchedulerDisallowedReason)(nil)).Elem() +} + +// Set of possible values for `PhysicalNic.vmDirectPathGen2SupportedMode`. 
type PhysicalNicVmDirectPathGen2SupportedMode string const ( PhysicalNicVmDirectPathGen2SupportedModeUpt = PhysicalNicVmDirectPathGen2SupportedMode("upt") ) +func (e PhysicalNicVmDirectPathGen2SupportedMode) Values() []PhysicalNicVmDirectPathGen2SupportedMode { + return []PhysicalNicVmDirectPathGen2SupportedMode{ + PhysicalNicVmDirectPathGen2SupportedModeUpt, + } +} + +func (e PhysicalNicVmDirectPathGen2SupportedMode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["PhysicalNicVmDirectPathGen2SupportedMode"] = reflect.TypeOf((*PhysicalNicVmDirectPathGen2SupportedMode)(nil)).Elem() - minAPIVersionForType["PhysicalNicVmDirectPathGen2SupportedMode"] = "4.1" } // Rule scope determines conditions when an affinity rule is @@ -5737,6 +9096,7 @@ func init() { // cluster: All Vms in the rule list are placed in a single cluster. // host: All Vms in the rule list are placed in a single host. // storagePod: All Vms in the rule list are placed in a single storagePod. +// datastore: All Vms in the rule list are placed in a single datastore. type PlacementAffinityRuleRuleScope string const ( @@ -5750,9 +9110,21 @@ const ( PlacementAffinityRuleRuleScopeDatastore = PlacementAffinityRuleRuleScope("datastore") ) +func (e PlacementAffinityRuleRuleScope) Values() []PlacementAffinityRuleRuleScope { + return []PlacementAffinityRuleRuleScope{ + PlacementAffinityRuleRuleScopeCluster, + PlacementAffinityRuleRuleScopeHost, + PlacementAffinityRuleRuleScopeStoragePod, + PlacementAffinityRuleRuleScopeDatastore, + } +} + +func (e PlacementAffinityRuleRuleScope) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["PlacementAffinityRuleRuleScope"] = reflect.TypeOf((*PlacementAffinityRuleRuleScope)(nil)).Elem() - minAPIVersionForType["PlacementAffinityRuleRuleScope"] = "6.0" } // Rule type determines how the affinity rule is to be enforced: @@ -5761,6 +9133,7 @@ func init() { // // anti-affinity: Vms in the rule list are kept separate // across the objects in the rule scope. +// soft rule: The enforcement is best effort. 
type PlacementAffinityRuleRuleType string const ( @@ -5774,11 +9147,24 @@ const ( PlacementAffinityRuleRuleTypeSoftAntiAffinity = PlacementAffinityRuleRuleType("softAntiAffinity") ) -func init() { - t["PlacementAffinityRuleRuleType"] = reflect.TypeOf((*PlacementAffinityRuleRuleType)(nil)).Elem() - minAPIVersionForType["PlacementAffinityRuleRuleType"] = "6.0" +func (e PlacementAffinityRuleRuleType) Values() []PlacementAffinityRuleRuleType { + return []PlacementAffinityRuleRuleType{ + PlacementAffinityRuleRuleTypeAffinity, + PlacementAffinityRuleRuleTypeAntiAffinity, + PlacementAffinityRuleRuleTypeSoftAffinity, + PlacementAffinityRuleRuleTypeSoftAntiAffinity, + } } +func (e PlacementAffinityRuleRuleType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["PlacementAffinityRuleRuleType"] = reflect.TypeOf((*PlacementAffinityRuleRuleType)(nil)).Elem() +} + +// Defines the type of placement type PlacementSpecPlacementType string const ( @@ -5792,9 +9178,21 @@ const ( PlacementSpecPlacementTypeClone = PlacementSpecPlacementType("clone") ) +func (e PlacementSpecPlacementType) Values() []PlacementSpecPlacementType { + return []PlacementSpecPlacementType{ + PlacementSpecPlacementTypeCreate, + PlacementSpecPlacementTypeReconfigure, + PlacementSpecPlacementTypeRelocate, + PlacementSpecPlacementTypeClone, + } +} + +func (e PlacementSpecPlacementType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["PlacementSpecPlacementType"] = reflect.TypeOf((*PlacementSpecPlacementType)(nil)).Elem() - minAPIVersionForType["PlacementSpecPlacementType"] = "6.0" } // The type of component connected to a port group. @@ -5812,6 +9210,19 @@ const ( PortGroupConnecteeTypeUnknown = PortGroupConnecteeType("unknown") ) +func (e PortGroupConnecteeType) Values() []PortGroupConnecteeType { + return []PortGroupConnecteeType{ + PortGroupConnecteeTypeVirtualMachine, + PortGroupConnecteeTypeSystemManagement, + PortGroupConnecteeTypeHost, + PortGroupConnecteeTypeUnknown, + } +} + +func (e PortGroupConnecteeType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["PortGroupConnecteeType"] = reflect.TypeOf((*PortGroupConnecteeType)(nil)).Elem() } @@ -5821,6 +9232,7 @@ func init() { // operation. // // The result data is contained in the +// `ProfileExecuteResult` data object. type ProfileExecuteResultStatus string const ( @@ -5840,12 +9252,24 @@ const ( ProfileExecuteResultStatusError = ProfileExecuteResultStatus("error") ) +func (e ProfileExecuteResultStatus) Values() []ProfileExecuteResultStatus { + return []ProfileExecuteResultStatus{ + ProfileExecuteResultStatusSuccess, + ProfileExecuteResultStatusNeedInput, + ProfileExecuteResultStatusError, + } +} + +func (e ProfileExecuteResultStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ProfileExecuteResultStatus"] = reflect.TypeOf((*ProfileExecuteResultStatus)(nil)).Elem() - minAPIVersionForType["ProfileExecuteResultStatus"] = "4.0" } // Enumerates different operations supported for comparing +// numerical values. 
type ProfileNumericComparator string const ( @@ -5857,11 +9281,26 @@ const ( ProfileNumericComparatorGreaterThan = ProfileNumericComparator("greaterThan") ) -func init() { - t["ProfileNumericComparator"] = reflect.TypeOf((*ProfileNumericComparator)(nil)).Elem() - minAPIVersionForType["ProfileNumericComparator"] = "4.0" +func (e ProfileNumericComparator) Values() []ProfileNumericComparator { + return []ProfileNumericComparator{ + ProfileNumericComparatorLessThan, + ProfileNumericComparatorLessThanEqual, + ProfileNumericComparatorEqual, + ProfileNumericComparatorNotEqual, + ProfileNumericComparatorGreaterThanEqual, + ProfileNumericComparatorGreaterThan, + } } +func (e ProfileNumericComparator) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["ProfileNumericComparator"] = reflect.TypeOf((*ProfileNumericComparator)(nil)).Elem() +} + +// The relation type to be supported. type ProfileParameterMetadataRelationType string const ( @@ -5878,9 +9317,22 @@ const ( ProfileParameterMetadataRelationTypeValidation_relation = ProfileParameterMetadataRelationType("validation_relation") ) +func (e ProfileParameterMetadataRelationType) Values() []ProfileParameterMetadataRelationType { + return []ProfileParameterMetadataRelationType{ + ProfileParameterMetadataRelationTypeDynamic_relation, + ProfileParameterMetadataRelationTypeExtensible_relation, + ProfileParameterMetadataRelationTypeLocalizable_relation, + ProfileParameterMetadataRelationTypeStatic_relation, + ProfileParameterMetadataRelationTypeValidation_relation, + } +} + +func (e ProfileParameterMetadataRelationType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ProfileParameterMetadataRelationType"] = reflect.TypeOf((*ProfileParameterMetadataRelationType)(nil)).Elem() - minAPIVersionForType["ProfileParameterMetadataRelationType"] = "6.7" } // Enumeration of possible changes to a property. @@ -5893,6 +9345,19 @@ const ( PropertyChangeOpIndirectRemove = PropertyChangeOp("indirectRemove") ) +func (e PropertyChangeOp) Values() []PropertyChangeOp { + return []PropertyChangeOp{ + PropertyChangeOpAdd, + PropertyChangeOpRemove, + PropertyChangeOpAssign, + PropertyChangeOpIndirectRemove, + } +} + +func (e PropertyChangeOp) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["PropertyChangeOp"] = reflect.TypeOf((*PropertyChangeOp)(nil)).Elem() } @@ -5915,9 +9380,20 @@ const ( QuarantineModeFaultFaultTypeCorrectionImpact = QuarantineModeFaultFaultType("CorrectionImpact") ) +func (e QuarantineModeFaultFaultType) Values() []QuarantineModeFaultFaultType { + return []QuarantineModeFaultFaultType{ + QuarantineModeFaultFaultTypeNoCompatibleNonQuarantinedHost, + QuarantineModeFaultFaultTypeCorrectionDisallowed, + QuarantineModeFaultFaultTypeCorrectionImpact, + } +} + +func (e QuarantineModeFaultFaultType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["QuarantineModeFaultFaultType"] = reflect.TypeOf((*QuarantineModeFaultFaultType)(nil)).Elem() - minAPIVersionForType["QuarantineModeFaultFaultType"] = "6.5" } // Quiescing is a boolean flag in `ReplicationConfigSpec` @@ -5927,6 +9403,7 @@ func init() { // If application quiescing fails, HBR would attempt // filesystem quiescing and if even filesystem quiescing // fails, then we would just create a crash consistent +// instance. 
type QuiesceMode string const ( @@ -5941,11 +9418,23 @@ const ( QuiesceModeNone = QuiesceMode("none") ) -func init() { - t["QuiesceMode"] = reflect.TypeOf((*QuiesceMode)(nil)).Elem() - minAPIVersionForType["QuiesceMode"] = "6.0" +func (e QuiesceMode) Values() []QuiesceMode { + return []QuiesceMode{ + QuiesceModeApplication, + QuiesceModeFilesystem, + QuiesceModeNone, + } } +func (e QuiesceMode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["QuiesceMode"] = reflect.TypeOf((*QuiesceMode)(nil)).Elem() +} + +// List of defined migration reason codes: type RecommendationReasonCode string const ( @@ -5981,8 +9470,12 @@ const ( RecommendationReasonCodeVmHostSoftAffinity = RecommendationReasonCode("vmHostSoftAffinity") // Balance datastore space usage. RecommendationReasonCodeBalanceDatastoreSpaceUsage = RecommendationReasonCode("balanceDatastoreSpaceUsage") + // Deprecated as of vSphere8.0 U3, and there is no replacement for it. + // // Balance datastore I/O workload. RecommendationReasonCodeBalanceDatastoreIOLoad = RecommendationReasonCode("balanceDatastoreIOLoad") + // Deprecated as of vSphere8.0 U3, and there is no replacement for it. + // // Balance datastore IOPS reservation RecommendationReasonCodeBalanceDatastoreIOPSReservation = RecommendationReasonCode("balanceDatastoreIOPSReservation") // Datastore entering maintenance mode. @@ -5995,6 +9488,8 @@ const ( RecommendationReasonCodeDatastoreSpaceOutage = RecommendationReasonCode("datastoreSpaceOutage") // Satisfy storage initial placement requests. RecommendationReasonCodeStoragePlacement = RecommendationReasonCode("storagePlacement") + // Deprecated as of vSphere8.0 U3, and there is no replacement for it. + // // IO load balancing was disabled internally. RecommendationReasonCodeIolbDisabledInternal = RecommendationReasonCode("iolbDisabledInternal") // Satisfy unified vmotion placement requests. 
@@ -6019,51 +9514,86 @@ const ( RecommendationReasonCodeBalanceVsanUsage = RecommendationReasonCode("balanceVsanUsage") // Optimize assignable hardware resource orchestration RecommendationReasonCodeAhPlacementOptimization = RecommendationReasonCode("ahPlacementOptimization") + // Upgrade virtual machine to new vmx binary + RecommendationReasonCodeVmxUpgrade = RecommendationReasonCode("vmxUpgrade") ) +func (e RecommendationReasonCode) Values() []RecommendationReasonCode { + return []RecommendationReasonCode{ + RecommendationReasonCodeFairnessCpuAvg, + RecommendationReasonCodeFairnessMemAvg, + RecommendationReasonCodeJointAffin, + RecommendationReasonCodeAntiAffin, + RecommendationReasonCodeHostMaint, + RecommendationReasonCodeEnterStandby, + RecommendationReasonCodeReservationCpu, + RecommendationReasonCodeReservationMem, + RecommendationReasonCodePowerOnVm, + RecommendationReasonCodePowerSaving, + RecommendationReasonCodeIncreaseCapacity, + RecommendationReasonCodeCheckResource, + RecommendationReasonCodeUnreservedCapacity, + RecommendationReasonCodeVmHostHardAffinity, + RecommendationReasonCodeVmHostSoftAffinity, + RecommendationReasonCodeBalanceDatastoreSpaceUsage, + RecommendationReasonCodeBalanceDatastoreIOLoad, + RecommendationReasonCodeBalanceDatastoreIOPSReservation, + RecommendationReasonCodeDatastoreMaint, + RecommendationReasonCodeVirtualDiskJointAffin, + RecommendationReasonCodeVirtualDiskAntiAffin, + RecommendationReasonCodeDatastoreSpaceOutage, + RecommendationReasonCodeStoragePlacement, + RecommendationReasonCodeIolbDisabledInternal, + RecommendationReasonCodeXvmotionPlacement, + RecommendationReasonCodeNetworkBandwidthReservation, + RecommendationReasonCodeHostInDegradation, + RecommendationReasonCodeHostExitDegradation, + RecommendationReasonCodeMaxVmsConstraint, + RecommendationReasonCodeFtConstraints, + RecommendationReasonCodeVmHostAffinityPolicy, + RecommendationReasonCodeVmHostAntiAffinityPolicy, + RecommendationReasonCodeVmAntiAffinityPolicy, + RecommendationReasonCodeBalanceVsanUsage, + RecommendationReasonCodeAhPlacementOptimization, + RecommendationReasonCodeVmxUpgrade, + } +} + +func (e RecommendationReasonCode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["RecommendationReasonCode"] = reflect.TypeOf((*RecommendationReasonCode)(nil)).Elem() - minAPIVersionForType["RecommendationReasonCode"] = "2.5" minAPIVersionForEnumValue["RecommendationReasonCode"] = map[string]string{ - "checkResource": "4.0", - "unreservedCapacity": "4.0", - "vmHostHardAffinity": "4.1", - "vmHostSoftAffinity": "4.1", - "balanceDatastoreSpaceUsage": "5.0", - "balanceDatastoreIOLoad": "5.0", - "balanceDatastoreIOPSReservation": "6.0", - "datastoreMaint": "5.0", - "virtualDiskJointAffin": "5.0", - "virtualDiskAntiAffin": "5.0", - "datastoreSpaceOutage": "5.0", - "storagePlacement": "5.0", - "iolbDisabledInternal": "5.0", - "xvmotionPlacement": "6.0", - "networkBandwidthReservation": "6.0", - "hostInDegradation": "6.5", - "hostExitDegradation": "6.5", - "maxVmsConstraint": "6.5", - "ftConstraints": "6.5", - "vmHostAffinityPolicy": "6.8.7", - "vmHostAntiAffinityPolicy": "6.8.7", - "vmAntiAffinityPolicy": "6.8.7", - "balanceVsanUsage": "7.0.2.0", - "ahPlacementOptimization": "8.0.2.0", + "balanceVsanUsage": "7.0.2.0", + "ahPlacementOptimization": "8.0.2.0", + "vmxUpgrade": "8.0.3.0", } } // Pre-defined constants for possible recommendation types. // // Virtual Center +// uses this information to coordinate with the clients. 
type RecommendationType string const ( RecommendationTypeV1 = RecommendationType("V1") ) +func (e RecommendationType) Values() []RecommendationType { + return []RecommendationType{ + RecommendationTypeV1, + } +} + +func (e RecommendationType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["RecommendationType"] = reflect.TypeOf((*RecommendationType)(nil)).Elem() - minAPIVersionForType["RecommendationType"] = "2.5" } type ReplicationDiskConfigFaultReasonForFault string @@ -6085,9 +9615,24 @@ const ( ReplicationDiskConfigFaultReasonForFaultReconfigureDiskReplicationIdNotAllowed = ReplicationDiskConfigFaultReasonForFault("reconfigureDiskReplicationIdNotAllowed") ) +func (e ReplicationDiskConfigFaultReasonForFault) Values() []ReplicationDiskConfigFaultReasonForFault { + return []ReplicationDiskConfigFaultReasonForFault{ + ReplicationDiskConfigFaultReasonForFaultDiskNotFound, + ReplicationDiskConfigFaultReasonForFaultDiskTypeNotSupported, + ReplicationDiskConfigFaultReasonForFaultInvalidDiskKey, + ReplicationDiskConfigFaultReasonForFaultInvalidDiskReplicationId, + ReplicationDiskConfigFaultReasonForFaultDuplicateDiskReplicationId, + ReplicationDiskConfigFaultReasonForFaultInvalidPersistentFilePath, + ReplicationDiskConfigFaultReasonForFaultReconfigureDiskReplicationIdNotAllowed, + } +} + +func (e ReplicationDiskConfigFaultReasonForFault) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ReplicationDiskConfigFaultReasonForFault"] = reflect.TypeOf((*ReplicationDiskConfigFaultReasonForFault)(nil)).Elem() - minAPIVersionForType["ReplicationDiskConfigFaultReasonForFault"] = "5.0" } type ReplicationVmConfigFaultReasonForFault string @@ -6131,14 +9676,34 @@ const ( ReplicationVmConfigFaultReasonForFaultIncompatibleDevice = ReplicationVmConfigFaultReasonForFault("incompatibleDevice") ) +func (e ReplicationVmConfigFaultReasonForFault) Values() []ReplicationVmConfigFaultReasonForFault { + return []ReplicationVmConfigFaultReasonForFault{ + ReplicationVmConfigFaultReasonForFaultIncompatibleHwVersion, + ReplicationVmConfigFaultReasonForFaultInvalidVmReplicationId, + ReplicationVmConfigFaultReasonForFaultInvalidGenerationNumber, + ReplicationVmConfigFaultReasonForFaultOutOfBoundsRpoValue, + ReplicationVmConfigFaultReasonForFaultInvalidDestinationIpAddress, + ReplicationVmConfigFaultReasonForFaultInvalidDestinationPort, + ReplicationVmConfigFaultReasonForFaultInvalidExtraVmOptions, + ReplicationVmConfigFaultReasonForFaultStaleGenerationNumber, + ReplicationVmConfigFaultReasonForFaultReconfigureVmReplicationIdNotAllowed, + ReplicationVmConfigFaultReasonForFaultCannotRetrieveVmReplicationConfiguration, + ReplicationVmConfigFaultReasonForFaultReplicationAlreadyEnabled, + ReplicationVmConfigFaultReasonForFaultInvalidPriorConfiguration, + ReplicationVmConfigFaultReasonForFaultReplicationNotEnabled, + ReplicationVmConfigFaultReasonForFaultReplicationConfigurationFailed, + ReplicationVmConfigFaultReasonForFaultEncryptedVm, + ReplicationVmConfigFaultReasonForFaultInvalidThumbprint, + ReplicationVmConfigFaultReasonForFaultIncompatibleDevice, + } +} + +func (e ReplicationVmConfigFaultReasonForFault) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ReplicationVmConfigFaultReasonForFault"] = reflect.TypeOf((*ReplicationVmConfigFaultReasonForFault)(nil)).Elem() - minAPIVersionForType["ReplicationVmConfigFaultReasonForFault"] = "5.0" - minAPIVersionForEnumValue["ReplicationVmConfigFaultReasonForFault"] = 
map[string]string{ - "encryptedVm": "6.5", - "invalidThumbprint": "6.7", - "incompatibleDevice": "6.7", - } } type ReplicationVmFaultReasonForFault string @@ -6173,13 +9738,26 @@ const ( ReplicationVmFaultReasonForFaultGroupExist = ReplicationVmFaultReasonForFault("groupExist") ) +func (e ReplicationVmFaultReasonForFault) Values() []ReplicationVmFaultReasonForFault { + return []ReplicationVmFaultReasonForFault{ + ReplicationVmFaultReasonForFaultNotConfigured, + ReplicationVmFaultReasonForFaultPoweredOff, + ReplicationVmFaultReasonForFaultSuspended, + ReplicationVmFaultReasonForFaultPoweredOn, + ReplicationVmFaultReasonForFaultOfflineReplicating, + ReplicationVmFaultReasonForFaultInvalidState, + ReplicationVmFaultReasonForFaultInvalidInstanceId, + ReplicationVmFaultReasonForFaultCloseDiskError, + ReplicationVmFaultReasonForFaultGroupExist, + } +} + +func (e ReplicationVmFaultReasonForFault) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ReplicationVmFaultReasonForFault"] = reflect.TypeOf((*ReplicationVmFaultReasonForFault)(nil)).Elem() - minAPIVersionForType["ReplicationVmFaultReasonForFault"] = "5.0" - minAPIVersionForEnumValue["ReplicationVmFaultReasonForFault"] = map[string]string{ - "closeDiskError": "6.5", - "groupExist": "6.7", - } } type ReplicationVmInProgressFaultActivity string @@ -6191,11 +9769,22 @@ const ( ReplicationVmInProgressFaultActivityDelta = ReplicationVmInProgressFaultActivity("delta") ) -func init() { - t["ReplicationVmInProgressFaultActivity"] = reflect.TypeOf((*ReplicationVmInProgressFaultActivity)(nil)).Elem() - minAPIVersionForType["ReplicationVmInProgressFaultActivity"] = "6.0" +func (e ReplicationVmInProgressFaultActivity) Values() []ReplicationVmInProgressFaultActivity { + return []ReplicationVmInProgressFaultActivity{ + ReplicationVmInProgressFaultActivityFullSync, + ReplicationVmInProgressFaultActivityDelta, + } } +func (e ReplicationVmInProgressFaultActivity) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["ReplicationVmInProgressFaultActivity"] = reflect.TypeOf((*ReplicationVmInProgressFaultActivity)(nil)).Elem() +} + +// Describes the current state of a replicated `VirtualMachine` type ReplicationVmState string const ( @@ -6222,9 +9811,23 @@ const ( ReplicationVmStateError = ReplicationVmState("error") ) +func (e ReplicationVmState) Values() []ReplicationVmState { + return []ReplicationVmState{ + ReplicationVmStateNone, + ReplicationVmStatePaused, + ReplicationVmStateSyncing, + ReplicationVmStateIdle, + ReplicationVmStateActive, + ReplicationVmStateError, + } +} + +func (e ReplicationVmState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ReplicationVmState"] = reflect.TypeOf((*ReplicationVmState)(nil)).Elem() - minAPIVersionForType["ReplicationVmState"] = "5.0" } type ResourceConfigSpecScaleSharesBehavior string @@ -6236,12 +9839,23 @@ const ( ResourceConfigSpecScaleSharesBehaviorScaleCpuAndMemoryShares = ResourceConfigSpecScaleSharesBehavior("scaleCpuAndMemoryShares") ) +func (e ResourceConfigSpecScaleSharesBehavior) Values() []ResourceConfigSpecScaleSharesBehavior { + return []ResourceConfigSpecScaleSharesBehavior{ + ResourceConfigSpecScaleSharesBehaviorDisabled, + ResourceConfigSpecScaleSharesBehaviorScaleCpuAndMemoryShares, + } +} + +func (e ResourceConfigSpecScaleSharesBehavior) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ResourceConfigSpecScaleSharesBehavior"] = 
reflect.TypeOf((*ResourceConfigSpecScaleSharesBehavior)(nil)).Elem() - minAPIVersionForType["ResourceConfigSpecScaleSharesBehavior"] = "7.0" } // The policy setting used to determine when to perform scheduled +// upgrades for a virtual machine. type ScheduledHardwareUpgradeInfoHardwareUpgradePolicy string const ( @@ -6253,11 +9867,23 @@ const ( ScheduledHardwareUpgradeInfoHardwareUpgradePolicyAlways = ScheduledHardwareUpgradeInfoHardwareUpgradePolicy("always") ) -func init() { - t["ScheduledHardwareUpgradeInfoHardwareUpgradePolicy"] = reflect.TypeOf((*ScheduledHardwareUpgradeInfoHardwareUpgradePolicy)(nil)).Elem() - minAPIVersionForType["ScheduledHardwareUpgradeInfoHardwareUpgradePolicy"] = "5.1" +func (e ScheduledHardwareUpgradeInfoHardwareUpgradePolicy) Values() []ScheduledHardwareUpgradeInfoHardwareUpgradePolicy { + return []ScheduledHardwareUpgradeInfoHardwareUpgradePolicy{ + ScheduledHardwareUpgradeInfoHardwareUpgradePolicyNever, + ScheduledHardwareUpgradeInfoHardwareUpgradePolicyOnSoftPowerOff, + ScheduledHardwareUpgradeInfoHardwareUpgradePolicyAlways, + } } +func (e ScheduledHardwareUpgradeInfoHardwareUpgradePolicy) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["ScheduledHardwareUpgradeInfoHardwareUpgradePolicy"] = reflect.TypeOf((*ScheduledHardwareUpgradeInfoHardwareUpgradePolicy)(nil)).Elem() +} + +// Status for last attempt to run scheduled hardware upgrade. type ScheduledHardwareUpgradeInfoHardwareUpgradeStatus string const ( @@ -6269,17 +9895,30 @@ const ( ScheduledHardwareUpgradeInfoHardwareUpgradeStatusSuccess = ScheduledHardwareUpgradeInfoHardwareUpgradeStatus("success") // Upgrade failed. // - // For more information about the failure + // # For more information about the failure // // See also `ScheduledHardwareUpgradeInfo.fault`. ScheduledHardwareUpgradeInfoHardwareUpgradeStatusFailed = ScheduledHardwareUpgradeInfoHardwareUpgradeStatus("failed") ) -func init() { - t["ScheduledHardwareUpgradeInfoHardwareUpgradeStatus"] = reflect.TypeOf((*ScheduledHardwareUpgradeInfoHardwareUpgradeStatus)(nil)).Elem() - minAPIVersionForType["ScheduledHardwareUpgradeInfoHardwareUpgradeStatus"] = "5.1" +func (e ScheduledHardwareUpgradeInfoHardwareUpgradeStatus) Values() []ScheduledHardwareUpgradeInfoHardwareUpgradeStatus { + return []ScheduledHardwareUpgradeInfoHardwareUpgradeStatus{ + ScheduledHardwareUpgradeInfoHardwareUpgradeStatusNone, + ScheduledHardwareUpgradeInfoHardwareUpgradeStatusPending, + ScheduledHardwareUpgradeInfoHardwareUpgradeStatusSuccess, + ScheduledHardwareUpgradeInfoHardwareUpgradeStatusFailed, + } } +func (e ScheduledHardwareUpgradeInfoHardwareUpgradeStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["ScheduledHardwareUpgradeInfoHardwareUpgradeStatus"] = reflect.TypeOf((*ScheduledHardwareUpgradeInfoHardwareUpgradeStatus)(nil)).Elem() +} + +// The types of disk drives. 
type ScsiDiskType string const ( @@ -6295,15 +9934,26 @@ const ( ScsiDiskTypeUnknown = ScsiDiskType("unknown") ) -func init() { - t["ScsiDiskType"] = reflect.TypeOf((*ScsiDiskType)(nil)).Elem() - minAPIVersionForType["ScsiDiskType"] = "6.5" - minAPIVersionForEnumValue["ScsiDiskType"] = map[string]string{ - "SoftwareEmulated4k": "6.7", +func (e ScsiDiskType) Values() []ScsiDiskType { + return []ScsiDiskType{ + ScsiDiskTypeNative512, + ScsiDiskTypeEmulated512, + ScsiDiskTypeNative4k, + ScsiDiskTypeSoftwareEmulated4k, + ScsiDiskTypeUnknown, } } +func (e ScsiDiskType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["ScsiDiskType"] = reflect.TypeOf((*ScsiDiskType)(nil)).Elem() +} + // An indicator of the utility of Descriptor in being used as an +// identifier that is stable, unique, and correlatable. type ScsiLunDescriptorQuality string const ( @@ -6321,9 +9971,48 @@ const ( ScsiLunDescriptorQualityUnknownQuality = ScsiLunDescriptorQuality("unknownQuality") ) +func (e ScsiLunDescriptorQuality) Values() []ScsiLunDescriptorQuality { + return []ScsiLunDescriptorQuality{ + ScsiLunDescriptorQualityHighQuality, + ScsiLunDescriptorQualityMediumQuality, + ScsiLunDescriptorQualityLowQuality, + ScsiLunDescriptorQualityUnknownQuality, + } +} + +func (e ScsiLunDescriptorQuality) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ScsiLunDescriptorQuality"] = reflect.TypeOf((*ScsiLunDescriptorQuality)(nil)).Elem() - minAPIVersionForType["ScsiLunDescriptorQuality"] = "4.0" +} + +type ScsiLunLunReservationStatus string + +const ( + ScsiLunLunReservationStatusLUN_RESERVED_UNKNOWN = ScsiLunLunReservationStatus("LUN_RESERVED_UNKNOWN") + ScsiLunLunReservationStatusLUN_RESERVED_YES = ScsiLunLunReservationStatus("LUN_RESERVED_YES") + ScsiLunLunReservationStatusLUN_RESERVED_NO = ScsiLunLunReservationStatus("LUN_RESERVED_NO") + ScsiLunLunReservationStatusLUN_RESERVED_NOT_SUPPORTED = ScsiLunLunReservationStatus("LUN_RESERVED_NOT_SUPPORTED") +) + +func (e ScsiLunLunReservationStatus) Values() []ScsiLunLunReservationStatus { + return []ScsiLunLunReservationStatus{ + ScsiLunLunReservationStatusLUN_RESERVED_UNKNOWN, + ScsiLunLunReservationStatusLUN_RESERVED_YES, + ScsiLunLunReservationStatusLUN_RESERVED_NO, + ScsiLunLunReservationStatusLUN_RESERVED_NOT_SUPPORTED, + } +} + +func (e ScsiLunLunReservationStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["ScsiLunLunReservationStatus"] = reflect.TypeOf((*ScsiLunLunReservationStatus)(nil)).Elem() + minAPIVersionForType["ScsiLunLunReservationStatus"] = "8.0.3.0" } // The Operational state of the LUN @@ -6353,13 +10042,25 @@ const ( ScsiLunStateTimeout = ScsiLunState("timeout") ) +func (e ScsiLunState) Values() []ScsiLunState { + return []ScsiLunState{ + ScsiLunStateUnknownState, + ScsiLunStateOk, + ScsiLunStateError, + ScsiLunStateOff, + ScsiLunStateQuiesced, + ScsiLunStateDegraded, + ScsiLunStateLostCommunication, + ScsiLunStateTimeout, + } +} + +func (e ScsiLunState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ScsiLunState"] = reflect.TypeOf((*ScsiLunState)(nil)).Elem() - minAPIVersionForEnumValue["ScsiLunState"] = map[string]string{ - "off": "4.0", - "quiesced": "4.0", - "timeout": "5.1", - } } // The list of SCSI device types. 
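The hunk above also adds a brand-new enum, ScsiLunLunReservationStatus, which (unlike the pre-existing types) still registers minAPIVersionForType, at "8.0.3.0". One practical use of the new Strings() helper is validating an externally supplied value before assigning it; a sketch under the same import-path assumption, and assuming a Go toolchain that ships the standard slices package (1.21+):

package main

import (
	"fmt"
	"slices"

	"github.com/vmware/govmomi/vim25/types"
)

// validState reports whether s names a defined ScsiLunState constant,
// using the Strings() helper introduced in this update.
func validState(s string) bool {
	return slices.Contains(types.ScsiLunState("").Strings(), s)
}

func main() {
	fmt.Println(validState("degraded")) // true: defined in the enum
	fmt.Println(validState("broken"))   // false: not a defined value
}
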
@@ -6384,6 +10085,28 @@ const ( ScsiLunTypeUnknown = ScsiLunType("unknown") ) +func (e ScsiLunType) Values() []ScsiLunType { + return []ScsiLunType{ + ScsiLunTypeDisk, + ScsiLunTypeTape, + ScsiLunTypePrinter, + ScsiLunTypeProcessor, + ScsiLunTypeWorm, + ScsiLunTypeCdrom, + ScsiLunTypeScanner, + ScsiLunTypeOpticalDevice, + ScsiLunTypeMediaChanger, + ScsiLunTypeCommunications, + ScsiLunTypeStorageArrayController, + ScsiLunTypeEnclosure, + ScsiLunTypeUnknown, + } +} + +func (e ScsiLunType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ScsiLunType"] = reflect.TypeOf((*ScsiLunType)(nil)).Elem() } @@ -6393,6 +10116,7 @@ func init() { // When a host boots, the support status is unknown. // As a host attempts hardware-accelerated operations, // it determines whether the storage device supports hardware acceleration +// and sets the `ScsiLun.vStorageSupport` property accordingly. type ScsiLunVStorageSupportStatus string const ( @@ -6409,9 +10133,20 @@ const ( ScsiLunVStorageSupportStatusVStorageUnknown = ScsiLunVStorageSupportStatus("vStorageUnknown") ) +func (e ScsiLunVStorageSupportStatus) Values() []ScsiLunVStorageSupportStatus { + return []ScsiLunVStorageSupportStatus{ + ScsiLunVStorageSupportStatusVStorageSupported, + ScsiLunVStorageSupportStatusVStorageUnsupported, + ScsiLunVStorageSupportStatusVStorageUnknown, + } +} + +func (e ScsiLunVStorageSupportStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ScsiLunVStorageSupportStatus"] = reflect.TypeOf((*ScsiLunVStorageSupportStatus)(nil)).Elem() - minAPIVersionForType["ScsiLunVStorageSupportStatus"] = "4.1" } type SessionManagerGenericServiceTicketTicketType string @@ -6425,11 +10160,24 @@ const ( SessionManagerGenericServiceTicketTicketTypeVcServiceTicket = SessionManagerGenericServiceTicketTicketType("VcServiceTicket") ) +func (e SessionManagerGenericServiceTicketTicketType) Values() []SessionManagerGenericServiceTicketTicketType { + return []SessionManagerGenericServiceTicketTicketType{ + SessionManagerGenericServiceTicketTicketTypeHttpNfcServiceTicket, + SessionManagerGenericServiceTicketTicketTypeHostServiceTicket, + SessionManagerGenericServiceTicketTicketTypeVcServiceTicket, + } +} + +func (e SessionManagerGenericServiceTicketTicketType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["SessionManagerGenericServiceTicketTicketType"] = reflect.TypeOf((*SessionManagerGenericServiceTicketTicketType)(nil)).Elem() minAPIVersionForType["SessionManagerGenericServiceTicketTicketType"] = "7.0.2.0" } +// HTTP request methods. 
type SessionManagerHttpServiceRequestSpecMethod string const ( @@ -6443,9 +10191,25 @@ const ( SessionManagerHttpServiceRequestSpecMethodHttpConnect = SessionManagerHttpServiceRequestSpecMethod("httpConnect") ) +func (e SessionManagerHttpServiceRequestSpecMethod) Values() []SessionManagerHttpServiceRequestSpecMethod { + return []SessionManagerHttpServiceRequestSpecMethod{ + SessionManagerHttpServiceRequestSpecMethodHttpOptions, + SessionManagerHttpServiceRequestSpecMethodHttpGet, + SessionManagerHttpServiceRequestSpecMethodHttpHead, + SessionManagerHttpServiceRequestSpecMethodHttpPost, + SessionManagerHttpServiceRequestSpecMethodHttpPut, + SessionManagerHttpServiceRequestSpecMethodHttpDelete, + SessionManagerHttpServiceRequestSpecMethodHttpTrace, + SessionManagerHttpServiceRequestSpecMethodHttpConnect, + } +} + +func (e SessionManagerHttpServiceRequestSpecMethod) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["SessionManagerHttpServiceRequestSpecMethod"] = reflect.TypeOf((*SessionManagerHttpServiceRequestSpecMethod)(nil)).Elem() - minAPIVersionForType["SessionManagerHttpServiceRequestSpecMethod"] = "5.0" } // Simplified shares notation. @@ -6473,6 +10237,19 @@ const ( SharesLevelCustom = SharesLevel("custom") ) +func (e SharesLevel) Values() []SharesLevel { + return []SharesLevel{ + SharesLevelLow, + SharesLevelNormal, + SharesLevelHigh, + SharesLevelCustom, + } +} + +func (e SharesLevel) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["SharesLevel"] = reflect.TypeOf((*SharesLevel)(nil)).Elem() } @@ -6480,6 +10257,7 @@ func init() { // The encoding of the resultant return data. // // This is a hint to the client side +// to indicate the format of the information being returned. type SimpleCommandEncoding string const ( @@ -6490,9 +10268,20 @@ const ( SimpleCommandEncodingSTRING = SimpleCommandEncoding("STRING") ) +func (e SimpleCommandEncoding) Values() []SimpleCommandEncoding { + return []SimpleCommandEncoding{ + SimpleCommandEncodingCSV, + SimpleCommandEncodingHEX, + SimpleCommandEncodingSTRING, + } +} + +func (e SimpleCommandEncoding) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["SimpleCommandEncoding"] = reflect.TypeOf((*SimpleCommandEncoding)(nil)).Elem() - minAPIVersionForType["SimpleCommandEncoding"] = "2.5" } // The available SLP discovery methods. @@ -6511,10 +10300,24 @@ const ( SlpDiscoveryMethodSlpManual = SlpDiscoveryMethod("slpManual") ) +func (e SlpDiscoveryMethod) Values() []SlpDiscoveryMethod { + return []SlpDiscoveryMethod{ + SlpDiscoveryMethodSlpDhcp, + SlpDiscoveryMethodSlpAutoUnicast, + SlpDiscoveryMethodSlpAutoMulticast, + SlpDiscoveryMethodSlpManual, + } +} + +func (e SlpDiscoveryMethod) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["SlpDiscoveryMethod"] = reflect.TypeOf((*SlpDiscoveryMethod)(nil)).Elem() } +// These are the constraint relationships between software packages. 
type SoftwarePackageConstraint string const ( @@ -6525,9 +10328,22 @@ const ( SoftwarePackageConstraintGreaterThan = SoftwarePackageConstraint("greaterThan") ) +func (e SoftwarePackageConstraint) Values() []SoftwarePackageConstraint { + return []SoftwarePackageConstraint{ + SoftwarePackageConstraintEquals, + SoftwarePackageConstraintLessThan, + SoftwarePackageConstraintLessThanEqual, + SoftwarePackageConstraintGreaterThanEqual, + SoftwarePackageConstraintGreaterThan, + } +} + +func (e SoftwarePackageConstraint) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["SoftwarePackageConstraint"] = reflect.TypeOf((*SoftwarePackageConstraint)(nil)).Elem() - minAPIVersionForType["SoftwarePackageConstraint"] = "6.5" } type SoftwarePackageVibType string @@ -6542,9 +10358,20 @@ const ( SoftwarePackageVibTypeMeta = SoftwarePackageVibType("meta") ) +func (e SoftwarePackageVibType) Values() []SoftwarePackageVibType { + return []SoftwarePackageVibType{ + SoftwarePackageVibTypeBootbank, + SoftwarePackageVibTypeTools, + SoftwarePackageVibTypeMeta, + } +} + +func (e SoftwarePackageVibType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["SoftwarePackageVibType"] = reflect.TypeOf((*SoftwarePackageVibType)(nil)).Elem() - minAPIVersionForType["SoftwarePackageVibType"] = "6.5" } // The operation on the target state. @@ -6557,10 +10384,22 @@ const ( StateAlarmOperatorIsUnequal = StateAlarmOperator("isUnequal") ) +func (e StateAlarmOperator) Values() []StateAlarmOperator { + return []StateAlarmOperator{ + StateAlarmOperatorIsEqual, + StateAlarmOperatorIsUnequal, + } +} + +func (e StateAlarmOperator) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["StateAlarmOperator"] = reflect.TypeOf((*StateAlarmOperator)(nil)).Elem() } +// Storage DRS behavior. type StorageDrsPodConfigInfoBehavior string const ( @@ -6578,11 +10417,22 @@ const ( StorageDrsPodConfigInfoBehaviorAutomated = StorageDrsPodConfigInfoBehavior("automated") ) -func init() { - t["StorageDrsPodConfigInfoBehavior"] = reflect.TypeOf((*StorageDrsPodConfigInfoBehavior)(nil)).Elem() - minAPIVersionForType["StorageDrsPodConfigInfoBehavior"] = "5.0" +func (e StorageDrsPodConfigInfoBehavior) Values() []StorageDrsPodConfigInfoBehavior { + return []StorageDrsPodConfigInfoBehavior{ + StorageDrsPodConfigInfoBehaviorManual, + StorageDrsPodConfigInfoBehaviorAutomated, + } } +func (e StorageDrsPodConfigInfoBehavior) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["StorageDrsPodConfigInfoBehavior"] = reflect.TypeOf((*StorageDrsPodConfigInfoBehavior)(nil)).Elem() +} + +// Defines the two ways a space utilization threshold can be specified. 
type StorageDrsSpaceLoadBalanceConfigSpaceThresholdMode string const ( @@ -6592,14 +10442,27 @@ const ( StorageDrsSpaceLoadBalanceConfigSpaceThresholdModeFreeSpace = StorageDrsSpaceLoadBalanceConfigSpaceThresholdMode("freeSpace") ) -func init() { - t["StorageDrsSpaceLoadBalanceConfigSpaceThresholdMode"] = reflect.TypeOf((*StorageDrsSpaceLoadBalanceConfigSpaceThresholdMode)(nil)).Elem() - minAPIVersionForType["StorageDrsSpaceLoadBalanceConfigSpaceThresholdMode"] = "6.0" +func (e StorageDrsSpaceLoadBalanceConfigSpaceThresholdMode) Values() []StorageDrsSpaceLoadBalanceConfigSpaceThresholdMode { + return []StorageDrsSpaceLoadBalanceConfigSpaceThresholdMode{ + StorageDrsSpaceLoadBalanceConfigSpaceThresholdModeUtilization, + StorageDrsSpaceLoadBalanceConfigSpaceThresholdModeFreeSpace, + } } -// User specification of congestion threshold mode on a given datastore +func (e StorageDrsSpaceLoadBalanceConfigSpaceThresholdMode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["StorageDrsSpaceLoadBalanceConfigSpaceThresholdMode"] = reflect.TypeOf((*StorageDrsSpaceLoadBalanceConfigSpaceThresholdMode)(nil)).Elem() +} + +// Deprecated as of vSphere8.0 U3, and there is no replacement for it. +// +// # User specification of congestion threshold mode on a given datastore // // For more information, see +// `StorageIORMInfo.congestionThreshold` type StorageIORMThresholdMode string const ( @@ -6612,11 +10475,22 @@ const ( StorageIORMThresholdModeManual = StorageIORMThresholdMode("manual") ) -func init() { - t["StorageIORMThresholdMode"] = reflect.TypeOf((*StorageIORMThresholdMode)(nil)).Elem() - minAPIVersionForType["StorageIORMThresholdMode"] = "5.1" +func (e StorageIORMThresholdMode) Values() []StorageIORMThresholdMode { + return []StorageIORMThresholdMode{ + StorageIORMThresholdModeAutomatic, + StorageIORMThresholdModeManual, + } } +func (e StorageIORMThresholdMode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["StorageIORMThresholdMode"] = reflect.TypeOf((*StorageIORMThresholdMode)(nil)).Elem() +} + +// Defines the storage placement operation type. 
type StoragePlacementSpecPlacementType string const ( @@ -6630,9 +10504,21 @@ const ( StoragePlacementSpecPlacementTypeClone = StoragePlacementSpecPlacementType("clone") ) +func (e StoragePlacementSpecPlacementType) Values() []StoragePlacementSpecPlacementType { + return []StoragePlacementSpecPlacementType{ + StoragePlacementSpecPlacementTypeCreate, + StoragePlacementSpecPlacementTypeReconfigure, + StoragePlacementSpecPlacementTypeRelocate, + StoragePlacementSpecPlacementTypeClone, + } +} + +func (e StoragePlacementSpecPlacementType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["StoragePlacementSpecPlacementType"] = reflect.TypeOf((*StoragePlacementSpecPlacementType)(nil)).Elem() - minAPIVersionForType["StoragePlacementSpecPlacementType"] = "5.0" } // This option specifies how to select tasks based on child relationships @@ -6656,6 +10542,18 @@ const ( TaskFilterSpecRecursionOptionAll = TaskFilterSpecRecursionOption("all") ) +func (e TaskFilterSpecRecursionOption) Values() []TaskFilterSpecRecursionOption { + return []TaskFilterSpecRecursionOption{ + TaskFilterSpecRecursionOptionSelf, + TaskFilterSpecRecursionOptionChildren, + TaskFilterSpecRecursionOptionAll, + } +} + +func (e TaskFilterSpecRecursionOption) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["TaskFilterSpecRecursionOption"] = reflect.TypeOf((*TaskFilterSpecRecursionOption)(nil)).Elem() } @@ -6672,6 +10570,18 @@ const ( TaskFilterSpecTimeOptionCompletedTime = TaskFilterSpecTimeOption("completedTime") ) +func (e TaskFilterSpecTimeOption) Values() []TaskFilterSpecTimeOption { + return []TaskFilterSpecTimeOption{ + TaskFilterSpecTimeOptionQueuedTime, + TaskFilterSpecTimeOptionStartedTime, + TaskFilterSpecTimeOptionCompletedTime, + } +} + +func (e TaskFilterSpecTimeOption) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["TaskFilterSpecTimeOption"] = reflect.TypeOf((*TaskFilterSpecTimeOption)(nil)).Elem() } @@ -6693,6 +10603,19 @@ const ( TaskInfoStateError = TaskInfoState("error") ) +func (e TaskInfoState) Values() []TaskInfoState { + return []TaskInfoState{ + TaskInfoStateQueued, + TaskInfoStateRunning, + TaskInfoStateSuccess, + TaskInfoStateError, + } +} + +func (e TaskInfoState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["TaskInfoState"] = reflect.TypeOf((*TaskInfoState)(nil)).Elem() } @@ -6706,12 +10629,23 @@ const ( ThirdPartyLicenseAssignmentFailedReasonModuleNotInstalled = ThirdPartyLicenseAssignmentFailedReason("moduleNotInstalled") ) +func (e ThirdPartyLicenseAssignmentFailedReason) Values() []ThirdPartyLicenseAssignmentFailedReason { + return []ThirdPartyLicenseAssignmentFailedReason{ + ThirdPartyLicenseAssignmentFailedReasonLicenseAssignmentFailed, + ThirdPartyLicenseAssignmentFailedReasonModuleNotInstalled, + } +} + +func (e ThirdPartyLicenseAssignmentFailedReason) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ThirdPartyLicenseAssignmentFailedReason"] = reflect.TypeOf((*ThirdPartyLicenseAssignmentFailedReason)(nil)).Elem() - minAPIVersionForType["ThirdPartyLicenseAssignmentFailedReason"] = "5.0" } // The policy setting used to determine when tools are auto-upgraded for +// a virtual machine type UpgradePolicy string const ( @@ -6730,9 +10664,19 @@ const ( UpgradePolicyUpgradeAtPowerCycle = UpgradePolicy("upgradeAtPowerCycle") ) +func (e UpgradePolicy) Values() []UpgradePolicy { + return []UpgradePolicy{ + UpgradePolicyManual, + 
UpgradePolicyUpgradeAtPowerCycle, + } +} + +func (e UpgradePolicy) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["UpgradePolicy"] = reflect.TypeOf((*UpgradePolicy)(nil)).Elem() - minAPIVersionForType["UpgradePolicy"] = "2.5" } type VAppAutoStartAction string @@ -6757,13 +10701,27 @@ const ( VAppAutoStartActionSuspend = VAppAutoStartAction("suspend") ) +func (e VAppAutoStartAction) Values() []VAppAutoStartAction { + return []VAppAutoStartAction{ + VAppAutoStartActionNone, + VAppAutoStartActionPowerOn, + VAppAutoStartActionPowerOff, + VAppAutoStartActionGuestShutdown, + VAppAutoStartActionSuspend, + } +} + +func (e VAppAutoStartAction) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VAppAutoStartAction"] = reflect.TypeOf((*VAppAutoStartAction)(nil)).Elem() - minAPIVersionForType["VAppAutoStartAction"] = "4.0" } // The cloned VMs can either be provisioned the same way as the VMs // they are a clone of, thin provisioned or thick provisioned, or +// linked clones (i.e., using delta disks). type VAppCloneSpecProvisioningType string const ( @@ -6782,11 +10740,23 @@ const ( VAppCloneSpecProvisioningTypeThick = VAppCloneSpecProvisioningType("thick") ) -func init() { - t["VAppCloneSpecProvisioningType"] = reflect.TypeOf((*VAppCloneSpecProvisioningType)(nil)).Elem() - minAPIVersionForType["VAppCloneSpecProvisioningType"] = "4.1" +func (e VAppCloneSpecProvisioningType) Values() []VAppCloneSpecProvisioningType { + return []VAppCloneSpecProvisioningType{ + VAppCloneSpecProvisioningTypeSameAsSource, + VAppCloneSpecProvisioningTypeThin, + VAppCloneSpecProvisioningTypeThick, + } } +func (e VAppCloneSpecProvisioningType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VAppCloneSpecProvisioningType"] = reflect.TypeOf((*VAppCloneSpecProvisioningType)(nil)).Elem() +} + +// IP allocation schemes supported by the guest. type VAppIPAssignmentInfoAllocationSchemes string const ( @@ -6797,11 +10767,22 @@ const ( VAppIPAssignmentInfoAllocationSchemesOvfenv = VAppIPAssignmentInfoAllocationSchemes("ovfenv") ) -func init() { - t["VAppIPAssignmentInfoAllocationSchemes"] = reflect.TypeOf((*VAppIPAssignmentInfoAllocationSchemes)(nil)).Elem() - minAPIVersionForType["VAppIPAssignmentInfoAllocationSchemes"] = "4.0" +func (e VAppIPAssignmentInfoAllocationSchemes) Values() []VAppIPAssignmentInfoAllocationSchemes { + return []VAppIPAssignmentInfoAllocationSchemes{ + VAppIPAssignmentInfoAllocationSchemesDhcp, + VAppIPAssignmentInfoAllocationSchemesOvfenv, + } } +func (e VAppIPAssignmentInfoAllocationSchemes) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VAppIPAssignmentInfoAllocationSchemes"] = reflect.TypeOf((*VAppIPAssignmentInfoAllocationSchemes)(nil)).Elem() +} + +// IP allocation policy for a deployment. 
type VAppIPAssignmentInfoIpAllocationPolicy string const ( @@ -6829,14 +10810,24 @@ const ( VAppIPAssignmentInfoIpAllocationPolicyFixedAllocatedPolicy = VAppIPAssignmentInfoIpAllocationPolicy("fixedAllocatedPolicy") ) -func init() { - t["VAppIPAssignmentInfoIpAllocationPolicy"] = reflect.TypeOf((*VAppIPAssignmentInfoIpAllocationPolicy)(nil)).Elem() - minAPIVersionForType["VAppIPAssignmentInfoIpAllocationPolicy"] = "4.0" - minAPIVersionForEnumValue["VAppIPAssignmentInfoIpAllocationPolicy"] = map[string]string{ - "fixedAllocatedPolicy": "5.1", +func (e VAppIPAssignmentInfoIpAllocationPolicy) Values() []VAppIPAssignmentInfoIpAllocationPolicy { + return []VAppIPAssignmentInfoIpAllocationPolicy{ + VAppIPAssignmentInfoIpAllocationPolicyDhcpPolicy, + VAppIPAssignmentInfoIpAllocationPolicyTransientPolicy, + VAppIPAssignmentInfoIpAllocationPolicyFixedPolicy, + VAppIPAssignmentInfoIpAllocationPolicyFixedAllocatedPolicy, } } +func (e VAppIPAssignmentInfoIpAllocationPolicy) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VAppIPAssignmentInfoIpAllocationPolicy"] = reflect.TypeOf((*VAppIPAssignmentInfoIpAllocationPolicy)(nil)).Elem() +} + +// IP protocols supported by the guest. type VAppIPAssignmentInfoProtocols string const ( @@ -6846,9 +10837,19 @@ const ( VAppIPAssignmentInfoProtocolsIPv6 = VAppIPAssignmentInfoProtocols("IPv6") ) +func (e VAppIPAssignmentInfoProtocols) Values() []VAppIPAssignmentInfoProtocols { + return []VAppIPAssignmentInfoProtocols{ + VAppIPAssignmentInfoProtocolsIPv4, + VAppIPAssignmentInfoProtocolsIPv6, + } +} + +func (e VAppIPAssignmentInfoProtocols) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VAppIPAssignmentInfoProtocols"] = reflect.TypeOf((*VAppIPAssignmentInfoProtocols)(nil)).Elem() - minAPIVersionForType["VAppIPAssignmentInfoProtocols"] = "4.0" } type VFlashModuleNotSupportedReason string @@ -6861,9 +10862,22 @@ const ( VFlashModuleNotSupportedReasonDiskSizeNotSupported = VFlashModuleNotSupportedReason("DiskSizeNotSupported") ) +func (e VFlashModuleNotSupportedReason) Values() []VFlashModuleNotSupportedReason { + return []VFlashModuleNotSupportedReason{ + VFlashModuleNotSupportedReasonCacheModeNotSupported, + VFlashModuleNotSupportedReasonCacheConsistencyTypeNotSupported, + VFlashModuleNotSupportedReasonCacheBlockSizeNotSupported, + VFlashModuleNotSupportedReasonCacheReservationNotSupported, + VFlashModuleNotSupportedReasonDiskSizeNotSupported, + } +} + +func (e VFlashModuleNotSupportedReason) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VFlashModuleNotSupportedReason"] = reflect.TypeOf((*VFlashModuleNotSupportedReason)(nil)).Elem() - minAPIVersionForType["VFlashModuleNotSupportedReason"] = "5.5" } // Types of a host's compatibility with a designated virtual machine @@ -6884,10 +10898,22 @@ const ( VMotionCompatibilityTypeSoftware = VMotionCompatibilityType("software") ) +func (e VMotionCompatibilityType) Values() []VMotionCompatibilityType { + return []VMotionCompatibilityType{ + VMotionCompatibilityTypeCpu, + VMotionCompatibilityTypeSoftware, + } +} + +func (e VMotionCompatibilityType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VMotionCompatibilityType"] = reflect.TypeOf((*VMotionCompatibilityType)(nil)).Elem() } +// The teaming health check match status. 
type VMwareDVSTeamingMatchStatus string const ( @@ -6913,11 +10939,24 @@ const ( VMwareDVSTeamingMatchStatusNonIphashMismatch = VMwareDVSTeamingMatchStatus("nonIphashMismatch") ) -func init() { - t["VMwareDVSTeamingMatchStatus"] = reflect.TypeOf((*VMwareDVSTeamingMatchStatus)(nil)).Elem() - minAPIVersionForType["VMwareDVSTeamingMatchStatus"] = "5.1" +func (e VMwareDVSTeamingMatchStatus) Values() []VMwareDVSTeamingMatchStatus { + return []VMwareDVSTeamingMatchStatus{ + VMwareDVSTeamingMatchStatusIphashMatch, + VMwareDVSTeamingMatchStatusNonIphashMatch, + VMwareDVSTeamingMatchStatusIphashMismatch, + VMwareDVSTeamingMatchStatusNonIphashMismatch, + } } +func (e VMwareDVSTeamingMatchStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VMwareDVSTeamingMatchStatus"] = reflect.TypeOf((*VMwareDVSTeamingMatchStatus)(nil)).Elem() +} + +// Distributed Port Mirroring session Encapsulation types. type VMwareDVSVspanSessionEncapType string const ( @@ -6929,16 +10968,26 @@ const ( VMwareDVSVspanSessionEncapTypeErspan3 = VMwareDVSVspanSessionEncapType("erspan3") ) -func init() { - t["VMwareDVSVspanSessionEncapType"] = reflect.TypeOf((*VMwareDVSVspanSessionEncapType)(nil)).Elem() - minAPIVersionForType["VMwareDVSVspanSessionEncapType"] = "6.5" +func (e VMwareDVSVspanSessionEncapType) Values() []VMwareDVSVspanSessionEncapType { + return []VMwareDVSVspanSessionEncapType{ + VMwareDVSVspanSessionEncapTypeGre, + VMwareDVSVspanSessionEncapTypeErspan2, + VMwareDVSVspanSessionEncapTypeErspan3, + } } +func (e VMwareDVSVspanSessionEncapType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VMwareDVSVspanSessionEncapType"] = reflect.TypeOf((*VMwareDVSVspanSessionEncapType)(nil)).Elem() +} + +// Distributed Port Mirroring session types. type VMwareDVSVspanSessionType string const ( - // - // // Deprecated as of vSphere API 5.1. // // In mixedDestMirror session, Distributed Ports can be used as source entities, @@ -6958,16 +11007,28 @@ const ( VMwareDVSVspanSessionTypeEncapsulatedRemoteMirrorSource = VMwareDVSVspanSessionType("encapsulatedRemoteMirrorSource") ) -func init() { - t["VMwareDVSVspanSessionType"] = reflect.TypeOf((*VMwareDVSVspanSessionType)(nil)).Elem() - minAPIVersionForType["VMwareDVSVspanSessionType"] = "5.1" +func (e VMwareDVSVspanSessionType) Values() []VMwareDVSVspanSessionType { + return []VMwareDVSVspanSessionType{ + VMwareDVSVspanSessionTypeMixedDestMirror, + VMwareDVSVspanSessionTypeDvPortMirror, + VMwareDVSVspanSessionTypeRemoteMirrorSource, + VMwareDVSVspanSessionTypeRemoteMirrorDest, + VMwareDVSVspanSessionTypeEncapsulatedRemoteMirrorSource, + } } +func (e VMwareDVSVspanSessionType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VMwareDVSVspanSessionType"] = reflect.TypeOf((*VMwareDVSVspanSessionType)(nil)).Elem() +} + +// Link Aggregation Control Protocol API versions. type VMwareDvsLacpApiVersion string const ( - // - // // Deprecated as of vSphere API 7.0u1. 
// // One Link Aggregation Control Protocol group in the switch @@ -6976,11 +11037,22 @@ const ( VMwareDvsLacpApiVersionMultipleLag = VMwareDvsLacpApiVersion("multipleLag") ) -func init() { - t["VMwareDvsLacpApiVersion"] = reflect.TypeOf((*VMwareDvsLacpApiVersion)(nil)).Elem() - minAPIVersionForType["VMwareDvsLacpApiVersion"] = "5.5" +func (e VMwareDvsLacpApiVersion) Values() []VMwareDvsLacpApiVersion { + return []VMwareDvsLacpApiVersion{ + VMwareDvsLacpApiVersionSingleLag, + VMwareDvsLacpApiVersionMultipleLag, + } } +func (e VMwareDvsLacpApiVersion) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VMwareDvsLacpApiVersion"] = reflect.TypeOf((*VMwareDvsLacpApiVersion)(nil)).Elem() +} + +// Load balance algorithm in a Link Aggregation Control Protocol group. type VMwareDvsLacpLoadBalanceAlgorithm string const ( @@ -7027,11 +11099,40 @@ const ( VMwareDvsLacpLoadBalanceAlgorithmSrcPortId = VMwareDvsLacpLoadBalanceAlgorithm("srcPortId") ) -func init() { - t["VMwareDvsLacpLoadBalanceAlgorithm"] = reflect.TypeOf((*VMwareDvsLacpLoadBalanceAlgorithm)(nil)).Elem() - minAPIVersionForType["VMwareDvsLacpLoadBalanceAlgorithm"] = "5.5" +func (e VMwareDvsLacpLoadBalanceAlgorithm) Values() []VMwareDvsLacpLoadBalanceAlgorithm { + return []VMwareDvsLacpLoadBalanceAlgorithm{ + VMwareDvsLacpLoadBalanceAlgorithmSrcMac, + VMwareDvsLacpLoadBalanceAlgorithmDestMac, + VMwareDvsLacpLoadBalanceAlgorithmSrcDestMac, + VMwareDvsLacpLoadBalanceAlgorithmDestIpVlan, + VMwareDvsLacpLoadBalanceAlgorithmSrcIpVlan, + VMwareDvsLacpLoadBalanceAlgorithmSrcDestIpVlan, + VMwareDvsLacpLoadBalanceAlgorithmDestTcpUdpPort, + VMwareDvsLacpLoadBalanceAlgorithmSrcTcpUdpPort, + VMwareDvsLacpLoadBalanceAlgorithmSrcDestTcpUdpPort, + VMwareDvsLacpLoadBalanceAlgorithmDestIpTcpUdpPort, + VMwareDvsLacpLoadBalanceAlgorithmSrcIpTcpUdpPort, + VMwareDvsLacpLoadBalanceAlgorithmSrcDestIpTcpUdpPort, + VMwareDvsLacpLoadBalanceAlgorithmDestIpTcpUdpPortVlan, + VMwareDvsLacpLoadBalanceAlgorithmSrcIpTcpUdpPortVlan, + VMwareDvsLacpLoadBalanceAlgorithmSrcDestIpTcpUdpPortVlan, + VMwareDvsLacpLoadBalanceAlgorithmDestIp, + VMwareDvsLacpLoadBalanceAlgorithmSrcIp, + VMwareDvsLacpLoadBalanceAlgorithmSrcDestIp, + VMwareDvsLacpLoadBalanceAlgorithmVlan, + VMwareDvsLacpLoadBalanceAlgorithmSrcPortId, + } } +func (e VMwareDvsLacpLoadBalanceAlgorithm) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VMwareDvsLacpLoadBalanceAlgorithm"] = reflect.TypeOf((*VMwareDvsLacpLoadBalanceAlgorithm)(nil)).Elem() +} + +// Multicast Filtering mode. type VMwareDvsMulticastFilteringMode string const ( @@ -7041,11 +11142,22 @@ const ( VMwareDvsMulticastFilteringModeSnooping = VMwareDvsMulticastFilteringMode("snooping") ) -func init() { - t["VMwareDvsMulticastFilteringMode"] = reflect.TypeOf((*VMwareDvsMulticastFilteringMode)(nil)).Elem() - minAPIVersionForType["VMwareDvsMulticastFilteringMode"] = "6.0" +func (e VMwareDvsMulticastFilteringMode) Values() []VMwareDvsMulticastFilteringMode { + return []VMwareDvsMulticastFilteringMode{ + VMwareDvsMulticastFilteringModeLegacyFiltering, + VMwareDvsMulticastFilteringModeSnooping, + } } +func (e VMwareDvsMulticastFilteringMode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VMwareDvsMulticastFilteringMode"] = reflect.TypeOf((*VMwareDvsMulticastFilteringMode)(nil)).Elem() +} + +// Link Aggregation Control Protocol policy modes. 
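The long VMwareDvsLacpLoadBalanceAlgorithm block above illustrates what the generated accessors buy callers: the full list of legal values is available at runtime instead of being hard-coded. A small standalone sketch that validates a requested algorithm against that list; the flag value and error handling are illustrative, not part of this change, and vim25/types is the assumed import path:

package main

import (
	"fmt"
	"os"
	"slices"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	// Pretend this came from a CLI flag or a config file.
	requested := string(types.VMwareDvsLacpLoadBalanceAlgorithmSrcPortId)

	valid := types.VMwareDvsLacpLoadBalanceAlgorithm("").Strings()
	if !slices.Contains(valid, requested) {
		fmt.Fprintf(os.Stderr, "unknown LACP load-balance algorithm %q; valid values: %v\n", requested, valid)
		os.Exit(1)
	}
	fmt.Println("using", types.VMwareDvsLacpLoadBalanceAlgorithm(requested))
}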
type VMwareUplinkLacpMode string const ( @@ -7055,9 +11167,19 @@ const ( VMwareUplinkLacpModePassive = VMwareUplinkLacpMode("passive") ) +func (e VMwareUplinkLacpMode) Values() []VMwareUplinkLacpMode { + return []VMwareUplinkLacpMode{ + VMwareUplinkLacpModeActive, + VMwareUplinkLacpModePassive, + } +} + +func (e VMwareUplinkLacpMode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VMwareUplinkLacpMode"] = reflect.TypeOf((*VMwareUplinkLacpMode)(nil)).Elem() - minAPIVersionForType["VMwareUplinkLacpMode"] = "5.1" } type VMwareUplinkLacpTimeoutMode string @@ -7073,6 +11195,17 @@ const ( VMwareUplinkLacpTimeoutModeSlow = VMwareUplinkLacpTimeoutMode("slow") ) +func (e VMwareUplinkLacpTimeoutMode) Values() []VMwareUplinkLacpTimeoutMode { + return []VMwareUplinkLacpTimeoutMode{ + VMwareUplinkLacpTimeoutModeFast, + VMwareUplinkLacpTimeoutModeSlow, + } +} + +func (e VMwareUplinkLacpTimeoutMode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VMwareUplinkLacpTimeoutMode"] = reflect.TypeOf((*VMwareUplinkLacpTimeoutMode)(nil)).Elem() minAPIVersionForType["VMwareUplinkLacpTimeoutMode"] = "7.0.2.0" @@ -7081,6 +11214,7 @@ func init() { // Consumption type constants. // // Consumption type describes how the virtual storage object is connected and +// consumed for data by the clients. type VStorageObjectConsumptionType string const ( @@ -7088,9 +11222,18 @@ const ( VStorageObjectConsumptionTypeDisk = VStorageObjectConsumptionType("disk") ) +func (e VStorageObjectConsumptionType) Values() []VStorageObjectConsumptionType { + return []VStorageObjectConsumptionType{ + VStorageObjectConsumptionTypeDisk, + } +} + +func (e VStorageObjectConsumptionType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VStorageObjectConsumptionType"] = reflect.TypeOf((*VStorageObjectConsumptionType)(nil)).Elem() - minAPIVersionForType["VStorageObjectConsumptionType"] = "6.5" } // Deprecated as of vSphere API 4.0, use `CheckTestType_enum` instead. @@ -7129,10 +11272,24 @@ const ( ValidateMigrationTestTypeResourceTests = ValidateMigrationTestType("resourceTests") ) +func (e ValidateMigrationTestType) Values() []ValidateMigrationTestType { + return []ValidateMigrationTestType{ + ValidateMigrationTestTypeSourceTests, + ValidateMigrationTestTypeCompatibilityTests, + ValidateMigrationTestTypeDiskAccessibilityTests, + ValidateMigrationTestTypeResourceTests, + } +} + +func (e ValidateMigrationTestType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["ValidateMigrationTestType"] = reflect.TypeOf((*ValidateMigrationTestType)(nil)).Elem() } +// VchaClusterMode enum defines the possible modes for a VCHA Cluster. type VchaClusterMode string const ( @@ -7154,11 +11311,23 @@ const ( VchaClusterModeMaintenance = VchaClusterMode("maintenance") ) -func init() { - t["VchaClusterMode"] = reflect.TypeOf((*VchaClusterMode)(nil)).Elem() - minAPIVersionForType["VchaClusterMode"] = "6.5" +func (e VchaClusterMode) Values() []VchaClusterMode { + return []VchaClusterMode{ + VchaClusterModeEnabled, + VchaClusterModeDisabled, + VchaClusterModeMaintenance, + } } +func (e VchaClusterMode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VchaClusterMode"] = reflect.TypeOf((*VchaClusterMode)(nil)).Elem() +} + +// VchaClusterState enum defines the possible states for a VCHA Cluster. 
type VchaClusterState string const ( @@ -7178,9 +11347,20 @@ const ( VchaClusterStateIsolated = VchaClusterState("isolated") ) +func (e VchaClusterState) Values() []VchaClusterState { + return []VchaClusterState{ + VchaClusterStateHealthy, + VchaClusterStateDegraded, + VchaClusterStateIsolated, + } +} + +func (e VchaClusterState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VchaClusterState"] = reflect.TypeOf((*VchaClusterState)(nil)).Elem() - minAPIVersionForType["VchaClusterState"] = "6.5" } type VchaNodeRole string @@ -7204,12 +11384,24 @@ const ( VchaNodeRoleWitness = VchaNodeRole("witness") ) +func (e VchaNodeRole) Values() []VchaNodeRole { + return []VchaNodeRole{ + VchaNodeRoleActive, + VchaNodeRolePassive, + VchaNodeRoleWitness, + } +} + +func (e VchaNodeRole) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VchaNodeRole"] = reflect.TypeOf((*VchaNodeRole)(nil)).Elem() - minAPIVersionForType["VchaNodeRole"] = "6.5" } // VchaNodeState enum defines possible state a node can be in a +// VCHA Cluster. type VchaNodeState string const ( @@ -7219,9 +11411,19 @@ const ( VchaNodeStateDown = VchaNodeState("down") ) +func (e VchaNodeState) Values() []VchaNodeState { + return []VchaNodeState{ + VchaNodeStateUp, + VchaNodeStateDown, + } +} + +func (e VchaNodeState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VchaNodeState"] = reflect.TypeOf((*VchaNodeState)(nil)).Elem() - minAPIVersionForType["VchaNodeState"] = "6.5" } type VchaState string @@ -7237,9 +11439,21 @@ const ( VchaStatePrepared = VchaState("prepared") ) +func (e VchaState) Values() []VchaState { + return []VchaState{ + VchaStateConfigured, + VchaStateNotConfigured, + VchaStateInvalid, + VchaStatePrepared, + } +} + +func (e VchaState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VchaState"] = reflect.TypeOf((*VchaState)(nil)).Elem() - minAPIVersionForType["VchaState"] = "6.5" } // The VAppState type defines the set of states a vApp can be @@ -7247,6 +11461,7 @@ func init() { // // The transitory states between started and stopped is modeled explicitly, // since the starting or stopping of a vApp is typically a time-consuming +// process that might take minutes to complete. type VirtualAppVAppState string const ( @@ -7260,9 +11475,21 @@ const ( VirtualAppVAppStateStopping = VirtualAppVAppState("stopping") ) +func (e VirtualAppVAppState) Values() []VirtualAppVAppState { + return []VirtualAppVAppState{ + VirtualAppVAppStateStarted, + VirtualAppVAppStateStopped, + VirtualAppVAppStateStarting, + VirtualAppVAppStateStopping, + } +} + +func (e VirtualAppVAppState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualAppVAppState"] = reflect.TypeOf((*VirtualAppVAppState)(nil)).Elem() - minAPIVersionForType["VirtualAppVAppState"] = "4.0" } // Describes the change mode of the device. 
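The VirtualAppVAppState comment above calls out that the starting and stopping states are modelled explicitly because power operations can take a while. A short sketch of the polling decision that distinction enables; the helper name is illustrative and vim25/types is the assumed import path:

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

// transitional reports whether a vApp is in the middle of a power operation
// and the caller should poll again before acting on it.
func transitional(s types.VirtualAppVAppState) bool {
	switch s {
	case types.VirtualAppVAppStateStarting, types.VirtualAppVAppStateStopping:
		return true
	default:
		return false
	}
}

func main() {
	for _, s := range types.VirtualAppVAppState("").Values() {
		fmt.Printf("%-10s transitional=%v\n", s, transitional(s))
	}
}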
@@ -7275,6 +11502,17 @@ const ( VirtualDeviceConfigSpecChangeModeSkip = VirtualDeviceConfigSpecChangeMode("skip") ) +func (e VirtualDeviceConfigSpecChangeMode) Values() []VirtualDeviceConfigSpecChangeMode { + return []VirtualDeviceConfigSpecChangeMode{ + VirtualDeviceConfigSpecChangeModeFail, + VirtualDeviceConfigSpecChangeModeSkip, + } +} + +func (e VirtualDeviceConfigSpecChangeMode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualDeviceConfigSpecChangeMode"] = reflect.TypeOf((*VirtualDeviceConfigSpecChangeMode)(nil)).Elem() minAPIVersionForType["VirtualDeviceConfigSpecChangeMode"] = "8.0.0.1" @@ -7296,6 +11534,18 @@ const ( VirtualDeviceConfigSpecFileOperationReplace = VirtualDeviceConfigSpecFileOperation("replace") ) +func (e VirtualDeviceConfigSpecFileOperation) Values() []VirtualDeviceConfigSpecFileOperation { + return []VirtualDeviceConfigSpecFileOperation{ + VirtualDeviceConfigSpecFileOperationCreate, + VirtualDeviceConfigSpecFileOperationDestroy, + VirtualDeviceConfigSpecFileOperationReplace, + } +} + +func (e VirtualDeviceConfigSpecFileOperation) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualDeviceConfigSpecFileOperation"] = reflect.TypeOf((*VirtualDeviceConfigSpecFileOperation)(nil)).Elem() } @@ -7314,11 +11564,24 @@ const ( VirtualDeviceConfigSpecOperationEdit = VirtualDeviceConfigSpecOperation("edit") ) +func (e VirtualDeviceConfigSpecOperation) Values() []VirtualDeviceConfigSpecOperation { + return []VirtualDeviceConfigSpecOperation{ + VirtualDeviceConfigSpecOperationAdd, + VirtualDeviceConfigSpecOperationRemove, + VirtualDeviceConfigSpecOperationEdit, + } +} + +func (e VirtualDeviceConfigSpecOperation) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualDeviceConfigSpecOperation"] = reflect.TypeOf((*VirtualDeviceConfigSpecOperation)(nil)).Elem() } // Contains information about connectable virtual devices when +// the virtual machine restores from a migration. type VirtualDeviceConnectInfoMigrateConnectOp string const ( @@ -7341,11 +11604,23 @@ const ( VirtualDeviceConnectInfoMigrateConnectOpUnset = VirtualDeviceConnectInfoMigrateConnectOp("unset") ) -func init() { - t["VirtualDeviceConnectInfoMigrateConnectOp"] = reflect.TypeOf((*VirtualDeviceConnectInfoMigrateConnectOp)(nil)).Elem() - minAPIVersionForType["VirtualDeviceConnectInfoMigrateConnectOp"] = "6.7" +func (e VirtualDeviceConnectInfoMigrateConnectOp) Values() []VirtualDeviceConnectInfoMigrateConnectOp { + return []VirtualDeviceConnectInfoMigrateConnectOp{ + VirtualDeviceConnectInfoMigrateConnectOpConnect, + VirtualDeviceConnectInfoMigrateConnectOpDisconnect, + VirtualDeviceConnectInfoMigrateConnectOpUnset, + } } +func (e VirtualDeviceConnectInfoMigrateConnectOp) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VirtualDeviceConnectInfoMigrateConnectOp"] = reflect.TypeOf((*VirtualDeviceConnectInfoMigrateConnectOp)(nil)).Elem() +} + +// Specifies the connectable virtual device status. 
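VirtualDeviceConfigSpecOperation and VirtualDeviceConfigSpecFileOperation from the hunks above are normally used together when building a device change request. A standalone sketch of that pairing; the VirtualDeviceConfigSpec field names (Operation, FileOperation, Device) come from the released govmomi API rather than from this diff, so treat them as assumptions:

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	// Add a brand-new, file-backed disk: the device operation says "add" and
	// the file operation says "create the backing file as well".
	change := types.VirtualDeviceConfigSpec{
		Operation:     types.VirtualDeviceConfigSpecOperationAdd,
		FileOperation: types.VirtualDeviceConfigSpecFileOperationCreate,
		Device:        &types.VirtualDisk{},
	}
	fmt.Printf("op=%s fileOp=%s\n", change.Operation, change.FileOperation)
}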
type VirtualDeviceConnectInfoStatus string const ( @@ -7368,9 +11643,21 @@ const ( VirtualDeviceConnectInfoStatusUntried = VirtualDeviceConnectInfoStatus("untried") ) +func (e VirtualDeviceConnectInfoStatus) Values() []VirtualDeviceConnectInfoStatus { + return []VirtualDeviceConnectInfoStatus{ + VirtualDeviceConnectInfoStatusOk, + VirtualDeviceConnectInfoStatusRecoverableError, + VirtualDeviceConnectInfoStatusUnrecoverableError, + VirtualDeviceConnectInfoStatusUntried, + } +} + +func (e VirtualDeviceConnectInfoStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualDeviceConnectInfoStatus"] = reflect.TypeOf((*VirtualDeviceConnectInfoStatus)(nil)).Elem() - minAPIVersionForType["VirtualDeviceConnectInfoStatus"] = "4.0" } // All known file extensions. @@ -7391,11 +11678,26 @@ const ( VirtualDeviceFileExtensionRdm = VirtualDeviceFileExtension("rdm") ) +func (e VirtualDeviceFileExtension) Values() []VirtualDeviceFileExtension { + return []VirtualDeviceFileExtension{ + VirtualDeviceFileExtensionIso, + VirtualDeviceFileExtensionFlp, + VirtualDeviceFileExtensionVmdk, + VirtualDeviceFileExtensionDsk, + VirtualDeviceFileExtensionRdm, + } +} + +func (e VirtualDeviceFileExtension) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualDeviceFileExtension"] = reflect.TypeOf((*VirtualDeviceFileExtension)(nil)).Elem() } // The VirtualDeviceURIBackingOptionDirection enum type +// provides values for the direction of a network connection. type VirtualDeviceURIBackingOptionDirection string const ( @@ -7408,11 +11710,22 @@ const ( VirtualDeviceURIBackingOptionDirectionClient = VirtualDeviceURIBackingOptionDirection("client") ) -func init() { - t["VirtualDeviceURIBackingOptionDirection"] = reflect.TypeOf((*VirtualDeviceURIBackingOptionDirection)(nil)).Elem() - minAPIVersionForType["VirtualDeviceURIBackingOptionDirection"] = "4.1" +func (e VirtualDeviceURIBackingOptionDirection) Values() []VirtualDeviceURIBackingOptionDirection { + return []VirtualDeviceURIBackingOptionDirection{ + VirtualDeviceURIBackingOptionDirectionServer, + VirtualDeviceURIBackingOptionDirectionClient, + } } +func (e VirtualDeviceURIBackingOptionDirection) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VirtualDeviceURIBackingOptionDirection"] = reflect.TypeOf((*VirtualDeviceURIBackingOptionDirection)(nil)).Elem() +} + +// The types of virtual disk adapters used by virtual disks type VirtualDiskAdapterType string const ( @@ -7424,9 +11737,20 @@ const ( VirtualDiskAdapterTypeLsiLogic = VirtualDiskAdapterType("lsiLogic") ) +func (e VirtualDiskAdapterType) Values() []VirtualDiskAdapterType { + return []VirtualDiskAdapterType{ + VirtualDiskAdapterTypeIde, + VirtualDiskAdapterTypeBusLogic, + VirtualDiskAdapterTypeLsiLogic, + } +} + +func (e VirtualDiskAdapterType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualDiskAdapterType"] = reflect.TypeOf((*VirtualDiskAdapterType)(nil)).Elem() - minAPIVersionForType["VirtualDiskAdapterType"] = "2.5" } // All known compatibility modes for raw disk mappings. 
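VirtualDeviceFileExtension above enumerates the file extensions recognised for device backings. A small sketch that reuses the generated Strings() list to classify file names; it assumes the enum's wire values are the lowercase extensions the constant names suggest (iso, flp, vmdk, dsk, rdm), which this hunk does not show:

package main

import (
	"fmt"
	"path"
	"strings"

	"github.com/vmware/govmomi/vim25/types"
)

// hasKnownDeviceExtension reports whether name ends in one of the extensions
// enumerated by VirtualDeviceFileExtension.
func hasKnownDeviceExtension(name string) bool {
	ext := strings.TrimPrefix(path.Ext(name), ".")
	for _, known := range types.VirtualDeviceFileExtension("").Strings() {
		if ext == known {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(hasKnownDeviceExtension("vm/vm.vmdk"))  // true
	fmt.Println(hasKnownDeviceExtension("vm/vm.nvram")) // false
}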
@@ -7449,10 +11773,22 @@ const ( VirtualDiskCompatibilityModePhysicalMode = VirtualDiskCompatibilityMode("physicalMode") ) +func (e VirtualDiskCompatibilityMode) Values() []VirtualDiskCompatibilityMode { + return []VirtualDiskCompatibilityMode{ + VirtualDiskCompatibilityModeVirtualMode, + VirtualDiskCompatibilityModePhysicalMode, + } +} + +func (e VirtualDiskCompatibilityMode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualDiskCompatibilityMode"] = reflect.TypeOf((*VirtualDiskCompatibilityMode)(nil)).Elem() } +// The delta disk format constants type VirtualDiskDeltaDiskFormat string const ( @@ -7464,14 +11800,23 @@ const ( VirtualDiskDeltaDiskFormatSeSparseFormat = VirtualDiskDeltaDiskFormat("seSparseFormat") ) -func init() { - t["VirtualDiskDeltaDiskFormat"] = reflect.TypeOf((*VirtualDiskDeltaDiskFormat)(nil)).Elem() - minAPIVersionForType["VirtualDiskDeltaDiskFormat"] = "5.0" - minAPIVersionForEnumValue["VirtualDiskDeltaDiskFormat"] = map[string]string{ - "seSparseFormat": "5.1", +func (e VirtualDiskDeltaDiskFormat) Values() []VirtualDiskDeltaDiskFormat { + return []VirtualDiskDeltaDiskFormat{ + VirtualDiskDeltaDiskFormatRedoLogFormat, + VirtualDiskDeltaDiskFormatNativeFormat, + VirtualDiskDeltaDiskFormatSeSparseFormat, } } +func (e VirtualDiskDeltaDiskFormat) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VirtualDiskDeltaDiskFormat"] = reflect.TypeOf((*VirtualDiskDeltaDiskFormat)(nil)).Elem() +} + +// The delta disk format variant constants type VirtualDiskDeltaDiskFormatVariant string const ( @@ -7481,9 +11826,19 @@ const ( VirtualDiskDeltaDiskFormatVariantVsanSparseVariant = VirtualDiskDeltaDiskFormatVariant("vsanSparseVariant") ) +func (e VirtualDiskDeltaDiskFormatVariant) Values() []VirtualDiskDeltaDiskFormatVariant { + return []VirtualDiskDeltaDiskFormatVariant{ + VirtualDiskDeltaDiskFormatVariantVmfsSparseVariant, + VirtualDiskDeltaDiskFormatVariantVsanSparseVariant, + } +} + +func (e VirtualDiskDeltaDiskFormatVariant) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualDiskDeltaDiskFormatVariant"] = reflect.TypeOf((*VirtualDiskDeltaDiskFormatVariant)(nil)).Elem() - minAPIVersionForType["VirtualDiskDeltaDiskFormatVariant"] = "6.0" } // The list of known disk modes. @@ -7507,11 +11862,27 @@ const ( VirtualDiskModeAppend = VirtualDiskMode("append") ) +func (e VirtualDiskMode) Values() []VirtualDiskMode { + return []VirtualDiskMode{ + VirtualDiskModePersistent, + VirtualDiskModeNonpersistent, + VirtualDiskModeUndoable, + VirtualDiskModeIndependent_persistent, + VirtualDiskModeIndependent_nonpersistent, + VirtualDiskModeAppend, + } +} + +func (e VirtualDiskMode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualDiskMode"] = reflect.TypeOf((*VirtualDiskMode)(nil)).Elem() } // Rule type determines how the virtual disks in a vm can be grouped +// together. 
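The VirtualDiskMode list above mixes snapshot-participating modes with the independent ones, which snapshots skip. A sketch that derives that split from the generated value list rather than hard-coding both names; it assumes the wire values match the constant names (independent_persistent, independent_nonpersistent), which this hunk does not show:

package main

import (
	"fmt"
	"strings"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	// Independent disks are excluded from snapshots; flag them by prefix.
	for _, m := range types.VirtualDiskMode("").Values() {
		independent := strings.HasPrefix(string(m), "independent_")
		fmt.Printf("%-26s independent=%v\n", m, independent)
	}
}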
type VirtualDiskRuleSpecRuleType string const ( @@ -7524,15 +11895,27 @@ const ( VirtualDiskRuleSpecRuleTypeDisabled = VirtualDiskRuleSpecRuleType("disabled") ) +func (e VirtualDiskRuleSpecRuleType) Values() []VirtualDiskRuleSpecRuleType { + return []VirtualDiskRuleSpecRuleType{ + VirtualDiskRuleSpecRuleTypeAffinity, + VirtualDiskRuleSpecRuleTypeAntiAffinity, + VirtualDiskRuleSpecRuleTypeDisabled, + } +} + +func (e VirtualDiskRuleSpecRuleType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualDiskRuleSpecRuleType"] = reflect.TypeOf((*VirtualDiskRuleSpecRuleType)(nil)).Elem() - minAPIVersionForType["VirtualDiskRuleSpecRuleType"] = "6.7" } // The sharing mode of the virtual disk. // // Setting the value to sharingMultiWriter means that multiple virtual // machines can write to the virtual disk. This sharing mode is allowed +// only for eagerly zeroed thick virtual disks. type VirtualDiskSharing string const ( @@ -7542,11 +11925,22 @@ const ( VirtualDiskSharingSharingMultiWriter = VirtualDiskSharing("sharingMultiWriter") ) -func init() { - t["VirtualDiskSharing"] = reflect.TypeOf((*VirtualDiskSharing)(nil)).Elem() - minAPIVersionForType["VirtualDiskSharing"] = "6.0" +func (e VirtualDiskSharing) Values() []VirtualDiskSharing { + return []VirtualDiskSharing{ + VirtualDiskSharingSharingNone, + VirtualDiskSharingSharingMultiWriter, + } } +func (e VirtualDiskSharing) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VirtualDiskSharing"] = reflect.TypeOf((*VirtualDiskSharing)(nil)).Elem() +} + +// The types of virtual disks that can be created or cloned. type VirtualDiskType string const ( @@ -7614,8 +12008,6 @@ const ( // other VMware products. This format is only applicable as a destination // format in a clone operation, and not usable for disk creation. VirtualDiskTypeFlatMonolithic = VirtualDiskType("flatMonolithic") - // - // // Deprecated as of vSphere API 4.x, use `eagerZeroedThick` instead // for clustering application, and `preallocated` for other applications. 
// @@ -7630,17 +12022,33 @@ const ( VirtualDiskTypeThick = VirtualDiskType("thick") ) -func init() { - t["VirtualDiskType"] = reflect.TypeOf((*VirtualDiskType)(nil)).Elem() - minAPIVersionForType["VirtualDiskType"] = "2.5" - minAPIVersionForEnumValue["VirtualDiskType"] = map[string]string{ - "seSparse": "5.1", - "delta": "5.5", - "sparseMonolithic": "4.0", - "flatMonolithic": "4.0", +func (e VirtualDiskType) Values() []VirtualDiskType { + return []VirtualDiskType{ + VirtualDiskTypePreallocated, + VirtualDiskTypeThin, + VirtualDiskTypeSeSparse, + VirtualDiskTypeRdm, + VirtualDiskTypeRdmp, + VirtualDiskTypeRaw, + VirtualDiskTypeDelta, + VirtualDiskTypeSparse2Gb, + VirtualDiskTypeThick2Gb, + VirtualDiskTypeEagerZeroedThick, + VirtualDiskTypeSparseMonolithic, + VirtualDiskTypeFlatMonolithic, + VirtualDiskTypeThick, } } +func (e VirtualDiskType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VirtualDiskType"] = reflect.TypeOf((*VirtualDiskType)(nil)).Elem() +} + +// Pre-defined constants for cache consistency types type VirtualDiskVFlashCacheConfigInfoCacheConsistencyType string const ( @@ -7651,11 +12059,22 @@ const ( VirtualDiskVFlashCacheConfigInfoCacheConsistencyTypeWeak = VirtualDiskVFlashCacheConfigInfoCacheConsistencyType("weak") ) -func init() { - t["VirtualDiskVFlashCacheConfigInfoCacheConsistencyType"] = reflect.TypeOf((*VirtualDiskVFlashCacheConfigInfoCacheConsistencyType)(nil)).Elem() - minAPIVersionForType["VirtualDiskVFlashCacheConfigInfoCacheConsistencyType"] = "5.5" +func (e VirtualDiskVFlashCacheConfigInfoCacheConsistencyType) Values() []VirtualDiskVFlashCacheConfigInfoCacheConsistencyType { + return []VirtualDiskVFlashCacheConfigInfoCacheConsistencyType{ + VirtualDiskVFlashCacheConfigInfoCacheConsistencyTypeStrong, + VirtualDiskVFlashCacheConfigInfoCacheConsistencyTypeWeak, + } } +func (e VirtualDiskVFlashCacheConfigInfoCacheConsistencyType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VirtualDiskVFlashCacheConfigInfoCacheConsistencyType"] = reflect.TypeOf((*VirtualDiskVFlashCacheConfigInfoCacheConsistencyType)(nil)).Elem() +} + +// Pre-defined constants for cache modes. type VirtualDiskVFlashCacheConfigInfoCacheMode string const ( @@ -7673,9 +12092,19 @@ const ( VirtualDiskVFlashCacheConfigInfoCacheModeWrite_back = VirtualDiskVFlashCacheConfigInfoCacheMode("write_back") ) +func (e VirtualDiskVFlashCacheConfigInfoCacheMode) Values() []VirtualDiskVFlashCacheConfigInfoCacheMode { + return []VirtualDiskVFlashCacheConfigInfoCacheMode{ + VirtualDiskVFlashCacheConfigInfoCacheModeWrite_thru, + VirtualDiskVFlashCacheConfigInfoCacheModeWrite_back, + } +} + +func (e VirtualDiskVFlashCacheConfigInfoCacheMode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualDiskVFlashCacheConfigInfoCacheMode"] = reflect.TypeOf((*VirtualDiskVFlashCacheConfigInfoCacheMode)(nil)).Elem() - minAPIVersionForType["VirtualDiskVFlashCacheConfigInfoCacheMode"] = "5.5" } // Possible device names for legacy network backing option are listed below. 
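The VirtualDiskSharing comment above states that multi-writer sharing is only allowed on eagerly zeroed thick disks. A standalone sketch that encodes that rule as a validation helper; the helper name is illustrative, while the constants come straight from the hunks above:

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

// multiWriterAllowed encodes the documented rule: multi-writer sharing is
// only supported on eagerly zeroed thick virtual disks.
func multiWriterAllowed(sharing types.VirtualDiskSharing, diskType types.VirtualDiskType) bool {
	if sharing != types.VirtualDiskSharingSharingMultiWriter {
		return true
	}
	return diskType == types.VirtualDiskTypeEagerZeroedThick
}

func main() {
	fmt.Println(multiWriterAllowed(types.VirtualDiskSharingSharingMultiWriter, types.VirtualDiskTypeThin))             // false
	fmt.Println(multiWriterAllowed(types.VirtualDiskSharingSharingMultiWriter, types.VirtualDiskTypeEagerZeroedThick)) // true
}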
@@ -7695,6 +12124,18 @@ const ( VirtualEthernetCardLegacyNetworkDeviceNameHostonly = VirtualEthernetCardLegacyNetworkDeviceName("hostonly") ) +func (e VirtualEthernetCardLegacyNetworkDeviceName) Values() []VirtualEthernetCardLegacyNetworkDeviceName { + return []VirtualEthernetCardLegacyNetworkDeviceName{ + VirtualEthernetCardLegacyNetworkDeviceNameBridged, + VirtualEthernetCardLegacyNetworkDeviceNameNat, + VirtualEthernetCardLegacyNetworkDeviceNameHostonly, + } +} + +func (e VirtualEthernetCardLegacyNetworkDeviceName) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualEthernetCardLegacyNetworkDeviceName"] = reflect.TypeOf((*VirtualEthernetCardLegacyNetworkDeviceName)(nil)).Elem() } @@ -7711,6 +12152,18 @@ const ( VirtualEthernetCardMacTypeAssigned = VirtualEthernetCardMacType("assigned") ) +func (e VirtualEthernetCardMacType) Values() []VirtualEthernetCardMacType { + return []VirtualEthernetCardMacType{ + VirtualEthernetCardMacTypeManual, + VirtualEthernetCardMacTypeGenerated, + VirtualEthernetCardMacTypeAssigned, + } +} + +func (e VirtualEthernetCardMacType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualEthernetCardMacType"] = reflect.TypeOf((*VirtualEthernetCardMacType)(nil)).Elem() } @@ -7724,11 +12177,23 @@ const ( VirtualHardwareMotherboardLayoutAcpiHostBridges = VirtualHardwareMotherboardLayout("acpiHostBridges") ) +func (e VirtualHardwareMotherboardLayout) Values() []VirtualHardwareMotherboardLayout { + return []VirtualHardwareMotherboardLayout{ + VirtualHardwareMotherboardLayoutI440bxHostBridge, + VirtualHardwareMotherboardLayoutAcpiHostBridges, + } +} + +func (e VirtualHardwareMotherboardLayout) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualHardwareMotherboardLayout"] = reflect.TypeOf((*VirtualHardwareMotherboardLayout)(nil)).Elem() minAPIVersionForType["VirtualHardwareMotherboardLayout"] = "8.0.0.1" } +// Application heartbeat status type. 
type VirtualMachineAppHeartbeatStatusType string const ( @@ -7740,9 +12205,20 @@ const ( VirtualMachineAppHeartbeatStatusTypeAppStatusRed = VirtualMachineAppHeartbeatStatusType("appStatusRed") ) +func (e VirtualMachineAppHeartbeatStatusType) Values() []VirtualMachineAppHeartbeatStatusType { + return []VirtualMachineAppHeartbeatStatusType{ + VirtualMachineAppHeartbeatStatusTypeAppStatusGray, + VirtualMachineAppHeartbeatStatusTypeAppStatusGreen, + VirtualMachineAppHeartbeatStatusTypeAppStatusRed, + } +} + +func (e VirtualMachineAppHeartbeatStatusType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineAppHeartbeatStatusType"] = reflect.TypeOf((*VirtualMachineAppHeartbeatStatusType)(nil)).Elem() - minAPIVersionForType["VirtualMachineAppHeartbeatStatusType"] = "4.1" } type VirtualMachineBootOptionsNetworkBootProtocolType string @@ -7758,9 +12234,19 @@ const ( VirtualMachineBootOptionsNetworkBootProtocolTypeIpv6 = VirtualMachineBootOptionsNetworkBootProtocolType("ipv6") ) +func (e VirtualMachineBootOptionsNetworkBootProtocolType) Values() []VirtualMachineBootOptionsNetworkBootProtocolType { + return []VirtualMachineBootOptionsNetworkBootProtocolType{ + VirtualMachineBootOptionsNetworkBootProtocolTypeIpv4, + VirtualMachineBootOptionsNetworkBootProtocolTypeIpv6, + } +} + +func (e VirtualMachineBootOptionsNetworkBootProtocolType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineBootOptionsNetworkBootProtocolType"] = reflect.TypeOf((*VirtualMachineBootOptionsNetworkBootProtocolType)(nil)).Elem() - minAPIVersionForType["VirtualMachineBootOptionsNetworkBootProtocolType"] = "6.0" } type VirtualMachineCertThumbprintHashAlgorithm string @@ -7770,6 +12256,16 @@ const ( VirtualMachineCertThumbprintHashAlgorithmSha256 = VirtualMachineCertThumbprintHashAlgorithm("sha256") ) +func (e VirtualMachineCertThumbprintHashAlgorithm) Values() []VirtualMachineCertThumbprintHashAlgorithm { + return []VirtualMachineCertThumbprintHashAlgorithm{ + VirtualMachineCertThumbprintHashAlgorithmSha256, + } +} + +func (e VirtualMachineCertThumbprintHashAlgorithm) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineCertThumbprintHashAlgorithm"] = reflect.TypeOf((*VirtualMachineCertThumbprintHashAlgorithm)(nil)).Elem() minAPIVersionForType["VirtualMachineCertThumbprintHashAlgorithm"] = "7.0.3.1" @@ -7791,11 +12287,23 @@ const ( VirtualMachineCloneSpecTpmProvisionPolicyReplace = VirtualMachineCloneSpecTpmProvisionPolicy("replace") ) +func (e VirtualMachineCloneSpecTpmProvisionPolicy) Values() []VirtualMachineCloneSpecTpmProvisionPolicy { + return []VirtualMachineCloneSpecTpmProvisionPolicy{ + VirtualMachineCloneSpecTpmProvisionPolicyCopy, + VirtualMachineCloneSpecTpmProvisionPolicyReplace, + } +} + +func (e VirtualMachineCloneSpecTpmProvisionPolicy) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineCloneSpecTpmProvisionPolicy"] = reflect.TypeOf((*VirtualMachineCloneSpecTpmProvisionPolicy)(nil)).Elem() minAPIVersionForType["VirtualMachineCloneSpecTpmProvisionPolicy"] = "8.0.0.1" } +// The NPIV WWN source type. 
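VirtualMachineAppHeartbeatStatusType above follows the usual gray/green/red status convention, with gray meaning no heartbeat information. A sketch of mapping it into a tri-state health check; the interpretation is the conventional one and is not spelled out in this hunk:

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

// appHealth returns whether the application heartbeat is healthy and whether
// the status is known at all (gray means no information).
func appHealth(s types.VirtualMachineAppHeartbeatStatusType) (ok, known bool) {
	switch s {
	case types.VirtualMachineAppHeartbeatStatusTypeAppStatusGreen:
		return true, true
	case types.VirtualMachineAppHeartbeatStatusTypeAppStatusRed:
		return false, true
	default:
		return false, false
	}
}

func main() {
	for _, s := range types.VirtualMachineAppHeartbeatStatusType("").Values() {
		ok, known := appHealth(s)
		fmt.Printf("%-14s ok=%v known=%v\n", s, ok, known)
	}
}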
type VirtualMachineConfigInfoNpivWwnType string const ( @@ -7807,9 +12315,20 @@ const ( VirtualMachineConfigInfoNpivWwnTypeExternal = VirtualMachineConfigInfoNpivWwnType("external") ) +func (e VirtualMachineConfigInfoNpivWwnType) Values() []VirtualMachineConfigInfoNpivWwnType { + return []VirtualMachineConfigInfoNpivWwnType{ + VirtualMachineConfigInfoNpivWwnTypeVc, + VirtualMachineConfigInfoNpivWwnTypeHost, + VirtualMachineConfigInfoNpivWwnTypeExternal, + } +} + +func (e VirtualMachineConfigInfoNpivWwnType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineConfigInfoNpivWwnType"] = reflect.TypeOf((*VirtualMachineConfigInfoNpivWwnType)(nil)).Elem() - minAPIVersionForType["VirtualMachineConfigInfoNpivWwnType"] = "2.5" } // Available choices for virtual machine swapfile placement policy. @@ -7820,6 +12339,7 @@ func init() { // values except for "inherit" and "vmConfigured" are also valid values for // a compute resource configuration's // `vmSwapPlacement` +// property. type VirtualMachineConfigInfoSwapPlacementType string const ( @@ -7840,9 +12360,20 @@ const ( VirtualMachineConfigInfoSwapPlacementTypeHostLocal = VirtualMachineConfigInfoSwapPlacementType("hostLocal") ) +func (e VirtualMachineConfigInfoSwapPlacementType) Values() []VirtualMachineConfigInfoSwapPlacementType { + return []VirtualMachineConfigInfoSwapPlacementType{ + VirtualMachineConfigInfoSwapPlacementTypeInherit, + VirtualMachineConfigInfoSwapPlacementTypeVmDirectory, + VirtualMachineConfigInfoSwapPlacementTypeHostLocal, + } +} + +func (e VirtualMachineConfigInfoSwapPlacementType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineConfigInfoSwapPlacementType"] = reflect.TypeOf((*VirtualMachineConfigInfoSwapPlacementType)(nil)).Elem() - minAPIVersionForType["VirtualMachineConfigInfoSwapPlacementType"] = "2.5" } // The set of valid encrypted Fault Tolerance modes for a VM. @@ -7866,12 +12397,26 @@ const ( VirtualMachineConfigSpecEncryptedFtModesFtEncryptionRequired = VirtualMachineConfigSpecEncryptedFtModes("ftEncryptionRequired") ) +func (e VirtualMachineConfigSpecEncryptedFtModes) Values() []VirtualMachineConfigSpecEncryptedFtModes { + return []VirtualMachineConfigSpecEncryptedFtModes{ + VirtualMachineConfigSpecEncryptedFtModesFtEncryptionDisabled, + VirtualMachineConfigSpecEncryptedFtModesFtEncryptionOpportunistic, + VirtualMachineConfigSpecEncryptedFtModesFtEncryptionRequired, + } +} + +func (e VirtualMachineConfigSpecEncryptedFtModes) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineConfigSpecEncryptedFtModes"] = reflect.TypeOf((*VirtualMachineConfigSpecEncryptedFtModes)(nil)).Elem() minAPIVersionForType["VirtualMachineConfigSpecEncryptedFtModes"] = "7.0.2.0" } // The set of valid encrypted vMotion modes for a VM. +// +// If the VM is encrypted, its encrypted vMotion mode will be required. 
type VirtualMachineConfigSpecEncryptedVMotionModes string const ( @@ -7889,11 +12434,23 @@ const ( VirtualMachineConfigSpecEncryptedVMotionModesRequired = VirtualMachineConfigSpecEncryptedVMotionModes("required") ) -func init() { - t["VirtualMachineConfigSpecEncryptedVMotionModes"] = reflect.TypeOf((*VirtualMachineConfigSpecEncryptedVMotionModes)(nil)).Elem() - minAPIVersionForType["VirtualMachineConfigSpecEncryptedVMotionModes"] = "6.5" +func (e VirtualMachineConfigSpecEncryptedVMotionModes) Values() []VirtualMachineConfigSpecEncryptedVMotionModes { + return []VirtualMachineConfigSpecEncryptedVMotionModes{ + VirtualMachineConfigSpecEncryptedVMotionModesDisabled, + VirtualMachineConfigSpecEncryptedVMotionModesOpportunistic, + VirtualMachineConfigSpecEncryptedVMotionModesRequired, + } } +func (e VirtualMachineConfigSpecEncryptedVMotionModes) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VirtualMachineConfigSpecEncryptedVMotionModes"] = reflect.TypeOf((*VirtualMachineConfigSpecEncryptedVMotionModes)(nil)).Elem() +} + +// The root WWN operation mode. type VirtualMachineConfigSpecNpivWwnOp string const ( @@ -7911,12 +12468,21 @@ const ( VirtualMachineConfigSpecNpivWwnOpExtend = VirtualMachineConfigSpecNpivWwnOp("extend") ) +func (e VirtualMachineConfigSpecNpivWwnOp) Values() []VirtualMachineConfigSpecNpivWwnOp { + return []VirtualMachineConfigSpecNpivWwnOp{ + VirtualMachineConfigSpecNpivWwnOpGenerate, + VirtualMachineConfigSpecNpivWwnOpSet, + VirtualMachineConfigSpecNpivWwnOpRemove, + VirtualMachineConfigSpecNpivWwnOpExtend, + } +} + +func (e VirtualMachineConfigSpecNpivWwnOp) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineConfigSpecNpivWwnOp"] = reflect.TypeOf((*VirtualMachineConfigSpecNpivWwnOp)(nil)).Elem() - minAPIVersionForType["VirtualMachineConfigSpecNpivWwnOp"] = "2.5" - minAPIVersionForEnumValue["VirtualMachineConfigSpecNpivWwnOp"] = map[string]string{ - "extend": "4.0", - } } // The connectivity state of a virtual machine. @@ -7958,10 +12524,25 @@ const ( VirtualMachineConnectionStateInvalid = VirtualMachineConnectionState("invalid") ) +func (e VirtualMachineConnectionState) Values() []VirtualMachineConnectionState { + return []VirtualMachineConnectionState{ + VirtualMachineConnectionStateConnected, + VirtualMachineConnectionStateDisconnected, + VirtualMachineConnectionStateOrphaned, + VirtualMachineConnectionStateInaccessible, + VirtualMachineConnectionStateInvalid, + } +} + +func (e VirtualMachineConnectionState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineConnectionState"] = reflect.TypeOf((*VirtualMachineConnectionState)(nil)).Elem() } +// The crypto state of a encrypted virtual machine. 
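The encrypted-vMotion comment above notes that an encrypted VM always ends up with the required mode. A sketch of a selection helper that mirrors that rule; the helper and its boolean input are illustrative, while the constants come from the hunk above:

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

// vMotionEncryptionMode mirrors the documented rule: an encrypted VM uses the
// required mode, everything else defaults to opportunistic.
func vMotionEncryptionMode(vmEncrypted bool) types.VirtualMachineConfigSpecEncryptedVMotionModes {
	if vmEncrypted {
		return types.VirtualMachineConfigSpecEncryptedVMotionModesRequired
	}
	return types.VirtualMachineConfigSpecEncryptedVMotionModesOpportunistic
}

func main() {
	fmt.Println(vMotionEncryptionMode(true))
	fmt.Println(vMotionEncryptionMode(false))
}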
type VirtualMachineCryptoState string const ( @@ -7972,9 +12553,19 @@ const ( VirtualMachineCryptoStateLocked = VirtualMachineCryptoState("locked") ) +func (e VirtualMachineCryptoState) Values() []VirtualMachineCryptoState { + return []VirtualMachineCryptoState{ + VirtualMachineCryptoStateUnlocked, + VirtualMachineCryptoStateLocked, + } +} + +func (e VirtualMachineCryptoState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineCryptoState"] = reflect.TypeOf((*VirtualMachineCryptoState)(nil)).Elem() - minAPIVersionForType["VirtualMachineCryptoState"] = "6.7" } type VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOther string @@ -7996,9 +12587,19 @@ const ( VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOtherVmNptIncompatibleNetwork = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOther("vmNptIncompatibleNetwork") ) +func (e VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOther) Values() []VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOther { + return []VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOther{ + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOtherVmNptIncompatibleHost, + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOtherVmNptIncompatibleNetwork, + } +} + +func (e VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOther) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOther"] = reflect.TypeOf((*VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOther)(nil)).Elem() - minAPIVersionForType["VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonOther"] = "4.1" } type VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm string @@ -8026,8 +12627,6 @@ const ( // The virtual machine does not have full memory reservation // required to activate VMDirectPath Gen 2. VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptInsufficientMemoryReservation = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptInsufficientMemoryReservation") - // - // // Deprecated as of vSphere API 6.0. 
// // The virtual machine is configured for Fault Tolerance or @@ -8063,16 +12662,36 @@ const ( VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptVMCIActive = VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm("vmNptVMCIActive") ) +func (e VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm) Values() []VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm { + return []VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm{ + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptIncompatibleGuest, + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptIncompatibleGuestDriver, + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptIncompatibleAdapterType, + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptDisabledOrDisconnectedAdapter, + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptIncompatibleAdapterFeatures, + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptIncompatibleBackingType, + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptInsufficientMemoryReservation, + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptFaultToleranceOrRecordReplayConfigured, + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptConflictingIOChainConfigured, + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptMonitorBlocks, + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptConflictingOperationInProgress, + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptRuntimeError, + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptOutOfIntrVector, + VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVmVmNptVMCIActive, + } +} + +func (e VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm"] = reflect.TypeOf((*VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm)(nil)).Elem() - minAPIVersionForType["VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm"] = "4.1" - minAPIVersionForEnumValue["VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeStateVmDirectPathGen2InactiveReasonVm"] = map[string]string{ - "vmNptVMCIActive": "5.1", - } } // The FaultToleranceState type defines a simple set of states for a // fault tolerant virtual machine: +// disabled, starting, and enabled. 
type VirtualMachineFaultToleranceState string const ( @@ -8115,12 +12734,27 @@ const ( VirtualMachineFaultToleranceStateRunning = VirtualMachineFaultToleranceState("running") ) +func (e VirtualMachineFaultToleranceState) Values() []VirtualMachineFaultToleranceState { + return []VirtualMachineFaultToleranceState{ + VirtualMachineFaultToleranceStateNotConfigured, + VirtualMachineFaultToleranceStateDisabled, + VirtualMachineFaultToleranceStateEnabled, + VirtualMachineFaultToleranceStateNeedSecondary, + VirtualMachineFaultToleranceStateStarting, + VirtualMachineFaultToleranceStateRunning, + } +} + +func (e VirtualMachineFaultToleranceState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineFaultToleranceState"] = reflect.TypeOf((*VirtualMachineFaultToleranceState)(nil)).Elem() - minAPIVersionForType["VirtualMachineFaultToleranceState"] = "4.0" } // The FaultToleranceType defines the type of fault tolerance, if any, +// the virtual machine is configured for. type VirtualMachineFaultToleranceType string const ( @@ -8132,14 +12766,23 @@ const ( VirtualMachineFaultToleranceTypeCheckpointing = VirtualMachineFaultToleranceType("checkpointing") ) -func init() { - t["VirtualMachineFaultToleranceType"] = reflect.TypeOf((*VirtualMachineFaultToleranceType)(nil)).Elem() - minAPIVersionForType["VirtualMachineFaultToleranceType"] = "6.0" - minAPIVersionForEnumValue["VirtualMachineFaultToleranceType"] = map[string]string{ - "unset": "6.0", +func (e VirtualMachineFaultToleranceType) Values() []VirtualMachineFaultToleranceType { + return []VirtualMachineFaultToleranceType{ + VirtualMachineFaultToleranceTypeUnset, + VirtualMachineFaultToleranceTypeRecordReplay, + VirtualMachineFaultToleranceTypeCheckpointing, } } +func (e VirtualMachineFaultToleranceType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VirtualMachineFaultToleranceType"] = reflect.TypeOf((*VirtualMachineFaultToleranceType)(nil)).Elem() +} + +// File-type constants. 
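VirtualMachineFaultToleranceState and VirtualMachineFaultToleranceType above are typically read together: the state says whether protection is active, the type says which FT mechanism is configured. A small sketch of an operator-facing summary built from those constants; the wording of the messages is illustrative:

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func summarize(state types.VirtualMachineFaultToleranceState, ftType types.VirtualMachineFaultToleranceType) string {
	switch state {
	case types.VirtualMachineFaultToleranceStateRunning:
		return fmt.Sprintf("protected (%s fault tolerance)", ftType)
	case types.VirtualMachineFaultToleranceStateNeedSecondary:
		return "FT enabled, but the secondary VM is not running"
	case types.VirtualMachineFaultToleranceStateNotConfigured:
		return "fault tolerance not configured"
	default:
		return fmt.Sprintf("fault tolerance not active (state %s)", state)
	}
}

func main() {
	fmt.Println(summarize(types.VirtualMachineFaultToleranceStateRunning, types.VirtualMachineFaultToleranceTypeCheckpointing))
	fmt.Println(summarize(types.VirtualMachineFaultToleranceStateNeedSecondary, types.VirtualMachineFaultToleranceTypeCheckpointing))
}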
type VirtualMachineFileLayoutExFileType string const ( @@ -8201,25 +12844,49 @@ const ( VirtualMachineFileLayoutExFileTypeGuestCustomization = VirtualMachineFileLayoutExFileType("guestCustomization") ) -func init() { - t["VirtualMachineFileLayoutExFileType"] = reflect.TypeOf((*VirtualMachineFileLayoutExFileType)(nil)).Elem() - minAPIVersionForType["VirtualMachineFileLayoutExFileType"] = "4.0" - minAPIVersionForEnumValue["VirtualMachineFileLayoutExFileType"] = map[string]string{ - "digestDescriptor": "5.0", - "digestExtent": "5.0", - "diskReplicationState": "5.0", - "namespaceData": "5.1", - "dataSetsDiskModeStore": "8.0.0.0", - "dataSetsVmModeStore": "8.0.0.0", - "snapshotMemory": "6.0", - "snapshotManifestList": "5.0", - "suspendMemory": "6.0", - "uwswap": "5.0", - "ftMetadata": "6.0", - "guestCustomization": "6.0", +func (e VirtualMachineFileLayoutExFileType) Values() []VirtualMachineFileLayoutExFileType { + return []VirtualMachineFileLayoutExFileType{ + VirtualMachineFileLayoutExFileTypeConfig, + VirtualMachineFileLayoutExFileTypeExtendedConfig, + VirtualMachineFileLayoutExFileTypeDiskDescriptor, + VirtualMachineFileLayoutExFileTypeDiskExtent, + VirtualMachineFileLayoutExFileTypeDigestDescriptor, + VirtualMachineFileLayoutExFileTypeDigestExtent, + VirtualMachineFileLayoutExFileTypeDiskReplicationState, + VirtualMachineFileLayoutExFileTypeLog, + VirtualMachineFileLayoutExFileTypeStat, + VirtualMachineFileLayoutExFileTypeNamespaceData, + VirtualMachineFileLayoutExFileTypeDataSetsDiskModeStore, + VirtualMachineFileLayoutExFileTypeDataSetsVmModeStore, + VirtualMachineFileLayoutExFileTypeNvram, + VirtualMachineFileLayoutExFileTypeSnapshotData, + VirtualMachineFileLayoutExFileTypeSnapshotMemory, + VirtualMachineFileLayoutExFileTypeSnapshotList, + VirtualMachineFileLayoutExFileTypeSnapshotManifestList, + VirtualMachineFileLayoutExFileTypeSuspend, + VirtualMachineFileLayoutExFileTypeSuspendMemory, + VirtualMachineFileLayoutExFileTypeSwap, + VirtualMachineFileLayoutExFileTypeUwswap, + VirtualMachineFileLayoutExFileTypeCore, + VirtualMachineFileLayoutExFileTypeScreenshot, + VirtualMachineFileLayoutExFileTypeFtMetadata, + VirtualMachineFileLayoutExFileTypeGuestCustomization, } } +func (e VirtualMachineFileLayoutExFileType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VirtualMachineFileLayoutExFileType"] = reflect.TypeOf((*VirtualMachineFileLayoutExFileType)(nil)).Elem() + minAPIVersionForEnumValue["VirtualMachineFileLayoutExFileType"] = map[string]string{ + "dataSetsDiskModeStore": "8.0.0.0", + "dataSetsVmModeStore": "8.0.0.0", + } +} + +// Set of possible values for `VirtualMachineFlagInfo.monitorType`. 
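VirtualMachineFileLayoutExFileType above is the largest of these enums, and its init() now keeps per-value API minimums only for the two dataSets store types introduced in 8.0.0.0. A sketch of indexing the generated value list for membership checks, which is the typical consumer-side use; the futureType placeholder is hypothetical:

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	// Index the enum once for O(1) membership checks while walking a VM's
	// extended file layout.
	known := make(map[types.VirtualMachineFileLayoutExFileType]bool)
	for _, ft := range types.VirtualMachineFileLayoutExFileType("").Values() {
		known[ft] = true
	}

	// A file type reported by a newer vCenter than this client understands
	// simply won't be in the map.
	fmt.Println(known[types.VirtualMachineFileLayoutExFileTypeNvram])          // true
	fmt.Println(known[types.VirtualMachineFileLayoutExFileType("futureType")]) // false
}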
type VirtualMachineFlagInfoMonitorType string const ( @@ -8231,11 +12898,23 @@ const ( VirtualMachineFlagInfoMonitorTypeStats = VirtualMachineFlagInfoMonitorType("stats") ) -func init() { - t["VirtualMachineFlagInfoMonitorType"] = reflect.TypeOf((*VirtualMachineFlagInfoMonitorType)(nil)).Elem() - minAPIVersionForType["VirtualMachineFlagInfoMonitorType"] = "2.5" +func (e VirtualMachineFlagInfoMonitorType) Values() []VirtualMachineFlagInfoMonitorType { + return []VirtualMachineFlagInfoMonitorType{ + VirtualMachineFlagInfoMonitorTypeRelease, + VirtualMachineFlagInfoMonitorTypeDebug, + VirtualMachineFlagInfoMonitorTypeStats, + } } +func (e VirtualMachineFlagInfoMonitorType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VirtualMachineFlagInfoMonitorType"] = reflect.TypeOf((*VirtualMachineFlagInfoMonitorType)(nil)).Elem() +} + +// Set of possible values for `VirtualMachineFlagInfo.virtualExecUsage`. type VirtualMachineFlagInfoVirtualExecUsage string const ( @@ -8247,11 +12926,23 @@ const ( VirtualMachineFlagInfoVirtualExecUsageHvOff = VirtualMachineFlagInfoVirtualExecUsage("hvOff") ) -func init() { - t["VirtualMachineFlagInfoVirtualExecUsage"] = reflect.TypeOf((*VirtualMachineFlagInfoVirtualExecUsage)(nil)).Elem() - minAPIVersionForType["VirtualMachineFlagInfoVirtualExecUsage"] = "4.0" +func (e VirtualMachineFlagInfoVirtualExecUsage) Values() []VirtualMachineFlagInfoVirtualExecUsage { + return []VirtualMachineFlagInfoVirtualExecUsage{ + VirtualMachineFlagInfoVirtualExecUsageHvAuto, + VirtualMachineFlagInfoVirtualExecUsageHvOn, + VirtualMachineFlagInfoVirtualExecUsageHvOff, + } } +func (e VirtualMachineFlagInfoVirtualExecUsage) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VirtualMachineFlagInfoVirtualExecUsage"] = reflect.TypeOf((*VirtualMachineFlagInfoVirtualExecUsage)(nil)).Elem() +} + +// Set of possible values for `VirtualMachineFlagInfo.virtualMmuUsage`. type VirtualMachineFlagInfoVirtualMmuUsage string const ( @@ -8263,14 +12954,26 @@ const ( VirtualMachineFlagInfoVirtualMmuUsageOff = VirtualMachineFlagInfoVirtualMmuUsage("off") ) +func (e VirtualMachineFlagInfoVirtualMmuUsage) Values() []VirtualMachineFlagInfoVirtualMmuUsage { + return []VirtualMachineFlagInfoVirtualMmuUsage{ + VirtualMachineFlagInfoVirtualMmuUsageAutomatic, + VirtualMachineFlagInfoVirtualMmuUsageOn, + VirtualMachineFlagInfoVirtualMmuUsageOff, + } +} + +func (e VirtualMachineFlagInfoVirtualMmuUsage) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineFlagInfoVirtualMmuUsage"] = reflect.TypeOf((*VirtualMachineFlagInfoVirtualMmuUsage)(nil)).Elem() - minAPIVersionForType["VirtualMachineFlagInfoVirtualMmuUsage"] = "2.5" } // Fork child type. // // A child could be type of none, persistent, or +// nonpersistent. 
type VirtualMachineForkConfigInfoChildType string const ( @@ -8282,9 +12985,20 @@ const ( VirtualMachineForkConfigInfoChildTypeNonpersistent = VirtualMachineForkConfigInfoChildType("nonpersistent") ) +func (e VirtualMachineForkConfigInfoChildType) Values() []VirtualMachineForkConfigInfoChildType { + return []VirtualMachineForkConfigInfoChildType{ + VirtualMachineForkConfigInfoChildTypeNone, + VirtualMachineForkConfigInfoChildTypePersistent, + VirtualMachineForkConfigInfoChildTypeNonpersistent, + } +} + +func (e VirtualMachineForkConfigInfoChildType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineForkConfigInfoChildType"] = reflect.TypeOf((*VirtualMachineForkConfigInfoChildType)(nil)).Elem() - minAPIVersionForType["VirtualMachineForkConfigInfoChildType"] = "6.0" } // Guest operating system family constants. @@ -8305,11 +13019,23 @@ const ( VirtualMachineGuestOsFamilyOtherGuestFamily = VirtualMachineGuestOsFamily("otherGuestFamily") ) +func (e VirtualMachineGuestOsFamily) Values() []VirtualMachineGuestOsFamily { + return []VirtualMachineGuestOsFamily{ + VirtualMachineGuestOsFamilyWindowsGuest, + VirtualMachineGuestOsFamilyLinuxGuest, + VirtualMachineGuestOsFamilyNetwareGuest, + VirtualMachineGuestOsFamilySolarisGuest, + VirtualMachineGuestOsFamilyDarwinGuestFamily, + VirtualMachineGuestOsFamilyOtherGuestFamily, + } +} + +func (e VirtualMachineGuestOsFamily) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineGuestOsFamily"] = reflect.TypeOf((*VirtualMachineGuestOsFamily)(nil)).Elem() - minAPIVersionForEnumValue["VirtualMachineGuestOsFamily"] = map[string]string{ - "darwinGuestFamily": "5.0", - } } // Guest operating system identifier. @@ -8698,6 +13424,8 @@ const ( VirtualMachineGuestOsIdentifierAmazonlinux3_64Guest = VirtualMachineGuestOsIdentifier("amazonlinux3_64Guest") // CRX Pod 1 VirtualMachineGuestOsIdentifierCrxPod1Guest = VirtualMachineGuestOsIdentifier("crxPod1Guest") + // CRX Sys 1 + VirtualMachineGuestOsIdentifierCrxSys1Guest = VirtualMachineGuestOsIdentifier("crxSys1Guest") // Rocky Linux (64-bit) VirtualMachineGuestOsIdentifierRockylinux_64Guest = VirtualMachineGuestOsIdentifier("rockylinux_64Guest") // AlmaLinux (64-bit) @@ -8708,145 +13436,240 @@ const ( VirtualMachineGuestOsIdentifierOtherGuest64 = VirtualMachineGuestOsIdentifier("otherGuest64") ) +func (e VirtualMachineGuestOsIdentifier) Values() []VirtualMachineGuestOsIdentifier { + return []VirtualMachineGuestOsIdentifier{ + VirtualMachineGuestOsIdentifierDosGuest, + VirtualMachineGuestOsIdentifierWin31Guest, + VirtualMachineGuestOsIdentifierWin95Guest, + VirtualMachineGuestOsIdentifierWin98Guest, + VirtualMachineGuestOsIdentifierWinMeGuest, + VirtualMachineGuestOsIdentifierWinNTGuest, + VirtualMachineGuestOsIdentifierWin2000ProGuest, + VirtualMachineGuestOsIdentifierWin2000ServGuest, + VirtualMachineGuestOsIdentifierWin2000AdvServGuest, + VirtualMachineGuestOsIdentifierWinXPHomeGuest, + VirtualMachineGuestOsIdentifierWinXPProGuest, + VirtualMachineGuestOsIdentifierWinXPPro64Guest, + VirtualMachineGuestOsIdentifierWinNetWebGuest, + VirtualMachineGuestOsIdentifierWinNetStandardGuest, + VirtualMachineGuestOsIdentifierWinNetEnterpriseGuest, + VirtualMachineGuestOsIdentifierWinNetDatacenterGuest, + VirtualMachineGuestOsIdentifierWinNetBusinessGuest, + VirtualMachineGuestOsIdentifierWinNetStandard64Guest, + VirtualMachineGuestOsIdentifierWinNetEnterprise64Guest, + VirtualMachineGuestOsIdentifierWinLonghornGuest, + 
VirtualMachineGuestOsIdentifierWinLonghorn64Guest, + VirtualMachineGuestOsIdentifierWinNetDatacenter64Guest, + VirtualMachineGuestOsIdentifierWinVistaGuest, + VirtualMachineGuestOsIdentifierWinVista64Guest, + VirtualMachineGuestOsIdentifierWindows7Guest, + VirtualMachineGuestOsIdentifierWindows7_64Guest, + VirtualMachineGuestOsIdentifierWindows7Server64Guest, + VirtualMachineGuestOsIdentifierWindows8Guest, + VirtualMachineGuestOsIdentifierWindows8_64Guest, + VirtualMachineGuestOsIdentifierWindows8Server64Guest, + VirtualMachineGuestOsIdentifierWindows9Guest, + VirtualMachineGuestOsIdentifierWindows9_64Guest, + VirtualMachineGuestOsIdentifierWindows9Server64Guest, + VirtualMachineGuestOsIdentifierWindows11_64Guest, + VirtualMachineGuestOsIdentifierWindows12_64Guest, + VirtualMachineGuestOsIdentifierWindowsHyperVGuest, + VirtualMachineGuestOsIdentifierWindows2019srv_64Guest, + VirtualMachineGuestOsIdentifierWindows2019srvNext_64Guest, + VirtualMachineGuestOsIdentifierWindows2022srvNext_64Guest, + VirtualMachineGuestOsIdentifierFreebsdGuest, + VirtualMachineGuestOsIdentifierFreebsd64Guest, + VirtualMachineGuestOsIdentifierFreebsd11Guest, + VirtualMachineGuestOsIdentifierFreebsd11_64Guest, + VirtualMachineGuestOsIdentifierFreebsd12Guest, + VirtualMachineGuestOsIdentifierFreebsd12_64Guest, + VirtualMachineGuestOsIdentifierFreebsd13Guest, + VirtualMachineGuestOsIdentifierFreebsd13_64Guest, + VirtualMachineGuestOsIdentifierFreebsd14Guest, + VirtualMachineGuestOsIdentifierFreebsd14_64Guest, + VirtualMachineGuestOsIdentifierRedhatGuest, + VirtualMachineGuestOsIdentifierRhel2Guest, + VirtualMachineGuestOsIdentifierRhel3Guest, + VirtualMachineGuestOsIdentifierRhel3_64Guest, + VirtualMachineGuestOsIdentifierRhel4Guest, + VirtualMachineGuestOsIdentifierRhel4_64Guest, + VirtualMachineGuestOsIdentifierRhel5Guest, + VirtualMachineGuestOsIdentifierRhel5_64Guest, + VirtualMachineGuestOsIdentifierRhel6Guest, + VirtualMachineGuestOsIdentifierRhel6_64Guest, + VirtualMachineGuestOsIdentifierRhel7Guest, + VirtualMachineGuestOsIdentifierRhel7_64Guest, + VirtualMachineGuestOsIdentifierRhel8_64Guest, + VirtualMachineGuestOsIdentifierRhel9_64Guest, + VirtualMachineGuestOsIdentifierCentosGuest, + VirtualMachineGuestOsIdentifierCentos64Guest, + VirtualMachineGuestOsIdentifierCentos6Guest, + VirtualMachineGuestOsIdentifierCentos6_64Guest, + VirtualMachineGuestOsIdentifierCentos7Guest, + VirtualMachineGuestOsIdentifierCentos7_64Guest, + VirtualMachineGuestOsIdentifierCentos8_64Guest, + VirtualMachineGuestOsIdentifierCentos9_64Guest, + VirtualMachineGuestOsIdentifierOracleLinuxGuest, + VirtualMachineGuestOsIdentifierOracleLinux64Guest, + VirtualMachineGuestOsIdentifierOracleLinux6Guest, + VirtualMachineGuestOsIdentifierOracleLinux6_64Guest, + VirtualMachineGuestOsIdentifierOracleLinux7Guest, + VirtualMachineGuestOsIdentifierOracleLinux7_64Guest, + VirtualMachineGuestOsIdentifierOracleLinux8_64Guest, + VirtualMachineGuestOsIdentifierOracleLinux9_64Guest, + VirtualMachineGuestOsIdentifierSuseGuest, + VirtualMachineGuestOsIdentifierSuse64Guest, + VirtualMachineGuestOsIdentifierSlesGuest, + VirtualMachineGuestOsIdentifierSles64Guest, + VirtualMachineGuestOsIdentifierSles10Guest, + VirtualMachineGuestOsIdentifierSles10_64Guest, + VirtualMachineGuestOsIdentifierSles11Guest, + VirtualMachineGuestOsIdentifierSles11_64Guest, + VirtualMachineGuestOsIdentifierSles12Guest, + VirtualMachineGuestOsIdentifierSles12_64Guest, + VirtualMachineGuestOsIdentifierSles15_64Guest, + VirtualMachineGuestOsIdentifierSles16_64Guest, + 
VirtualMachineGuestOsIdentifierNld9Guest, + VirtualMachineGuestOsIdentifierOesGuest, + VirtualMachineGuestOsIdentifierSjdsGuest, + VirtualMachineGuestOsIdentifierMandrakeGuest, + VirtualMachineGuestOsIdentifierMandrivaGuest, + VirtualMachineGuestOsIdentifierMandriva64Guest, + VirtualMachineGuestOsIdentifierTurboLinuxGuest, + VirtualMachineGuestOsIdentifierTurboLinux64Guest, + VirtualMachineGuestOsIdentifierUbuntuGuest, + VirtualMachineGuestOsIdentifierUbuntu64Guest, + VirtualMachineGuestOsIdentifierDebian4Guest, + VirtualMachineGuestOsIdentifierDebian4_64Guest, + VirtualMachineGuestOsIdentifierDebian5Guest, + VirtualMachineGuestOsIdentifierDebian5_64Guest, + VirtualMachineGuestOsIdentifierDebian6Guest, + VirtualMachineGuestOsIdentifierDebian6_64Guest, + VirtualMachineGuestOsIdentifierDebian7Guest, + VirtualMachineGuestOsIdentifierDebian7_64Guest, + VirtualMachineGuestOsIdentifierDebian8Guest, + VirtualMachineGuestOsIdentifierDebian8_64Guest, + VirtualMachineGuestOsIdentifierDebian9Guest, + VirtualMachineGuestOsIdentifierDebian9_64Guest, + VirtualMachineGuestOsIdentifierDebian10Guest, + VirtualMachineGuestOsIdentifierDebian10_64Guest, + VirtualMachineGuestOsIdentifierDebian11Guest, + VirtualMachineGuestOsIdentifierDebian11_64Guest, + VirtualMachineGuestOsIdentifierDebian12Guest, + VirtualMachineGuestOsIdentifierDebian12_64Guest, + VirtualMachineGuestOsIdentifierAsianux3Guest, + VirtualMachineGuestOsIdentifierAsianux3_64Guest, + VirtualMachineGuestOsIdentifierAsianux4Guest, + VirtualMachineGuestOsIdentifierAsianux4_64Guest, + VirtualMachineGuestOsIdentifierAsianux5_64Guest, + VirtualMachineGuestOsIdentifierAsianux7_64Guest, + VirtualMachineGuestOsIdentifierAsianux8_64Guest, + VirtualMachineGuestOsIdentifierAsianux9_64Guest, + VirtualMachineGuestOsIdentifierOpensuseGuest, + VirtualMachineGuestOsIdentifierOpensuse64Guest, + VirtualMachineGuestOsIdentifierFedoraGuest, + VirtualMachineGuestOsIdentifierFedora64Guest, + VirtualMachineGuestOsIdentifierCoreos64Guest, + VirtualMachineGuestOsIdentifierVmwarePhoton64Guest, + VirtualMachineGuestOsIdentifierOther24xLinuxGuest, + VirtualMachineGuestOsIdentifierOther26xLinuxGuest, + VirtualMachineGuestOsIdentifierOtherLinuxGuest, + VirtualMachineGuestOsIdentifierOther3xLinuxGuest, + VirtualMachineGuestOsIdentifierOther4xLinuxGuest, + VirtualMachineGuestOsIdentifierOther5xLinuxGuest, + VirtualMachineGuestOsIdentifierOther6xLinuxGuest, + VirtualMachineGuestOsIdentifierGenericLinuxGuest, + VirtualMachineGuestOsIdentifierOther24xLinux64Guest, + VirtualMachineGuestOsIdentifierOther26xLinux64Guest, + VirtualMachineGuestOsIdentifierOther3xLinux64Guest, + VirtualMachineGuestOsIdentifierOther4xLinux64Guest, + VirtualMachineGuestOsIdentifierOther5xLinux64Guest, + VirtualMachineGuestOsIdentifierOther6xLinux64Guest, + VirtualMachineGuestOsIdentifierOtherLinux64Guest, + VirtualMachineGuestOsIdentifierSolaris6Guest, + VirtualMachineGuestOsIdentifierSolaris7Guest, + VirtualMachineGuestOsIdentifierSolaris8Guest, + VirtualMachineGuestOsIdentifierSolaris9Guest, + VirtualMachineGuestOsIdentifierSolaris10Guest, + VirtualMachineGuestOsIdentifierSolaris10_64Guest, + VirtualMachineGuestOsIdentifierSolaris11_64Guest, + VirtualMachineGuestOsIdentifierOs2Guest, + VirtualMachineGuestOsIdentifierEComStationGuest, + VirtualMachineGuestOsIdentifierEComStation2Guest, + VirtualMachineGuestOsIdentifierNetware4Guest, + VirtualMachineGuestOsIdentifierNetware5Guest, + VirtualMachineGuestOsIdentifierNetware6Guest, + VirtualMachineGuestOsIdentifierOpenServer5Guest, + 
VirtualMachineGuestOsIdentifierOpenServer6Guest, + VirtualMachineGuestOsIdentifierUnixWare7Guest, + VirtualMachineGuestOsIdentifierDarwinGuest, + VirtualMachineGuestOsIdentifierDarwin64Guest, + VirtualMachineGuestOsIdentifierDarwin10Guest, + VirtualMachineGuestOsIdentifierDarwin10_64Guest, + VirtualMachineGuestOsIdentifierDarwin11Guest, + VirtualMachineGuestOsIdentifierDarwin11_64Guest, + VirtualMachineGuestOsIdentifierDarwin12_64Guest, + VirtualMachineGuestOsIdentifierDarwin13_64Guest, + VirtualMachineGuestOsIdentifierDarwin14_64Guest, + VirtualMachineGuestOsIdentifierDarwin15_64Guest, + VirtualMachineGuestOsIdentifierDarwin16_64Guest, + VirtualMachineGuestOsIdentifierDarwin17_64Guest, + VirtualMachineGuestOsIdentifierDarwin18_64Guest, + VirtualMachineGuestOsIdentifierDarwin19_64Guest, + VirtualMachineGuestOsIdentifierDarwin20_64Guest, + VirtualMachineGuestOsIdentifierDarwin21_64Guest, + VirtualMachineGuestOsIdentifierDarwin22_64Guest, + VirtualMachineGuestOsIdentifierDarwin23_64Guest, + VirtualMachineGuestOsIdentifierVmkernelGuest, + VirtualMachineGuestOsIdentifierVmkernel5Guest, + VirtualMachineGuestOsIdentifierVmkernel6Guest, + VirtualMachineGuestOsIdentifierVmkernel65Guest, + VirtualMachineGuestOsIdentifierVmkernel7Guest, + VirtualMachineGuestOsIdentifierVmkernel8Guest, + VirtualMachineGuestOsIdentifierAmazonlinux2_64Guest, + VirtualMachineGuestOsIdentifierAmazonlinux3_64Guest, + VirtualMachineGuestOsIdentifierCrxPod1Guest, + VirtualMachineGuestOsIdentifierCrxSys1Guest, + VirtualMachineGuestOsIdentifierRockylinux_64Guest, + VirtualMachineGuestOsIdentifierAlmalinux_64Guest, + VirtualMachineGuestOsIdentifierOtherGuest, + VirtualMachineGuestOsIdentifierOtherGuest64, + } +} + +func (e VirtualMachineGuestOsIdentifier) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineGuestOsIdentifier"] = reflect.TypeOf((*VirtualMachineGuestOsIdentifier)(nil)).Elem() minAPIVersionForEnumValue["VirtualMachineGuestOsIdentifier"] = map[string]string{ - "winNetDatacenterGuest": "2.5", - "winLonghornGuest": "2.5", - "winLonghorn64Guest": "2.5", - "winNetDatacenter64Guest": "2.5", - "windows7Guest": "4.0", - "windows7_64Guest": "4.0", - "windows7Server64Guest": "4.0", - "windows8Guest": "5.0", - "windows8_64Guest": "5.0", - "windows8Server64Guest": "5.0", - "windows9Guest": "6.0", - "windows9_64Guest": "6.0", - "windows9Server64Guest": "6.0", "windows11_64Guest": "8.0.0.1", "windows12_64Guest": "8.0.0.1", - "windowsHyperVGuest": "5.5", - "windows2019srv_64Guest": "7.0", "windows2019srvNext_64Guest": "7.0.1.0", "windows2022srvNext_64Guest": "8.0.0.1", - "freebsd11Guest": "6.7", - "freebsd11_64Guest": "6.7", - "freebsd12Guest": "6.7", - "freebsd12_64Guest": "6.7", "freebsd13Guest": "7.0.1.0", "freebsd13_64Guest": "7.0.1.0", "freebsd14Guest": "8.0.0.1", "freebsd14_64Guest": "8.0.0.1", - "rhel5Guest": "2.5", - "rhel5_64Guest": "2.5", - "rhel6Guest": "4.0", - "rhel6_64Guest": "4.0", - "rhel7Guest": "5.5", - "rhel7_64Guest": "5.5", - "rhel8_64Guest": "6.7", "rhel9_64Guest": "7.0.1.0", - "centosGuest": "4.1", - "centos64Guest": "4.1", - "centos6Guest": "6.5", - "centos6_64Guest": "6.5", - "centos7Guest": "6.5", - "centos7_64Guest": "6.5", - "centos8_64Guest": "6.7", "centos9_64Guest": "7.0.1.0", - "oracleLinuxGuest": "4.1", - "oracleLinux64Guest": "4.1", - "oracleLinux6Guest": "6.5", - "oracleLinux6_64Guest": "6.5", - "oracleLinux7Guest": "6.5", - "oracleLinux7_64Guest": "6.5", - "oracleLinux8_64Guest": "6.7", "oracleLinux9_64Guest": "7.0.1.0", - "sles10Guest": "2.5", - 
"sles10_64Guest": "2.5", - "sles11Guest": "4.0", - "sles11_64Guest": "4.0", - "sles12Guest": "5.5", - "sles12_64Guest": "5.5", - "sles15_64Guest": "6.7", "sles16_64Guest": "7.0.1.0", - "mandrakeGuest": "5.5", - "mandrivaGuest": "2.5 U2", - "mandriva64Guest": "2.5 U2", - "turboLinux64Guest": "2.5 U2", - "debian4Guest": "2.5 U2", - "debian4_64Guest": "2.5 U2", - "debian5Guest": "4.0", - "debian5_64Guest": "4.0", - "debian6Guest": "5.0", - "debian6_64Guest": "5.0", - "debian7Guest": "5.5", - "debian7_64Guest": "5.5", - "debian8Guest": "6.0", - "debian8_64Guest": "6.0", - "debian9Guest": "6.5", - "debian9_64Guest": "6.5", - "debian10Guest": "6.5", - "debian10_64Guest": "6.5", - "debian11Guest": "7.0", - "debian11_64Guest": "7.0", "debian12Guest": "8.0.0.1", "debian12_64Guest": "8.0.0.1", - "asianux3Guest": "2.5 U2", - "asianux3_64Guest": "2.5 U2", - "asianux4Guest": "4.0", - "asianux4_64Guest": "4.0", - "asianux5_64Guest": "6.0", - "asianux7_64Guest": "6.5", - "asianux8_64Guest": "6.7", "asianux9_64Guest": "7.0.1.0", - "opensuseGuest": "5.1", - "opensuse64Guest": "5.1", - "fedoraGuest": "5.1", - "fedora64Guest": "5.1", - "coreos64Guest": "6.0", - "vmwarePhoton64Guest": "6.5", - "other3xLinuxGuest": "5.5", - "other4xLinuxGuest": "6.7", "other5xLinuxGuest": "7.0.1.0", "other6xLinuxGuest": "8.0.0.1", - "genericLinuxGuest": "5.5", - "other3xLinux64Guest": "5.5", - "other4xLinux64Guest": "6.7", "other5xLinux64Guest": "7.0.1.0", "other6xLinux64Guest": "8.0.0.1", - "solaris11_64Guest": "5.0", - "eComStationGuest": "4.1", - "eComStation2Guest": "5.0", - "openServer5Guest": "2.5 U2", - "openServer6Guest": "2.5 U2", - "unixWare7Guest": "2.5 U2", - "darwin64Guest": "4.0", - "darwin10Guest": "5.0", - "darwin10_64Guest": "5.0", - "darwin11Guest": "5.0", - "darwin11_64Guest": "5.0", - "darwin12_64Guest": "5.5", - "darwin13_64Guest": "5.5", - "darwin14_64Guest": "6.0", - "darwin15_64Guest": "6.5", - "darwin16_64Guest": "6.5", - "darwin17_64Guest": "6.7", - "darwin18_64Guest": "6.7", - "darwin19_64Guest": "7.0", "darwin20_64Guest": "7.0.1.0", "darwin21_64Guest": "7.0.1.0", "darwin22_64Guest": "8.0.0.1", "darwin23_64Guest": "8.0.0.1", - "vmkernelGuest": "5.0", - "vmkernel5Guest": "5.0", - "vmkernel6Guest": "6.0", - "vmkernel65Guest": "6.5", - "vmkernel7Guest": "7.0", "vmkernel8Guest": "8.0.0.1", - "amazonlinux2_64Guest": "6.7.1", "amazonlinux3_64Guest": "7.0.1.0", - "crxPod1Guest": "7.0", + "crxSys1Guest": "8.0.3.0", "rockylinux_64Guest": "8.0.0.1", "almalinux_64Guest": "8.0.0.1", } @@ -8865,6 +13688,21 @@ const ( VirtualMachineGuestStateUnknown = VirtualMachineGuestState("unknown") ) +func (e VirtualMachineGuestState) Values() []VirtualMachineGuestState { + return []VirtualMachineGuestState{ + VirtualMachineGuestStateRunning, + VirtualMachineGuestStateShuttingDown, + VirtualMachineGuestStateResetting, + VirtualMachineGuestStateStandby, + VirtualMachineGuestStateNotRunning, + VirtualMachineGuestStateUnknown, + } +} + +func (e VirtualMachineGuestState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineGuestState"] = reflect.TypeOf((*VirtualMachineGuestState)(nil)).Elem() } @@ -8899,10 +13737,23 @@ const ( VirtualMachineHtSharingInternal = VirtualMachineHtSharing("internal") ) +func (e VirtualMachineHtSharing) Values() []VirtualMachineHtSharing { + return []VirtualMachineHtSharing{ + VirtualMachineHtSharingAny, + VirtualMachineHtSharingNone, + VirtualMachineHtSharingInternal, + } +} + +func (e VirtualMachineHtSharing) Strings() []string { + return 
EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineHtSharing"] = reflect.TypeOf((*VirtualMachineHtSharing)(nil)).Elem() } +// Means for allocating additional memory for virtual machines. type VirtualMachineMemoryAllocationPolicy string const ( @@ -8914,11 +13765,23 @@ const ( VirtualMachineMemoryAllocationPolicySwapMost = VirtualMachineMemoryAllocationPolicy("swapMost") ) -func init() { - t["VirtualMachineMemoryAllocationPolicy"] = reflect.TypeOf((*VirtualMachineMemoryAllocationPolicy)(nil)).Elem() - minAPIVersionForType["VirtualMachineMemoryAllocationPolicy"] = "2.5" +func (e VirtualMachineMemoryAllocationPolicy) Values() []VirtualMachineMemoryAllocationPolicy { + return []VirtualMachineMemoryAllocationPolicy{ + VirtualMachineMemoryAllocationPolicySwapNone, + VirtualMachineMemoryAllocationPolicySwapSome, + VirtualMachineMemoryAllocationPolicySwapMost, + } } +func (e VirtualMachineMemoryAllocationPolicy) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VirtualMachineMemoryAllocationPolicy"] = reflect.TypeOf((*VirtualMachineMemoryAllocationPolicy)(nil)).Elem() +} + +// This enum represents the set of legal operations type VirtualMachineMetadataManagerVmMetadataOp string const ( @@ -8928,21 +13791,41 @@ const ( VirtualMachineMetadataManagerVmMetadataOpRemove = VirtualMachineMetadataManagerVmMetadataOp("Remove") ) +func (e VirtualMachineMetadataManagerVmMetadataOp) Values() []VirtualMachineMetadataManagerVmMetadataOp { + return []VirtualMachineMetadataManagerVmMetadataOp{ + VirtualMachineMetadataManagerVmMetadataOpUpdate, + VirtualMachineMetadataManagerVmMetadataOpRemove, + } +} + +func (e VirtualMachineMetadataManagerVmMetadataOp) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineMetadataManagerVmMetadataOp"] = reflect.TypeOf((*VirtualMachineMetadataManagerVmMetadataOp)(nil)).Elem() - minAPIVersionForType["VirtualMachineMetadataManagerVmMetadataOp"] = "5.5" } // This enum contains a list of valid owner values for +// the name field type VirtualMachineMetadataManagerVmMetadataOwnerOwner string const ( VirtualMachineMetadataManagerVmMetadataOwnerOwnerComVmwareVsphereHA = VirtualMachineMetadataManagerVmMetadataOwnerOwner("ComVmwareVsphereHA") ) +func (e VirtualMachineMetadataManagerVmMetadataOwnerOwner) Values() []VirtualMachineMetadataManagerVmMetadataOwnerOwner { + return []VirtualMachineMetadataManagerVmMetadataOwnerOwner{ + VirtualMachineMetadataManagerVmMetadataOwnerOwnerComVmwareVsphereHA, + } +} + +func (e VirtualMachineMetadataManagerVmMetadataOwnerOwner) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineMetadataManagerVmMetadataOwnerOwner"] = reflect.TypeOf((*VirtualMachineMetadataManagerVmMetadataOwnerOwner)(nil)).Elem() - minAPIVersionForType["VirtualMachineMetadataManagerVmMetadataOwnerOwner"] = "5.5" } // MovePriority is an enumeration of values that indicate the priority of the task @@ -8961,11 +13844,24 @@ const ( VirtualMachineMovePriorityDefaultPriority = VirtualMachineMovePriority("defaultPriority") ) +func (e VirtualMachineMovePriority) Values() []VirtualMachineMovePriority { + return []VirtualMachineMovePriority{ + VirtualMachineMovePriorityLowPriority, + VirtualMachineMovePriorityHighPriority, + VirtualMachineMovePriorityDefaultPriority, + } +} + +func (e VirtualMachineMovePriority) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineMovePriority"] = 
reflect.TypeOf((*VirtualMachineMovePriority)(nil)).Elem() } // The NeedSecondaryReason type defines all reasons a virtual machine is +// in the needSecondary Fault Tolerance state following a failure. type VirtualMachineNeedSecondaryReason string const ( @@ -8985,14 +13881,27 @@ const ( VirtualMachineNeedSecondaryReasonOther = VirtualMachineNeedSecondaryReason("other") ) -func init() { - t["VirtualMachineNeedSecondaryReason"] = reflect.TypeOf((*VirtualMachineNeedSecondaryReason)(nil)).Elem() - minAPIVersionForType["VirtualMachineNeedSecondaryReason"] = "4.0" - minAPIVersionForEnumValue["VirtualMachineNeedSecondaryReason"] = map[string]string{ - "checkpointError": "6.0", +func (e VirtualMachineNeedSecondaryReason) Values() []VirtualMachineNeedSecondaryReason { + return []VirtualMachineNeedSecondaryReason{ + VirtualMachineNeedSecondaryReasonInitializing, + VirtualMachineNeedSecondaryReasonDivergence, + VirtualMachineNeedSecondaryReasonLostConnection, + VirtualMachineNeedSecondaryReasonPartialHardwareFailure, + VirtualMachineNeedSecondaryReasonUserAction, + VirtualMachineNeedSecondaryReasonCheckpointError, + VirtualMachineNeedSecondaryReasonOther, } } +func (e VirtualMachineNeedSecondaryReason) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VirtualMachineNeedSecondaryReason"] = reflect.TypeOf((*VirtualMachineNeedSecondaryReason)(nil)).Elem() +} + +// Set of possible values for `VirtualMachineFlagInfo.snapshotPowerOffBehavior`. type VirtualMachinePowerOffBehavior string const ( @@ -9006,12 +13915,21 @@ const ( VirtualMachinePowerOffBehaviorTake = VirtualMachinePowerOffBehavior("take") ) +func (e VirtualMachinePowerOffBehavior) Values() []VirtualMachinePowerOffBehavior { + return []VirtualMachinePowerOffBehavior{ + VirtualMachinePowerOffBehaviorPowerOff, + VirtualMachinePowerOffBehaviorRevert, + VirtualMachinePowerOffBehaviorPrompt, + VirtualMachinePowerOffBehaviorTake, + } +} + +func (e VirtualMachinePowerOffBehavior) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachinePowerOffBehavior"] = reflect.TypeOf((*VirtualMachinePowerOffBehavior)(nil)).Elem() - minAPIVersionForType["VirtualMachinePowerOffBehavior"] = "2.5" - minAPIVersionForEnumValue["VirtualMachinePowerOffBehavior"] = map[string]string{ - "take": "6.0", - } } // The list of possible default power operations available for the virtual machine @@ -9023,6 +13941,18 @@ const ( VirtualMachinePowerOpTypePreset = VirtualMachinePowerOpType("preset") ) +func (e VirtualMachinePowerOpType) Values() []VirtualMachinePowerOpType { + return []VirtualMachinePowerOpType{ + VirtualMachinePowerOpTypeSoft, + VirtualMachinePowerOpTypeHard, + VirtualMachinePowerOpTypePreset, + } +} + +func (e VirtualMachinePowerOpType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachinePowerOpType"] = reflect.TypeOf((*VirtualMachinePowerOpType)(nil)).Elem() } @@ -9052,6 +13982,18 @@ const ( VirtualMachinePowerStateSuspended = VirtualMachinePowerState("suspended") ) +func (e VirtualMachinePowerState) Values() []VirtualMachinePowerState { + return []VirtualMachinePowerState{ + VirtualMachinePowerStatePoweredOff, + VirtualMachinePowerStatePoweredOn, + VirtualMachinePowerStateSuspended, + } +} + +func (e VirtualMachinePowerState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachinePowerState"] = reflect.TypeOf((*VirtualMachinePowerState)(nil)).Elem() } @@ -9059,6 +14001,7 @@ func init() { // Deprecated 
as of vSphere API 6.0. // // The RecordReplayState type defines a simple set of record and replay +// states for a virtual machine. type VirtualMachineRecordReplayState string const ( @@ -9071,9 +14014,20 @@ const ( VirtualMachineRecordReplayStateInactive = VirtualMachineRecordReplayState("inactive") ) +func (e VirtualMachineRecordReplayState) Values() []VirtualMachineRecordReplayState { + return []VirtualMachineRecordReplayState{ + VirtualMachineRecordReplayStateRecording, + VirtualMachineRecordReplayStateReplaying, + VirtualMachineRecordReplayStateInactive, + } +} + +func (e VirtualMachineRecordReplayState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineRecordReplayState"] = reflect.TypeOf((*VirtualMachineRecordReplayState)(nil)).Elem() - minAPIVersionForType["VirtualMachineRecordReplayState"] = "4.0" } // Specifies how a virtual disk is moved or copied to a @@ -9094,6 +14048,8 @@ func init() { // a *relocate operation*, // `VirtualMachine.PromoteDisks_Task` has been provided as // a way to unshare such disk backings. +// +// See also `VirtualDiskSparseVer1BackingInfo.parent`, `VirtualDiskSparseVer2BackingInfo.parent`, `VirtualDiskFlatVer1BackingInfo.parent`, `VirtualDiskFlatVer2BackingInfo.parent`, `VirtualDiskRawDiskMappingVer1BackingInfo.parent`, `VirtualMachineRelocateSpec.diskMoveType`, `VirtualMachineRelocateSpecDiskLocator.diskMoveType`. type VirtualMachineRelocateDiskMoveOptions string const ( @@ -9149,12 +14105,22 @@ const ( VirtualMachineRelocateDiskMoveOptionsMoveAllDiskBackingsAndConsolidate = VirtualMachineRelocateDiskMoveOptions("moveAllDiskBackingsAndConsolidate") ) +func (e VirtualMachineRelocateDiskMoveOptions) Values() []VirtualMachineRelocateDiskMoveOptions { + return []VirtualMachineRelocateDiskMoveOptions{ + VirtualMachineRelocateDiskMoveOptionsMoveAllDiskBackingsAndAllowSharing, + VirtualMachineRelocateDiskMoveOptionsMoveAllDiskBackingsAndDisallowSharing, + VirtualMachineRelocateDiskMoveOptionsMoveChildMostDiskBacking, + VirtualMachineRelocateDiskMoveOptionsCreateNewChildDiskBacking, + VirtualMachineRelocateDiskMoveOptionsMoveAllDiskBackingsAndConsolidate, + } +} + +func (e VirtualMachineRelocateDiskMoveOptions) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineRelocateDiskMoveOptions"] = reflect.TypeOf((*VirtualMachineRelocateDiskMoveOptions)(nil)).Elem() - minAPIVersionForType["VirtualMachineRelocateDiskMoveOptions"] = "4.0" - minAPIVersionForEnumValue["VirtualMachineRelocateDiskMoveOptions"] = map[string]string{ - "moveAllDiskBackingsAndConsolidate": "5.1", - } } // Deprecated as of vSphere API 5.0. 
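Editorial note: every hunk in this range follows the same pattern. Each vSphere enum type gains Values() and Strings() helpers built on EnumValuesAsStrings, and many of the older minAPIVersionForType / minAPIVersionForEnumValue registrations are dropped or trimmed to only the newest entries. The sketch below shows one way a caller might use the new helpers to validate free-form input against an enum. It assumes the usual github.com/vmware/govmomi/vim25/types import path and the Go 1.21 slices package; it is illustrative only and not part of the vendored file.

    package main

    import (
        "fmt"
        "slices"

        "github.com/vmware/govmomi/vim25/types"
    )

    // isValidPowerState reports whether s is one of the enum's declared values,
    // using the Strings() helper introduced by this update. The method has a
    // value receiver, so calling it on a zero value works.
    func isValidPowerState(s string) bool {
        return slices.Contains(types.VirtualMachinePowerState("").Strings(), s)
    }

    func main() {
        fmt.Println(isValidPowerState("poweredOn"))  // true
        fmt.Println(isValidPowerState("hibernated")) // false
    }

The same pattern applies to any of the enum types touched in these hunks, since they all expose identical Values() and Strings() methods.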
@@ -9168,6 +14134,17 @@ const ( VirtualMachineRelocateTransformationSparse = VirtualMachineRelocateTransformation("sparse") ) +func (e VirtualMachineRelocateTransformation) Values() []VirtualMachineRelocateTransformation { + return []VirtualMachineRelocateTransformation{ + VirtualMachineRelocateTransformationFlat, + VirtualMachineRelocateTransformationSparse, + } +} + +func (e VirtualMachineRelocateTransformation) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineRelocateTransformation"] = reflect.TypeOf((*VirtualMachineRelocateTransformation)(nil)).Elem() } @@ -9190,10 +14167,32 @@ const ( VirtualMachineScsiPassthroughTypeUnknown = VirtualMachineScsiPassthroughType("unknown") ) +func (e VirtualMachineScsiPassthroughType) Values() []VirtualMachineScsiPassthroughType { + return []VirtualMachineScsiPassthroughType{ + VirtualMachineScsiPassthroughTypeDisk, + VirtualMachineScsiPassthroughTypeTape, + VirtualMachineScsiPassthroughTypePrinter, + VirtualMachineScsiPassthroughTypeProcessor, + VirtualMachineScsiPassthroughTypeWorm, + VirtualMachineScsiPassthroughTypeCdrom, + VirtualMachineScsiPassthroughTypeScanner, + VirtualMachineScsiPassthroughTypeOptical, + VirtualMachineScsiPassthroughTypeMedia, + VirtualMachineScsiPassthroughTypeCom, + VirtualMachineScsiPassthroughTypeRaid, + VirtualMachineScsiPassthroughTypeUnknown, + } +} + +func (e VirtualMachineScsiPassthroughType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineScsiPassthroughType"] = reflect.TypeOf((*VirtualMachineScsiPassthroughType)(nil)).Elem() } +// Flexible Launch Enclave (FLC) modes. type VirtualMachineSgxInfoFlcModes string const ( @@ -9209,9 +14208,19 @@ const ( VirtualMachineSgxInfoFlcModesUnlocked = VirtualMachineSgxInfoFlcModes("unlocked") ) +func (e VirtualMachineSgxInfoFlcModes) Values() []VirtualMachineSgxInfoFlcModes { + return []VirtualMachineSgxInfoFlcModes{ + VirtualMachineSgxInfoFlcModesLocked, + VirtualMachineSgxInfoFlcModesUnlocked, + } +} + +func (e VirtualMachineSgxInfoFlcModes) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineSgxInfoFlcModes"] = reflect.TypeOf((*VirtualMachineSgxInfoFlcModes)(nil)).Elem() - minAPIVersionForType["VirtualMachineSgxInfoFlcModes"] = "7.0" } // The list of possible standby actions that the virtual machine can take @@ -9223,6 +14232,17 @@ const ( VirtualMachineStandbyActionTypePowerOnSuspend = VirtualMachineStandbyActionType("powerOnSuspend") ) +func (e VirtualMachineStandbyActionType) Values() []VirtualMachineStandbyActionType { + return []VirtualMachineStandbyActionType{ + VirtualMachineStandbyActionTypeCheckpoint, + VirtualMachineStandbyActionTypePowerOnSuspend, + } +} + +func (e VirtualMachineStandbyActionType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineStandbyActionType"] = reflect.TypeOf((*VirtualMachineStandbyActionType)(nil)).Elem() } @@ -9240,28 +14260,34 @@ const ( VirtualMachineTargetInfoConfigurationTagClusterWide = VirtualMachineTargetInfoConfigurationTag("clusterWide") ) +func (e VirtualMachineTargetInfoConfigurationTag) Values() []VirtualMachineTargetInfoConfigurationTag { + return []VirtualMachineTargetInfoConfigurationTag{ + VirtualMachineTargetInfoConfigurationTagCompliant, + VirtualMachineTargetInfoConfigurationTagClusterWide, + } +} + +func (e VirtualMachineTargetInfoConfigurationTag) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { 
t["VirtualMachineTargetInfoConfigurationTag"] = reflect.TypeOf((*VirtualMachineTargetInfoConfigurationTag)(nil)).Elem() } +// The virtual machine ticket type. type VirtualMachineTicketType string const ( - // - // // Deprecated as of vSphere API 8.0. Use `webmks` instead. // // Remote mouse-keyboard-screen ticket. VirtualMachineTicketTypeMks = VirtualMachineTicketType("mks") - // - // // Deprecated as of vSphere 8.0 API. Use `webRemoteDevice` // instead. // // Remote device ticket. VirtualMachineTicketTypeDevice = VirtualMachineTicketType("device") - // - // // Deprecated as of vSphere 6.6.3 API. Use // `GuestOperationsManager` instead. // @@ -9283,16 +14309,26 @@ const ( VirtualMachineTicketTypeWebRemoteDevice = VirtualMachineTicketType("webRemoteDevice") ) -func init() { - t["VirtualMachineTicketType"] = reflect.TypeOf((*VirtualMachineTicketType)(nil)).Elem() - minAPIVersionForType["VirtualMachineTicketType"] = "4.1" - minAPIVersionForEnumValue["VirtualMachineTicketType"] = map[string]string{ - "webmks": "6.0", - "guestIntegrity": "6.7", - "webRemoteDevice": "7.0", +func (e VirtualMachineTicketType) Values() []VirtualMachineTicketType { + return []VirtualMachineTicketType{ + VirtualMachineTicketTypeMks, + VirtualMachineTicketTypeDevice, + VirtualMachineTicketTypeGuestControl, + VirtualMachineTicketTypeWebmks, + VirtualMachineTicketTypeGuestIntegrity, + VirtualMachineTicketTypeWebRemoteDevice, } } +func (e VirtualMachineTicketType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VirtualMachineTicketType"] = reflect.TypeOf((*VirtualMachineTicketType)(nil)).Elem() +} + +// The installation type of tools in the VM. type VirtualMachineToolsInstallType string const ( @@ -9316,12 +14352,26 @@ const ( VirtualMachineToolsInstallTypeGuestToolsTypeOpenVMTools = VirtualMachineToolsInstallType("guestToolsTypeOpenVMTools") ) +func (e VirtualMachineToolsInstallType) Values() []VirtualMachineToolsInstallType { + return []VirtualMachineToolsInstallType{ + VirtualMachineToolsInstallTypeGuestToolsTypeUnknown, + VirtualMachineToolsInstallTypeGuestToolsTypeMSI, + VirtualMachineToolsInstallTypeGuestToolsTypeTar, + VirtualMachineToolsInstallTypeGuestToolsTypeOSP, + VirtualMachineToolsInstallTypeGuestToolsTypeOpenVMTools, + } +} + +func (e VirtualMachineToolsInstallType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineToolsInstallType"] = reflect.TypeOf((*VirtualMachineToolsInstallType)(nil)).Elem() - minAPIVersionForType["VirtualMachineToolsInstallType"] = "6.5" } // Current running status of VMware Tools running in the guest +// operating system. 
type VirtualMachineToolsRunningStatus string const ( @@ -9333,9 +14383,20 @@ const ( VirtualMachineToolsRunningStatusGuestToolsExecutingScripts = VirtualMachineToolsRunningStatus("guestToolsExecutingScripts") ) +func (e VirtualMachineToolsRunningStatus) Values() []VirtualMachineToolsRunningStatus { + return []VirtualMachineToolsRunningStatus{ + VirtualMachineToolsRunningStatusGuestToolsNotRunning, + VirtualMachineToolsRunningStatusGuestToolsRunning, + VirtualMachineToolsRunningStatusGuestToolsExecutingScripts, + } +} + +func (e VirtualMachineToolsRunningStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineToolsRunningStatus"] = reflect.TypeOf((*VirtualMachineToolsRunningStatus)(nil)).Elem() - minAPIVersionForType["VirtualMachineToolsRunningStatus"] = "4.0" } // Deprecated as of vSphere API 4.0 use `VirtualMachineToolsVersionStatus_enum` @@ -9356,18 +14417,30 @@ const ( VirtualMachineToolsStatusToolsOk = VirtualMachineToolsStatus("toolsOk") ) +func (e VirtualMachineToolsStatus) Values() []VirtualMachineToolsStatus { + return []VirtualMachineToolsStatus{ + VirtualMachineToolsStatusToolsNotInstalled, + VirtualMachineToolsStatusToolsNotRunning, + VirtualMachineToolsStatusToolsOld, + VirtualMachineToolsStatusToolsOk, + } +} + +func (e VirtualMachineToolsStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineToolsStatus"] = reflect.TypeOf((*VirtualMachineToolsStatus)(nil)).Elem() } // Current version status of VMware Tools installed in the guest operating +// system. type VirtualMachineToolsVersionStatus string const ( // VMware Tools has never been installed. VirtualMachineToolsVersionStatusGuestToolsNotInstalled = VirtualMachineToolsVersionStatus("guestToolsNotInstalled") - // - // // Deprecated as of vSphere API 5.1 value is not reported by // toolsVersionStatus2, instead more detailed status is reported. // @@ -9392,18 +14465,29 @@ const ( VirtualMachineToolsVersionStatusGuestToolsBlacklisted = VirtualMachineToolsVersionStatus("guestToolsBlacklisted") ) -func init() { - t["VirtualMachineToolsVersionStatus"] = reflect.TypeOf((*VirtualMachineToolsVersionStatus)(nil)).Elem() - minAPIVersionForType["VirtualMachineToolsVersionStatus"] = "4.0" - minAPIVersionForEnumValue["VirtualMachineToolsVersionStatus"] = map[string]string{ - "guestToolsTooOld": "5.0", - "guestToolsSupportedOld": "5.0", - "guestToolsSupportedNew": "5.0", - "guestToolsTooNew": "5.0", - "guestToolsBlacklisted": "5.0", +func (e VirtualMachineToolsVersionStatus) Values() []VirtualMachineToolsVersionStatus { + return []VirtualMachineToolsVersionStatus{ + VirtualMachineToolsVersionStatusGuestToolsNotInstalled, + VirtualMachineToolsVersionStatusGuestToolsNeedUpgrade, + VirtualMachineToolsVersionStatusGuestToolsCurrent, + VirtualMachineToolsVersionStatusGuestToolsUnmanaged, + VirtualMachineToolsVersionStatusGuestToolsTooOld, + VirtualMachineToolsVersionStatusGuestToolsSupportedOld, + VirtualMachineToolsVersionStatusGuestToolsSupportedNew, + VirtualMachineToolsVersionStatusGuestToolsTooNew, + VirtualMachineToolsVersionStatusGuestToolsBlacklisted, } } +func (e VirtualMachineToolsVersionStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VirtualMachineToolsVersionStatus"] = reflect.TypeOf((*VirtualMachineToolsVersionStatus)(nil)).Elem() +} + +// Device class family. 
type VirtualMachineUsbInfoFamily string const ( @@ -9450,11 +14534,39 @@ const ( VirtualMachineUsbInfoFamilyUnknownFamily = VirtualMachineUsbInfoFamily("unknownFamily") ) -func init() { - t["VirtualMachineUsbInfoFamily"] = reflect.TypeOf((*VirtualMachineUsbInfoFamily)(nil)).Elem() - minAPIVersionForType["VirtualMachineUsbInfoFamily"] = "2.5" +func (e VirtualMachineUsbInfoFamily) Values() []VirtualMachineUsbInfoFamily { + return []VirtualMachineUsbInfoFamily{ + VirtualMachineUsbInfoFamilyAudio, + VirtualMachineUsbInfoFamilyHid, + VirtualMachineUsbInfoFamilyHid_bootable, + VirtualMachineUsbInfoFamilyPhysical, + VirtualMachineUsbInfoFamilyCommunication, + VirtualMachineUsbInfoFamilyImaging, + VirtualMachineUsbInfoFamilyPrinter, + VirtualMachineUsbInfoFamilyStorage, + VirtualMachineUsbInfoFamilyHub, + VirtualMachineUsbInfoFamilySmart_card, + VirtualMachineUsbInfoFamilySecurity, + VirtualMachineUsbInfoFamilyVideo, + VirtualMachineUsbInfoFamilyWireless, + VirtualMachineUsbInfoFamilyBluetooth, + VirtualMachineUsbInfoFamilyWusb, + VirtualMachineUsbInfoFamilyPda, + VirtualMachineUsbInfoFamilyVendor_specific, + VirtualMachineUsbInfoFamilyOther, + VirtualMachineUsbInfoFamilyUnknownFamily, + } } +func (e VirtualMachineUsbInfoFamily) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VirtualMachineUsbInfoFamily"] = reflect.TypeOf((*VirtualMachineUsbInfoFamily)(nil)).Elem() +} + +// Device speed. type VirtualMachineUsbInfoSpeed string const ( @@ -9474,17 +14586,32 @@ const ( VirtualMachineUsbInfoSpeedUnknownSpeed = VirtualMachineUsbInfoSpeed("unknownSpeed") ) +func (e VirtualMachineUsbInfoSpeed) Values() []VirtualMachineUsbInfoSpeed { + return []VirtualMachineUsbInfoSpeed{ + VirtualMachineUsbInfoSpeedLow, + VirtualMachineUsbInfoSpeedFull, + VirtualMachineUsbInfoSpeedHigh, + VirtualMachineUsbInfoSpeedSuperSpeed, + VirtualMachineUsbInfoSpeedSuperSpeedPlus, + VirtualMachineUsbInfoSpeedSuperSpeed20Gbps, + VirtualMachineUsbInfoSpeedUnknownSpeed, + } +} + +func (e VirtualMachineUsbInfoSpeed) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineUsbInfoSpeed"] = reflect.TypeOf((*VirtualMachineUsbInfoSpeed)(nil)).Elem() - minAPIVersionForType["VirtualMachineUsbInfoSpeed"] = "2.5" minAPIVersionForEnumValue["VirtualMachineUsbInfoSpeed"] = map[string]string{ - "superSpeed": "5.0", - "superSpeedPlus": "6.8.7", "superSpeed20Gbps": "7.0.3.2", } } // Set of possible values for action field in FilterSpec. +// +// Determines whether traffic is allowed or denied. type VirtualMachineVMCIDeviceAction string const ( @@ -9494,11 +14621,22 @@ const ( VirtualMachineVMCIDeviceActionDeny = VirtualMachineVMCIDeviceAction("deny") ) -func init() { - t["VirtualMachineVMCIDeviceAction"] = reflect.TypeOf((*VirtualMachineVMCIDeviceAction)(nil)).Elem() - minAPIVersionForType["VirtualMachineVMCIDeviceAction"] = "6.0" +func (e VirtualMachineVMCIDeviceAction) Values() []VirtualMachineVMCIDeviceAction { + return []VirtualMachineVMCIDeviceAction{ + VirtualMachineVMCIDeviceActionAllow, + VirtualMachineVMCIDeviceActionDeny, + } } +func (e VirtualMachineVMCIDeviceAction) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VirtualMachineVMCIDeviceAction"] = reflect.TypeOf((*VirtualMachineVMCIDeviceAction)(nil)).Elem() +} + +// Set of possible values for direction field in FilterSpec. 
type VirtualMachineVMCIDeviceDirection string const ( @@ -9510,11 +14648,23 @@ const ( VirtualMachineVMCIDeviceDirectionAnyDirection = VirtualMachineVMCIDeviceDirection("anyDirection") ) -func init() { - t["VirtualMachineVMCIDeviceDirection"] = reflect.TypeOf((*VirtualMachineVMCIDeviceDirection)(nil)).Elem() - minAPIVersionForType["VirtualMachineVMCIDeviceDirection"] = "6.0" +func (e VirtualMachineVMCIDeviceDirection) Values() []VirtualMachineVMCIDeviceDirection { + return []VirtualMachineVMCIDeviceDirection{ + VirtualMachineVMCIDeviceDirectionGuest, + VirtualMachineVMCIDeviceDirectionHost, + VirtualMachineVMCIDeviceDirectionAnyDirection, + } } +func (e VirtualMachineVMCIDeviceDirection) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VirtualMachineVMCIDeviceDirection"] = reflect.TypeOf((*VirtualMachineVMCIDeviceDirection)(nil)).Elem() +} + +// Set of possible values for protocol field in FilterSpec. type VirtualMachineVMCIDeviceProtocol string const ( @@ -9539,9 +14689,23 @@ const ( VirtualMachineVMCIDeviceProtocolAnyProtocol = VirtualMachineVMCIDeviceProtocol("anyProtocol") ) +func (e VirtualMachineVMCIDeviceProtocol) Values() []VirtualMachineVMCIDeviceProtocol { + return []VirtualMachineVMCIDeviceProtocol{ + VirtualMachineVMCIDeviceProtocolHypervisor, + VirtualMachineVMCIDeviceProtocolDoorbell, + VirtualMachineVMCIDeviceProtocolQueuepair, + VirtualMachineVMCIDeviceProtocolDatagram, + VirtualMachineVMCIDeviceProtocolStream, + VirtualMachineVMCIDeviceProtocolAnyProtocol, + } +} + +func (e VirtualMachineVMCIDeviceProtocol) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineVMCIDeviceProtocol"] = reflect.TypeOf((*VirtualMachineVMCIDeviceProtocol)(nil)).Elem() - minAPIVersionForType["VirtualMachineVMCIDeviceProtocol"] = "6.0" } type VirtualMachineVendorDeviceGroupInfoComponentDeviceInfoComponentType string @@ -9553,6 +14717,19 @@ const ( VirtualMachineVendorDeviceGroupInfoComponentDeviceInfoComponentTypeDvx = VirtualMachineVendorDeviceGroupInfoComponentDeviceInfoComponentType("dvx") ) +func (e VirtualMachineVendorDeviceGroupInfoComponentDeviceInfoComponentType) Values() []VirtualMachineVendorDeviceGroupInfoComponentDeviceInfoComponentType { + return []VirtualMachineVendorDeviceGroupInfoComponentDeviceInfoComponentType{ + VirtualMachineVendorDeviceGroupInfoComponentDeviceInfoComponentTypePciPassthru, + VirtualMachineVendorDeviceGroupInfoComponentDeviceInfoComponentTypeNvidiaVgpu, + VirtualMachineVendorDeviceGroupInfoComponentDeviceInfoComponentTypeSriovNic, + VirtualMachineVendorDeviceGroupInfoComponentDeviceInfoComponentTypeDvx, + } +} + +func (e VirtualMachineVendorDeviceGroupInfoComponentDeviceInfoComponentType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineVendorDeviceGroupInfoComponentDeviceInfoComponentType"] = reflect.TypeOf((*VirtualMachineVendorDeviceGroupInfoComponentDeviceInfoComponentType)(nil)).Elem() minAPIVersionForType["VirtualMachineVendorDeviceGroupInfoComponentDeviceInfoComponentType"] = "8.0.0.1" @@ -9565,6 +14742,17 @@ const ( VirtualMachineVgpuProfileInfoProfileClassQuadro = VirtualMachineVgpuProfileInfoProfileClass("quadro") ) +func (e VirtualMachineVgpuProfileInfoProfileClass) Values() []VirtualMachineVgpuProfileInfoProfileClass { + return []VirtualMachineVgpuProfileInfoProfileClass{ + VirtualMachineVgpuProfileInfoProfileClassCompute, + VirtualMachineVgpuProfileInfoProfileClassQuadro, + } +} + +func (e 
VirtualMachineVgpuProfileInfoProfileClass) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineVgpuProfileInfoProfileClass"] = reflect.TypeOf((*VirtualMachineVgpuProfileInfoProfileClass)(nil)).Elem() minAPIVersionForType["VirtualMachineVgpuProfileInfoProfileClass"] = "7.0.3.0" @@ -9579,11 +14767,23 @@ const ( VirtualMachineVgpuProfileInfoProfileSharingMig = VirtualMachineVgpuProfileInfoProfileSharing("mig") ) +func (e VirtualMachineVgpuProfileInfoProfileSharing) Values() []VirtualMachineVgpuProfileInfoProfileSharing { + return []VirtualMachineVgpuProfileInfoProfileSharing{ + VirtualMachineVgpuProfileInfoProfileSharingTimeSliced, + VirtualMachineVgpuProfileInfoProfileSharingMig, + } +} + +func (e VirtualMachineVgpuProfileInfoProfileSharing) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineVgpuProfileInfoProfileSharing"] = reflect.TypeOf((*VirtualMachineVgpuProfileInfoProfileSharing)(nil)).Elem() minAPIVersionForType["VirtualMachineVgpuProfileInfoProfileSharing"] = "7.0.3.0" } +// Set of possible values for `VirtualMachineVideoCard.use3dRenderer`. type VirtualMachineVideoCardUse3dRenderer string const ( @@ -9595,9 +14795,20 @@ const ( VirtualMachineVideoCardUse3dRendererHardware = VirtualMachineVideoCardUse3dRenderer("hardware") ) +func (e VirtualMachineVideoCardUse3dRenderer) Values() []VirtualMachineVideoCardUse3dRenderer { + return []VirtualMachineVideoCardUse3dRenderer{ + VirtualMachineVideoCardUse3dRendererAutomatic, + VirtualMachineVideoCardUse3dRendererSoftware, + VirtualMachineVideoCardUse3dRendererHardware, + } +} + +func (e VirtualMachineVideoCardUse3dRenderer) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineVideoCardUse3dRenderer"] = reflect.TypeOf((*VirtualMachineVideoCardUse3dRenderer)(nil)).Elem() - minAPIVersionForType["VirtualMachineVideoCardUse3dRenderer"] = "5.1" } type VirtualMachineVirtualDeviceSwapDeviceSwapStatus string @@ -9615,6 +14826,20 @@ const ( VirtualMachineVirtualDeviceSwapDeviceSwapStatusCompleted = VirtualMachineVirtualDeviceSwapDeviceSwapStatus("completed") ) +func (e VirtualMachineVirtualDeviceSwapDeviceSwapStatus) Values() []VirtualMachineVirtualDeviceSwapDeviceSwapStatus { + return []VirtualMachineVirtualDeviceSwapDeviceSwapStatus{ + VirtualMachineVirtualDeviceSwapDeviceSwapStatusNone, + VirtualMachineVirtualDeviceSwapDeviceSwapStatusScheduled, + VirtualMachineVirtualDeviceSwapDeviceSwapStatusInprogress, + VirtualMachineVirtualDeviceSwapDeviceSwapStatusFailed, + VirtualMachineVirtualDeviceSwapDeviceSwapStatusCompleted, + } +} + +func (e VirtualMachineVirtualDeviceSwapDeviceSwapStatus) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineVirtualDeviceSwapDeviceSwapStatus"] = reflect.TypeOf((*VirtualMachineVirtualDeviceSwapDeviceSwapStatus)(nil)).Elem() minAPIVersionForType["VirtualMachineVirtualDeviceSwapDeviceSwapStatus"] = "8.0.0.1" @@ -9632,12 +14857,24 @@ const ( VirtualMachineVirtualPMemSnapshotModeIndependent_eraseonrevert = VirtualMachineVirtualPMemSnapshotMode("independent_eraseonrevert") ) +func (e VirtualMachineVirtualPMemSnapshotMode) Values() []VirtualMachineVirtualPMemSnapshotMode { + return []VirtualMachineVirtualPMemSnapshotMode{ + VirtualMachineVirtualPMemSnapshotModeIndependent_persistent, + VirtualMachineVirtualPMemSnapshotModeIndependent_eraseonrevert, + } +} + +func (e VirtualMachineVirtualPMemSnapshotMode) Strings() []string { + return 
EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineVirtualPMemSnapshotMode"] = reflect.TypeOf((*VirtualMachineVirtualPMemSnapshotMode)(nil)).Elem() minAPIVersionForType["VirtualMachineVirtualPMemSnapshotMode"] = "7.0.3.0" } // The VSS Snapshot Context +// VSS\_SNAPSHOT\_CONTEXT values not listed below are not implemented. type VirtualMachineWindowsQuiesceSpecVssBackupContext string const ( @@ -9653,9 +14890,20 @@ const ( VirtualMachineWindowsQuiesceSpecVssBackupContextCtx_file_share_backup = VirtualMachineWindowsQuiesceSpecVssBackupContext("ctx_file_share_backup") ) +func (e VirtualMachineWindowsQuiesceSpecVssBackupContext) Values() []VirtualMachineWindowsQuiesceSpecVssBackupContext { + return []VirtualMachineWindowsQuiesceSpecVssBackupContext{ + VirtualMachineWindowsQuiesceSpecVssBackupContextCtx_auto, + VirtualMachineWindowsQuiesceSpecVssBackupContextCtx_backup, + VirtualMachineWindowsQuiesceSpecVssBackupContextCtx_file_share_backup, + } +} + +func (e VirtualMachineWindowsQuiesceSpecVssBackupContext) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualMachineWindowsQuiesceSpecVssBackupContext"] = reflect.TypeOf((*VirtualMachineWindowsQuiesceSpecVssBackupContext)(nil)).Elem() - minAPIVersionForType["VirtualMachineWindowsQuiesceSpecVssBackupContext"] = "6.5" } type VirtualNVMEControllerSharing string @@ -9665,6 +14913,17 @@ const ( VirtualNVMEControllerSharingPhysicalSharing = VirtualNVMEControllerSharing("physicalSharing") ) +func (e VirtualNVMEControllerSharing) Values() []VirtualNVMEControllerSharing { + return []VirtualNVMEControllerSharing{ + VirtualNVMEControllerSharingNoSharing, + VirtualNVMEControllerSharingPhysicalSharing, + } +} + +func (e VirtualNVMEControllerSharing) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualNVMEControllerSharing"] = reflect.TypeOf((*VirtualNVMEControllerSharing)(nil)).Elem() minAPIVersionForType["VirtualNVMEControllerSharing"] = "8.0.2.0" @@ -9692,6 +14951,23 @@ const ( VirtualPointingDeviceHostChoicePs2 = VirtualPointingDeviceHostChoice("ps2") ) +func (e VirtualPointingDeviceHostChoice) Values() []VirtualPointingDeviceHostChoice { + return []VirtualPointingDeviceHostChoice{ + VirtualPointingDeviceHostChoiceAutodetect, + VirtualPointingDeviceHostChoiceIntellimouseExplorer, + VirtualPointingDeviceHostChoiceIntellimousePs2, + VirtualPointingDeviceHostChoiceLogitechMouseman, + VirtualPointingDeviceHostChoiceMicrosoft_serial, + VirtualPointingDeviceHostChoiceMouseSystems, + VirtualPointingDeviceHostChoiceMousemanSerial, + VirtualPointingDeviceHostChoicePs2, + } +} + +func (e VirtualPointingDeviceHostChoice) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualPointingDeviceHostChoice"] = reflect.TypeOf((*VirtualPointingDeviceHostChoice)(nil)).Elem() } @@ -9713,6 +14989,18 @@ const ( VirtualSCSISharingPhysicalSharing = VirtualSCSISharing("physicalSharing") ) +func (e VirtualSCSISharing) Values() []VirtualSCSISharing { + return []VirtualSCSISharing{ + VirtualSCSISharingNoSharing, + VirtualSCSISharingVirtualSharing, + VirtualSCSISharingPhysicalSharing, + } +} + +func (e VirtualSCSISharing) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualSCSISharing"] = reflect.TypeOf((*VirtualSCSISharing)(nil)).Elem() } @@ -9739,10 +15027,22 @@ const ( VirtualSerialPortEndPointServer = VirtualSerialPortEndPoint("server") ) +func (e VirtualSerialPortEndPoint) Values() []VirtualSerialPortEndPoint { + 
return []VirtualSerialPortEndPoint{ + VirtualSerialPortEndPointClient, + VirtualSerialPortEndPointServer, + } +} + +func (e VirtualSerialPortEndPoint) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualSerialPortEndPoint"] = reflect.TypeOf((*VirtualSerialPortEndPoint)(nil)).Elem() } +// The enumeration of all known valid VRDMA device protocols. type VirtualVmxnet3VrdmaOptionDeviceProtocols string const ( @@ -9752,9 +15052,19 @@ const ( VirtualVmxnet3VrdmaOptionDeviceProtocolsRocev2 = VirtualVmxnet3VrdmaOptionDeviceProtocols("rocev2") ) +func (e VirtualVmxnet3VrdmaOptionDeviceProtocols) Values() []VirtualVmxnet3VrdmaOptionDeviceProtocols { + return []VirtualVmxnet3VrdmaOptionDeviceProtocols{ + VirtualVmxnet3VrdmaOptionDeviceProtocolsRocev1, + VirtualVmxnet3VrdmaOptionDeviceProtocolsRocev2, + } +} + +func (e VirtualVmxnet3VrdmaOptionDeviceProtocols) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VirtualVmxnet3VrdmaOptionDeviceProtocols"] = reflect.TypeOf((*VirtualVmxnet3VrdmaOptionDeviceProtocols)(nil)).Elem() - minAPIVersionForType["VirtualVmxnet3VrdmaOptionDeviceProtocols"] = "6.7" } type VmDasBeingResetEventReasonCode string @@ -9770,15 +15080,24 @@ const ( VmDasBeingResetEventReasonCodeVmcpResetApdCleared = VmDasBeingResetEventReasonCode("vmcpResetApdCleared") ) -func init() { - t["VmDasBeingResetEventReasonCode"] = reflect.TypeOf((*VmDasBeingResetEventReasonCode)(nil)).Elem() - minAPIVersionForType["VmDasBeingResetEventReasonCode"] = "4.1" - minAPIVersionForEnumValue["VmDasBeingResetEventReasonCode"] = map[string]string{ - "appImmediateResetRequest": "5.5", - "vmcpResetApdCleared": "6.0", +func (e VmDasBeingResetEventReasonCode) Values() []VmDasBeingResetEventReasonCode { + return []VmDasBeingResetEventReasonCode{ + VmDasBeingResetEventReasonCodeVmtoolsHeartbeatFailure, + VmDasBeingResetEventReasonCodeAppHeartbeatFailure, + VmDasBeingResetEventReasonCodeAppImmediateResetRequest, + VmDasBeingResetEventReasonCodeVmcpResetApdCleared, } } +func (e VmDasBeingResetEventReasonCode) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VmDasBeingResetEventReasonCode"] = reflect.TypeOf((*VmDasBeingResetEventReasonCode)(nil)).Elem() +} + +// The reason for the failure. 
type VmFailedStartingSecondaryEventFailureReason string const ( @@ -9796,9 +15115,21 @@ const ( VmFailedStartingSecondaryEventFailureReasonMigrateFailed = VmFailedStartingSecondaryEventFailureReason("migrateFailed") ) +func (e VmFailedStartingSecondaryEventFailureReason) Values() []VmFailedStartingSecondaryEventFailureReason { + return []VmFailedStartingSecondaryEventFailureReason{ + VmFailedStartingSecondaryEventFailureReasonIncompatibleHost, + VmFailedStartingSecondaryEventFailureReasonLoginFailed, + VmFailedStartingSecondaryEventFailureReasonRegisterVmFailed, + VmFailedStartingSecondaryEventFailureReasonMigrateFailed, + } +} + +func (e VmFailedStartingSecondaryEventFailureReason) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VmFailedStartingSecondaryEventFailureReason"] = reflect.TypeOf((*VmFailedStartingSecondaryEventFailureReason)(nil)).Elem() - minAPIVersionForType["VmFailedStartingSecondaryEventFailureReason"] = "4.0" } type VmFaultToleranceConfigIssueReasonForIssue string @@ -9809,16 +15140,12 @@ const ( // There is already a secondary virtual machine for the primary // virtual machine VmFaultToleranceConfigIssueReasonForIssueMoreThanOneSecondary = VmFaultToleranceConfigIssueReasonForIssue("moreThanOneSecondary") - // - // // Deprecated as of vSphere API 6.0. // // The virtual machine does not support record/replay. // // Vm::Capability.RecordReplaySupported is false. VmFaultToleranceConfigIssueReasonForIssueRecordReplayNotSupported = VmFaultToleranceConfigIssueReasonForIssue("recordReplayNotSupported") - // - // // Deprecated as of vSphere API 6.0. // // It is not possible to turn on Fault Tolerance on this powered-on VM. @@ -9856,8 +15183,7 @@ const ( // The virtual machine is an ESX agent VM VmFaultToleranceConfigIssueReasonForIssueEsxAgentVm = VmFaultToleranceConfigIssueReasonForIssue("esxAgentVm") // The virtual machine video device has 3D enabled - VmFaultToleranceConfigIssueReasonForIssueVideo3dEnabled = VmFaultToleranceConfigIssueReasonForIssue("video3dEnabled") - // `**Since:**` vSphere API Release 5.1 + VmFaultToleranceConfigIssueReasonForIssueVideo3dEnabled = VmFaultToleranceConfigIssueReasonForIssue("video3dEnabled") VmFaultToleranceConfigIssueReasonForIssueHasUnsupportedDisk = VmFaultToleranceConfigIssueReasonForIssue("hasUnsupportedDisk") // FT logging nic does not have desired bandwidth VmFaultToleranceConfigIssueReasonForIssueInsufficientBandwidth = VmFaultToleranceConfigIssueReasonForIssue("insufficientBandwidth") @@ -9885,28 +15211,88 @@ const ( // The host does not support fault tolerance virtual machines // with the specified amount of memory. 
VmFaultToleranceConfigIssueReasonForIssueTooMuchMemory = VmFaultToleranceConfigIssueReasonForIssue("tooMuchMemory") + // No VMotion license + VmFaultToleranceConfigIssueReasonForIssueVMotionNotLicensed = VmFaultToleranceConfigIssueReasonForIssue("vMotionNotLicensed") + // Host does not have proper FT license + VmFaultToleranceConfigIssueReasonForIssueFtNotLicensed = VmFaultToleranceConfigIssueReasonForIssue("ftNotLicensed") + // Host does not have HA agent running properly + VmFaultToleranceConfigIssueReasonForIssueHaAgentIssue = VmFaultToleranceConfigIssueReasonForIssue("haAgentIssue") + // The VM has unsupported storage policy + VmFaultToleranceConfigIssueReasonForIssueUnsupportedSPBM = VmFaultToleranceConfigIssueReasonForIssue("unsupportedSPBM") + // The virtual machine has virtual disk in linked-clone mode + VmFaultToleranceConfigIssueReasonForIssueHasLinkedCloneDisk = VmFaultToleranceConfigIssueReasonForIssue("hasLinkedCloneDisk") // Virtual Machine with Pmem HA Failover is not supported VmFaultToleranceConfigIssueReasonForIssueUnsupportedPMemHAFailOver = VmFaultToleranceConfigIssueReasonForIssue("unsupportedPMemHAFailOver") + // Virtual Machine with encrypted virtual disk is not supported. + VmFaultToleranceConfigIssueReasonForIssueUnsupportedEncryptedDisk = VmFaultToleranceConfigIssueReasonForIssue("unsupportedEncryptedDisk") + // The virtual machine does not allow to enable or disable FT Metro + // Cluster while FT is turned on. + VmFaultToleranceConfigIssueReasonForIssueFtMetroClusterNotEditable = VmFaultToleranceConfigIssueReasonForIssue("ftMetroClusterNotEditable") + // Cannot turn on vSphere Fault Tolerance on a FT Metro Cluster enabled VM + // with no Host Group configured. + VmFaultToleranceConfigIssueReasonForIssueNoHostGroupConfigured = VmFaultToleranceConfigIssueReasonForIssue("noHostGroupConfigured") ) +func (e VmFaultToleranceConfigIssueReasonForIssue) Values() []VmFaultToleranceConfigIssueReasonForIssue { + return []VmFaultToleranceConfigIssueReasonForIssue{ + VmFaultToleranceConfigIssueReasonForIssueHaNotEnabled, + VmFaultToleranceConfigIssueReasonForIssueMoreThanOneSecondary, + VmFaultToleranceConfigIssueReasonForIssueRecordReplayNotSupported, + VmFaultToleranceConfigIssueReasonForIssueReplayNotSupported, + VmFaultToleranceConfigIssueReasonForIssueTemplateVm, + VmFaultToleranceConfigIssueReasonForIssueMultipleVCPU, + VmFaultToleranceConfigIssueReasonForIssueHostInactive, + VmFaultToleranceConfigIssueReasonForIssueFtUnsupportedHardware, + VmFaultToleranceConfigIssueReasonForIssueFtUnsupportedProduct, + VmFaultToleranceConfigIssueReasonForIssueMissingVMotionNic, + VmFaultToleranceConfigIssueReasonForIssueMissingFTLoggingNic, + VmFaultToleranceConfigIssueReasonForIssueThinDisk, + VmFaultToleranceConfigIssueReasonForIssueVerifySSLCertificateFlagNotSet, + VmFaultToleranceConfigIssueReasonForIssueHasSnapshots, + VmFaultToleranceConfigIssueReasonForIssueNoConfig, + VmFaultToleranceConfigIssueReasonForIssueFtSecondaryVm, + VmFaultToleranceConfigIssueReasonForIssueHasLocalDisk, + VmFaultToleranceConfigIssueReasonForIssueEsxAgentVm, + VmFaultToleranceConfigIssueReasonForIssueVideo3dEnabled, + VmFaultToleranceConfigIssueReasonForIssueHasUnsupportedDisk, + VmFaultToleranceConfigIssueReasonForIssueInsufficientBandwidth, + VmFaultToleranceConfigIssueReasonForIssueHasNestedHVConfiguration, + VmFaultToleranceConfigIssueReasonForIssueHasVFlashConfiguration, + VmFaultToleranceConfigIssueReasonForIssueUnsupportedProduct, + VmFaultToleranceConfigIssueReasonForIssueCpuHvUnsupported, + 
VmFaultToleranceConfigIssueReasonForIssueCpuHwmmuUnsupported, + VmFaultToleranceConfigIssueReasonForIssueCpuHvDisabled, + VmFaultToleranceConfigIssueReasonForIssueHasEFIFirmware, + VmFaultToleranceConfigIssueReasonForIssueTooManyVCPUs, + VmFaultToleranceConfigIssueReasonForIssueTooMuchMemory, + VmFaultToleranceConfigIssueReasonForIssueVMotionNotLicensed, + VmFaultToleranceConfigIssueReasonForIssueFtNotLicensed, + VmFaultToleranceConfigIssueReasonForIssueHaAgentIssue, + VmFaultToleranceConfigIssueReasonForIssueUnsupportedSPBM, + VmFaultToleranceConfigIssueReasonForIssueHasLinkedCloneDisk, + VmFaultToleranceConfigIssueReasonForIssueUnsupportedPMemHAFailOver, + VmFaultToleranceConfigIssueReasonForIssueUnsupportedEncryptedDisk, + VmFaultToleranceConfigIssueReasonForIssueFtMetroClusterNotEditable, + VmFaultToleranceConfigIssueReasonForIssueNoHostGroupConfigured, + } +} + +func (e VmFaultToleranceConfigIssueReasonForIssue) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VmFaultToleranceConfigIssueReasonForIssue"] = reflect.TypeOf((*VmFaultToleranceConfigIssueReasonForIssue)(nil)).Elem() - minAPIVersionForType["VmFaultToleranceConfigIssueReasonForIssue"] = "4.0" minAPIVersionForEnumValue["VmFaultToleranceConfigIssueReasonForIssue"] = map[string]string{ - "esxAgentVm": "5.0", - "video3dEnabled": "5.0", - "hasUnsupportedDisk": "5.1", - "insufficientBandwidth": "6.0", - "hasNestedHVConfiguration": "5.1", - "hasVFlashConfiguration": "5.5", - "unsupportedProduct": "6.0", - "cpuHvUnsupported": "6.0", - "cpuHwmmuUnsupported": "6.0", - "cpuHvDisabled": "6.0", - "hasEFIFirmware": "6.0", - "tooManyVCPUs": "6.7", - "tooMuchMemory": "6.7", + "vMotionNotLicensed": "8.0.3.0", + "ftNotLicensed": "8.0.3.0", + "haAgentIssue": "8.0.3.0", + "unsupportedSPBM": "8.0.3.0", + "hasLinkedCloneDisk": "8.0.3.0", "unsupportedPMemHAFailOver": "7.0.2.0", + "unsupportedEncryptedDisk": "8.0.3.0", + "ftMetroClusterNotEditable": "8.0.3.0", + "noHostGroupConfigured": "8.0.3.0", } } @@ -9925,9 +15311,22 @@ const ( VmFaultToleranceInvalidFileBackingDeviceTypeVirtualDisk = VmFaultToleranceInvalidFileBackingDeviceType("virtualDisk") ) +func (e VmFaultToleranceInvalidFileBackingDeviceType) Values() []VmFaultToleranceInvalidFileBackingDeviceType { + return []VmFaultToleranceInvalidFileBackingDeviceType{ + VmFaultToleranceInvalidFileBackingDeviceTypeVirtualFloppy, + VmFaultToleranceInvalidFileBackingDeviceTypeVirtualCdrom, + VmFaultToleranceInvalidFileBackingDeviceTypeVirtualSerialPort, + VmFaultToleranceInvalidFileBackingDeviceTypeVirtualParallelPort, + VmFaultToleranceInvalidFileBackingDeviceTypeVirtualDisk, + } +} + +func (e VmFaultToleranceInvalidFileBackingDeviceType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VmFaultToleranceInvalidFileBackingDeviceType"] = reflect.TypeOf((*VmFaultToleranceInvalidFileBackingDeviceType)(nil)).Elem() - minAPIVersionForType["VmFaultToleranceInvalidFileBackingDeviceType"] = "4.0" } type VmShutdownOnIsolationEventOperation string @@ -9939,11 +15338,22 @@ const ( VmShutdownOnIsolationEventOperationPoweredOff = VmShutdownOnIsolationEventOperation("poweredOff") ) -func init() { - t["VmShutdownOnIsolationEventOperation"] = reflect.TypeOf((*VmShutdownOnIsolationEventOperation)(nil)).Elem() - minAPIVersionForType["VmShutdownOnIsolationEventOperation"] = "4.0" +func (e VmShutdownOnIsolationEventOperation) Values() []VmShutdownOnIsolationEventOperation { + return []VmShutdownOnIsolationEventOperation{ + 
VmShutdownOnIsolationEventOperationShutdown, + VmShutdownOnIsolationEventOperationPoweredOff, + } } +func (e VmShutdownOnIsolationEventOperation) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VmShutdownOnIsolationEventOperation"] = reflect.TypeOf((*VmShutdownOnIsolationEventOperation)(nil)).Elem() +} + +// The PVLAN port types. type VmwareDistributedVirtualSwitchPvlanPortType string const ( @@ -9961,11 +15371,23 @@ const ( VmwareDistributedVirtualSwitchPvlanPortTypeCommunity = VmwareDistributedVirtualSwitchPvlanPortType("community") ) -func init() { - t["VmwareDistributedVirtualSwitchPvlanPortType"] = reflect.TypeOf((*VmwareDistributedVirtualSwitchPvlanPortType)(nil)).Elem() - minAPIVersionForType["VmwareDistributedVirtualSwitchPvlanPortType"] = "4.0" +func (e VmwareDistributedVirtualSwitchPvlanPortType) Values() []VmwareDistributedVirtualSwitchPvlanPortType { + return []VmwareDistributedVirtualSwitchPvlanPortType{ + VmwareDistributedVirtualSwitchPvlanPortTypePromiscuous, + VmwareDistributedVirtualSwitchPvlanPortTypeIsolated, + VmwareDistributedVirtualSwitchPvlanPortTypeCommunity, + } } +func (e VmwareDistributedVirtualSwitchPvlanPortType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VmwareDistributedVirtualSwitchPvlanPortType"] = reflect.TypeOf((*VmwareDistributedVirtualSwitchPvlanPortType)(nil)).Elem() +} + +// The list of disk issues. type VsanDiskIssueType string const ( @@ -9974,12 +15396,24 @@ const ( VsanDiskIssueTypeUnknown = VsanDiskIssueType("unknown") ) +func (e VsanDiskIssueType) Values() []VsanDiskIssueType { + return []VsanDiskIssueType{ + VsanDiskIssueTypeNonExist, + VsanDiskIssueTypeStampMismatch, + VsanDiskIssueTypeUnknown, + } +} + +func (e VsanDiskIssueType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VsanDiskIssueType"] = reflect.TypeOf((*VsanDiskIssueType)(nil)).Elem() - minAPIVersionForType["VsanDiskIssueType"] = "5.5" } // The action to take with regard to storage objects upon decommissioning +// a host from use with the VSAN service. type VsanHostDecommissionModeObjectAction string const ( @@ -9993,12 +15427,25 @@ const ( VsanHostDecommissionModeObjectActionEvacuateAllData = VsanHostDecommissionModeObjectAction("evacuateAllData") ) +func (e VsanHostDecommissionModeObjectAction) Values() []VsanHostDecommissionModeObjectAction { + return []VsanHostDecommissionModeObjectAction{ + VsanHostDecommissionModeObjectActionNoAction, + VsanHostDecommissionModeObjectActionEnsureObjectAccessibility, + VsanHostDecommissionModeObjectActionEvacuateAllData, + } +} + +func (e VsanHostDecommissionModeObjectAction) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VsanHostDecommissionModeObjectAction"] = reflect.TypeOf((*VsanHostDecommissionModeObjectAction)(nil)).Elem() - minAPIVersionForType["VsanHostDecommissionModeObjectAction"] = "5.5" } // Values used for indicating a disk's status for use by the VSAN service. +// +// See also `VsanHostDiskResult.state`. 
type VsanHostDiskResultState string const ( @@ -10021,13 +15468,26 @@ const ( VsanHostDiskResultStateIneligible = VsanHostDiskResultState("ineligible") ) +func (e VsanHostDiskResultState) Values() []VsanHostDiskResultState { + return []VsanHostDiskResultState{ + VsanHostDiskResultStateInUse, + VsanHostDiskResultStateEligible, + VsanHostDiskResultStateIneligible, + } +} + +func (e VsanHostDiskResultState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VsanHostDiskResultState"] = reflect.TypeOf((*VsanHostDiskResultState)(nil)).Elem() - minAPIVersionForType["VsanHostDiskResultState"] = "5.5" } // A `VsanHostHealthState_enum` represents the state of a participating // host in the VSAN service. +// +// See also `VsanHostClusterStatus`. type VsanHostHealthState string const ( @@ -10039,13 +15499,26 @@ const ( VsanHostHealthStateUnhealthy = VsanHostHealthState("unhealthy") ) +func (e VsanHostHealthState) Values() []VsanHostHealthState { + return []VsanHostHealthState{ + VsanHostHealthStateUnknown, + VsanHostHealthStateHealthy, + VsanHostHealthStateUnhealthy, + } +} + +func (e VsanHostHealthState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VsanHostHealthState"] = reflect.TypeOf((*VsanHostHealthState)(nil)).Elem() - minAPIVersionForType["VsanHostHealthState"] = "5.5" } // A `VsanHostNodeState_enum` represents the state of participation of a host // in the VSAN service. +// +// See also `VsanHostClusterStatus`, `VsanHostClusterStatusState`. type VsanHostNodeState string const ( @@ -10081,11 +15554,30 @@ const ( VsanHostNodeStateDecommissioning = VsanHostNodeState("decommissioning") ) -func init() { - t["VsanHostNodeState"] = reflect.TypeOf((*VsanHostNodeState)(nil)).Elem() - minAPIVersionForType["VsanHostNodeState"] = "5.5" +func (e VsanHostNodeState) Values() []VsanHostNodeState { + return []VsanHostNodeState{ + VsanHostNodeStateError, + VsanHostNodeStateDisabled, + VsanHostNodeStateAgent, + VsanHostNodeStateMaster, + VsanHostNodeStateBackup, + VsanHostNodeStateStarting, + VsanHostNodeStateStopping, + VsanHostNodeStateEnteringMaintenanceMode, + VsanHostNodeStateExitingMaintenanceMode, + VsanHostNodeStateDecommissioning, + } } +func (e VsanHostNodeState) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + +func init() { + t["VsanHostNodeState"] = reflect.TypeOf((*VsanHostNodeState)(nil)).Elem() +} + +// Type of disk group operation performed. 
type VsanUpgradeSystemUpgradeHistoryDiskGroupOpType string const ( @@ -10095,9 +15587,19 @@ const ( VsanUpgradeSystemUpgradeHistoryDiskGroupOpTypeRemove = VsanUpgradeSystemUpgradeHistoryDiskGroupOpType("remove") ) +func (e VsanUpgradeSystemUpgradeHistoryDiskGroupOpType) Values() []VsanUpgradeSystemUpgradeHistoryDiskGroupOpType { + return []VsanUpgradeSystemUpgradeHistoryDiskGroupOpType{ + VsanUpgradeSystemUpgradeHistoryDiskGroupOpTypeAdd, + VsanUpgradeSystemUpgradeHistoryDiskGroupOpTypeRemove, + } +} + +func (e VsanUpgradeSystemUpgradeHistoryDiskGroupOpType) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["VsanUpgradeSystemUpgradeHistoryDiskGroupOpType"] = reflect.TypeOf((*VsanUpgradeSystemUpgradeHistoryDiskGroupOpType)(nil)).Elem() - minAPIVersionForType["VsanUpgradeSystemUpgradeHistoryDiskGroupOpType"] = "6.0" } type WeekOfMonth string @@ -10110,6 +15612,20 @@ const ( WeekOfMonthLast = WeekOfMonth("last") ) +func (e WeekOfMonth) Values() []WeekOfMonth { + return []WeekOfMonth{ + WeekOfMonthFirst, + WeekOfMonthSecond, + WeekOfMonthThird, + WeekOfMonthFourth, + WeekOfMonthLast, + } +} + +func (e WeekOfMonth) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["WeekOfMonth"] = reflect.TypeOf((*WeekOfMonth)(nil)).Elem() } @@ -10123,9 +15639,19 @@ const ( WillLoseHAProtectionResolutionRelocate = WillLoseHAProtectionResolution("relocate") ) +func (e WillLoseHAProtectionResolution) Values() []WillLoseHAProtectionResolution { + return []WillLoseHAProtectionResolution{ + WillLoseHAProtectionResolutionSvmotion, + WillLoseHAProtectionResolutionRelocate, + } +} + +func (e WillLoseHAProtectionResolution) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["WillLoseHAProtectionResolution"] = reflect.TypeOf((*WillLoseHAProtectionResolution)(nil)).Elem() - minAPIVersionForType["WillLoseHAProtectionResolution"] = "5.0" } type VslmDiskInfoFlag string @@ -10154,6 +15680,36 @@ const ( VslmDiskInfoFlagCbtEnabled = VslmDiskInfoFlag("cbtEnabled") ) +func (e VslmDiskInfoFlag) Values() []VslmDiskInfoFlag { + return []VslmDiskInfoFlag{ + VslmDiskInfoFlagId, + VslmDiskInfoFlagDescriptorVersion, + VslmDiskInfoFlagBackingObjectId, + VslmDiskInfoFlagPath, + VslmDiskInfoFlagParentPath, + VslmDiskInfoFlagName, + VslmDiskInfoFlagDeviceName, + VslmDiskInfoFlagCapacity, + VslmDiskInfoFlagAllocated, + VslmDiskInfoFlagType, + VslmDiskInfoFlagConsumers, + VslmDiskInfoFlagTentativeState, + VslmDiskInfoFlagCreateTime, + VslmDiskInfoFlagIoFilter, + VslmDiskInfoFlagControlFlags, + VslmDiskInfoFlagKeepAfterVmDelete, + VslmDiskInfoFlagRelocationDisabled, + VslmDiskInfoFlagKeyId, + VslmDiskInfoFlagKeyProviderId, + VslmDiskInfoFlagNativeSnapshotSupported, + VslmDiskInfoFlagCbtEnabled, + } +} + +func (e VslmDiskInfoFlag) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { t["vslmDiskInfoFlag"] = reflect.TypeOf((*VslmDiskInfoFlag)(nil)).Elem() } @@ -10166,6 +15722,18 @@ const ( VslmVStorageObjectControlFlagEnableChangedBlockTracking = VslmVStorageObjectControlFlag("enableChangedBlockTracking") ) +func (e VslmVStorageObjectControlFlag) Values() []VslmVStorageObjectControlFlag { + return []VslmVStorageObjectControlFlag{ + VslmVStorageObjectControlFlagKeepAfterDeleteVm, + VslmVStorageObjectControlFlagDisableRelocation, + VslmVStorageObjectControlFlagEnableChangedBlockTracking, + } +} + +func (e VslmVStorageObjectControlFlag) Strings() []string { + return EnumValuesAsStrings(e.Values()) +} + func init() { 
t["vslmVStorageObjectControlFlag"] = reflect.TypeOf((*VslmVStorageObjectControlFlag)(nil)).Elem() } diff --git a/vendor/github.com/vmware/govmomi/vim25/types/hardware_version.go b/vendor/github.com/vmware/govmomi/vim25/types/hardware_version.go index 396775771..f4489fe9e 100644 --- a/vendor/github.com/vmware/govmomi/vim25/types/hardware_version.go +++ b/vendor/github.com/vmware/govmomi/vim25/types/hardware_version.go @@ -25,6 +25,10 @@ import ( // HardwareVersion is a VMX hardware version. type HardwareVersion uint8 +const ( + invalidHardwareVersion HardwareVersion = 0 +) + const ( VMX3 HardwareVersion = iota + 3 VMX4 @@ -59,12 +63,24 @@ const ( MaxValidHardwareVersion = VMX21 ) -func (hv HardwareVersion) IsValid() bool { - return hv != vmx5 && +// IsSupported returns true if the hardware version is known to and supported by +// GoVmomi's generated types. +func (hv HardwareVersion) IsSupported() bool { + return hv.IsValid() && + hv != vmx5 && hv >= MinValidHardwareVersion && hv <= MaxValidHardwareVersion } +// IsValid returns true if the hardware version is not valid. +// Unlike IsSupported, this function returns true as long as the hardware +// version is greater than 0. +// For example, the result of parsing "abc" or "vmx-abc" is an invalid hardware +// version, whereas the result of parsing "vmx-99" is valid, just not supported. +func (hv HardwareVersion) IsValid() bool { + return hv != invalidHardwareVersion +} + func (hv HardwareVersion) String() string { if hv.IsValid() { return fmt.Sprintf("vmx-%d", hv) @@ -85,7 +101,10 @@ func (hv *HardwareVersion) UnmarshalText(text []byte) error { return nil } -var vmxRe = regexp.MustCompile(`(?i)^vmx-(\d+)$`) +var ( + vmxRe = regexp.MustCompile(`(?i)^vmx-(\d+)$`) + vmxNumOnlyRe = regexp.MustCompile(`^(\d+)$`) +) // MustParseHardwareVersion parses the provided string into a hardware version. func MustParseHardwareVersion(s string) HardwareVersion { @@ -97,25 +116,35 @@ func MustParseHardwareVersion(s string) HardwareVersion { } // ParseHardwareVersion parses the provided string into a hardware version. +// Supported formats include vmx-123 or 123. Please note that the parser will +// only return an error if the supplied version does not match the supported +// formats. +// Once parsed, use the function IsSupported to determine if the hardware +// version falls into the range of versions known to GoVmomi. 
func ParseHardwareVersion(s string) (HardwareVersion, error) { - var u uint64 if m := vmxRe.FindStringSubmatch(s); len(m) > 0 { - u, _ = strconv.ParseUint(m[1], 10, 8) - } else { - u, _ = strconv.ParseUint(s, 10, 8) + u, err := strconv.ParseUint(m[1], 10, 8) + if err != nil { + return invalidHardwareVersion, fmt.Errorf( + "failed to parse %s from %q as uint8: %w", m[1], s, err) + } + return HardwareVersion(u), nil + } else if m := vmxNumOnlyRe.FindStringSubmatch(s); len(m) > 0 { + u, err := strconv.ParseUint(m[1], 10, 8) + if err != nil { + return invalidHardwareVersion, fmt.Errorf( + "failed to parse %s as uint8: %w", m[1], err) + } + return HardwareVersion(u), nil } - v := HardwareVersion(u) - if !v.IsValid() { - return 0, fmt.Errorf("invalid version: %q", s) - } - return v, nil + return invalidHardwareVersion, fmt.Errorf("invalid version: %q", s) } var hardwareVersions []HardwareVersion func init() { for i := MinValidHardwareVersion; i <= MaxValidHardwareVersion; i++ { - if i.IsValid() { + if i.IsSupported() { hardwareVersions = append(hardwareVersions, i) } } diff --git a/vendor/github.com/vmware/govmomi/vim25/types/helpers.go b/vendor/github.com/vmware/govmomi/vim25/types/helpers.go index 70360eb4f..94fb50df4 100644 --- a/vendor/github.com/vmware/govmomi/vim25/types/helpers.go +++ b/vendor/github.com/vmware/govmomi/vim25/types/helpers.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2015-2022 VMware, Inc. All Rights Reserved. +Copyright (c) 2015-2024 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -23,6 +23,14 @@ import ( "time" ) +func EnumValuesAsStrings[T ~string](enumValues []T) []string { + stringValues := make([]string, len(enumValues)) + for i := range enumValues { + stringValues[i] = string(enumValues[i]) + } + return stringValues +} + func NewBool(v bool) *bool { return &v } @@ -120,6 +128,7 @@ func (ci VirtualMachineConfigInfo) ToConfigSpec() VirtualMachineConfigSpec { Flags: &ci.Flags, ConsolePreferences: ci.ConsolePreferences, PowerOpInfo: &ci.DefaultPowerOps, + RebootPowerOff: ci.RebootPowerOff, NumCPUs: ci.Hardware.NumCPU, VcpuConfig: ci.VcpuConfig, NumCoresPerSocket: ci.Hardware.NumCoresPerSocket, @@ -129,14 +138,14 @@ func (ci VirtualMachineConfigInfo) ToConfigSpec() VirtualMachineConfigSpec { CpuHotRemoveEnabled: ci.CpuHotRemoveEnabled, VirtualICH7MPresent: ci.Hardware.VirtualICH7MPresent, VirtualSMCPresent: ci.Hardware.VirtualSMCPresent, - DeviceChange: make([]BaseVirtualDeviceConfigSpec, len(ci.Hardware.Device)), + DeviceChange: nil, // See below CpuAllocation: ci.CpuAllocation, MemoryAllocation: ci.MemoryAllocation, LatencySensitivity: ci.LatencySensitivity, CpuAffinity: ci.CpuAffinity, MemoryAffinity: ci.MemoryAffinity, NetworkShaper: ci.NetworkShaper, - CpuFeatureMask: make([]VirtualMachineCpuIdInfoSpec, len(ci.CpuFeatureMask)), + CpuFeatureMask: nil, // See below ExtraConfig: ci.ExtraConfig, SwapPlacement: ci.SwapPlacement, BootOptions: ci.BootOptions, @@ -155,20 +164,21 @@ func (ci VirtualMachineConfigInfo) ToConfigSpec() VirtualMachineConfigSpec { MigrateEncryption: ci.MigrateEncryption, FtEncryptionMode: ci.FtEncryptionMode, SevEnabled: ci.SevEnabled, - PmemFailoverEnabled: 
ci.PmemFailoverEnabled, - Pmem: ci.Pmem, - NpivWorldWideNameOp: ci.NpivWorldWideNameType, - RebootPowerOff: ci.RebootPowerOff, + MotherboardLayout: ci.Hardware.MotherboardLayout, ScheduledHardwareUpgradeInfo: ci.ScheduledHardwareUpgradeInfo, SgxInfo: ci.SgxInfo, GuestMonitoringModeInfo: ci.GuestMonitoringModeInfo, + PmemFailoverEnabled: ci.PmemFailoverEnabled, VmxStatsCollectionEnabled: ci.VmxStatsCollectionEnabled, VmOpNotificationToAppEnabled: ci.VmOpNotificationToAppEnabled, VmOpNotificationTimeout: ci.VmOpNotificationTimeout, DeviceSwap: ci.DeviceSwap, SimultaneousThreads: ci.Hardware.SimultaneousThreads, + Pmem: ci.Pmem, DeviceGroups: ci.DeviceGroups, - MotherboardLayout: ci.Hardware.MotherboardLayout, + FixedPassthruHotPlugEnabled: ci.FixedPassthruHotPlugEnabled, + MetroFtEnabled: ci.MetroFtEnabled, + MetroFtHostGroup: ci.MetroFtHostGroup, } // Unassign the Files field if all of its fields are empty. @@ -210,39 +220,36 @@ func (ci VirtualMachineConfigInfo) ToConfigSpec() VirtualMachineConfigSpec { cs.PowerOpInfo = nil } - for i := 0; i < len(cs.CpuFeatureMask); i++ { - cs.CpuFeatureMask[i] = VirtualMachineCpuIdInfoSpec{ - ArrayUpdateSpec: ArrayUpdateSpec{ - Operation: ArrayUpdateOperationAdd, - }, - Info: &HostCpuIdInfo{ - // TODO: Does DynamicData need to be copied? - // It is an empty struct... - Level: ci.CpuFeatureMask[i].Level, - Vendor: ci.CpuFeatureMask[i].Vendor, - Eax: ci.CpuFeatureMask[i].Eax, - Ebx: ci.CpuFeatureMask[i].Ebx, - Ecx: ci.CpuFeatureMask[i].Ecx, - Edx: ci.CpuFeatureMask[i].Edx, - }, + if l := len(ci.CpuFeatureMask); l > 0 { + cs.CpuFeatureMask = make([]VirtualMachineCpuIdInfoSpec, l) + for i := 0; i < l; i++ { + cs.CpuFeatureMask[i] = VirtualMachineCpuIdInfoSpec{ + ArrayUpdateSpec: ArrayUpdateSpec{ + Operation: ArrayUpdateOperationAdd, + }, + Info: &HostCpuIdInfo{ + Level: ci.CpuFeatureMask[i].Level, + Vendor: ci.CpuFeatureMask[i].Vendor, + Eax: ci.CpuFeatureMask[i].Eax, + Ebx: ci.CpuFeatureMask[i].Ebx, + Ecx: ci.CpuFeatureMask[i].Ecx, + Edx: ci.CpuFeatureMask[i].Edx, + }, + } } } - for i := 0; i < len(cs.DeviceChange); i++ { - cs.DeviceChange[i] = &VirtualDeviceConfigSpec{ - // TODO: Does DynamicData need to be copied? - // It is an empty struct... - Operation: VirtualDeviceConfigSpecOperationAdd, - FileOperation: VirtualDeviceConfigSpecFileOperationCreate, - Device: ci.Hardware.Device[i], - // TODO: It is unclear how the profiles associated with the VM or - // its hardware can be reintroduced/persisted in the - // ConfigSpec. - Profile: nil, - // The backing will come from the device. - Backing: nil, - // TODO: Investigate futher. - FilterSpec: nil, + if l := len(ci.Hardware.Device); l > 0 { + cs.DeviceChange = make([]BaseVirtualDeviceConfigSpec, l) + for i := 0; i < l; i++ { + cs.DeviceChange[i] = &VirtualDeviceConfigSpec{ + Operation: VirtualDeviceConfigSpecOperationAdd, + FileOperation: VirtualDeviceConfigSpecFileOperationCreate, + Device: ci.Hardware.Device[i], + Profile: nil, + Backing: nil, + FilterSpec: nil, + } } } diff --git a/vendor/github.com/vmware/govmomi/vim25/types/if.go b/vendor/github.com/vmware/govmomi/vim25/types/if.go index 938c86b44..e33fb392e 100644 --- a/vendor/github.com/vmware/govmomi/vim25/types/if.go +++ b/vendor/github.com/vmware/govmomi/vim25/types/if.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2014-2023 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2024 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
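The hardware_version.go hunk above separates two questions the old IsValid conflated: ParseHardwareVersion now fails only on input that matches neither "vmx-N" nor "N", while IsSupported reports whether a parsed version falls in the range GoVmomi's generated types know about. A minimal sketch of the new behavior, assuming the vendored types package from this patch; "vmx-99" is just an illustrative future version:

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	for _, s := range []string{"vmx-21", "13", "vmx-99", "vmx-abc"} {
		hv, err := types.ParseHardwareVersion(s)
		if err != nil {
			// Only input matching neither "vmx-N" nor "N" is rejected now.
			fmt.Printf("%s: %v\n", s, err)
			continue
		}
		// IsValid: parsed to a non-zero version. IsSupported: also within
		// GoVmomi's known range (and not the skipped vmx-5).
		fmt.Printf("%s: valid=%v supported=%v\n", hv, hv.IsValid(), hv.IsSupported())
	}
}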
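The generated enums earlier in this patch all gain Values and Strings accessors, and the helpers.go hunk above adds the generic EnumValuesAsStrings they share. A minimal usage sketch, assuming the vendored package as patched:

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	// Values lists every constant defined for an enum type.
	for _, v := range types.WeekOfMonth("").Values() {
		fmt.Println(v)
	}

	// Strings wraps EnumValuesAsStrings for plain-string consumers,
	// e.g. validating input against the known enum values.
	fmt.Println(types.VsanHostHealthState("").Strings()) // [unknown healthy unhealthy]
}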
@@ -616,6 +616,28 @@ func init() { t["BaseDVSFeatureCapability"] = reflect.TypeOf((*DVSFeatureCapability)(nil)).Elem() } +func (b *DVSFilterSpecConnecteeSpec) GetDVSFilterSpecConnecteeSpec() *DVSFilterSpecConnecteeSpec { + return b +} + +type BaseDVSFilterSpecConnecteeSpec interface { + GetDVSFilterSpecConnecteeSpec() *DVSFilterSpecConnecteeSpec +} + +func init() { + t["BaseDVSFilterSpecConnecteeSpec"] = reflect.TypeOf((*DVSFilterSpecConnecteeSpec)(nil)).Elem() +} + +func (b *DVSFilterSpecVlanSpec) GetDVSFilterSpecVlanSpec() *DVSFilterSpecVlanSpec { return b } + +type BaseDVSFilterSpecVlanSpec interface { + GetDVSFilterSpecVlanSpec() *DVSFilterSpecVlanSpec +} + +func init() { + t["BaseDVSFilterSpecVlanSpec"] = reflect.TypeOf((*DVSFilterSpecVlanSpec)(nil)).Elem() +} + func (b *DVSHealthCheckCapability) GetDVSHealthCheckCapability() *DVSHealthCheckCapability { return b } type BaseDVSHealthCheckCapability interface { @@ -1790,6 +1812,16 @@ func init() { t["BaseIoFilterInfo"] = reflect.TypeOf((*IoFilterInfo)(nil)).Elem() } +func (b *IoFilterManagerSslTrust) GetIoFilterManagerSslTrust() *IoFilterManagerSslTrust { return b } + +type BaseIoFilterManagerSslTrust interface { + GetIoFilterManagerSslTrust() *IoFilterManagerSslTrust +} + +func init() { + t["BaseIoFilterManagerSslTrust"] = reflect.TypeOf((*IoFilterManagerSslTrust)(nil)).Elem() +} + func (b *IpAddress) GetIpAddress() *IpAddress { return b } type BaseIpAddress interface { diff --git a/vendor/github.com/vmware/govmomi/vim25/types/types.go b/vendor/github.com/vmware/govmomi/vim25/types/types.go index dc9976b5b..0f8dab11a 100644 --- a/vendor/github.com/vmware/govmomi/vim25/types/types.go +++ b/vendor/github.com/vmware/govmomi/vim25/types/types.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2014-2023 VMware, Inc. All Rights Reserved. +Copyright (c) 2014-2024 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -117,19 +117,19 @@ type AboutInfo struct { // Operating system type and architecture. // // Examples of values are: - // - "win32-x86" - For x86-based Windows systems. - // - "linux-x86" - For x86-based Linux systems. - // - "vmnix-x86" - For the x86 ESX Server microkernel. - // - "vmnix-arm64" - For the arm64 ESX Server microkernel. + // - "win32-x86" - For x86-based Windows systems. + // - "linux-x86" - For x86-based Linux systems. + // - "vmnix-x86" - For the x86 ESX Server microkernel. + // - "vmnix-arm64" - For the arm64 ESX Server microkernel. OsType string `xml:"osType" json:"osType"` // The product ID is a unique identifier for a product line. // // Examples of values are: - // - "gsx" - For the VMware Server product. - // - "esx" - For the ESX product. - // - "embeddedEsx" - For the ESXi product. - // - "esxio" - For the ESXio product. - // - "vpx" - For the VirtualCenter product. + // - "gsx" - For the VMware Server product. + // - "esx" - For the ESX product. + // - "embeddedEsx" - For the ESXi product. + // - "esxio" - For the ESXio product. + // - "vpx" - For the VirtualCenter product. ProductLineId string `xml:"productLineId" json:"productLineId"` // Indicates whether or not the service instance represents a // standalone host. 
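The helpers.go changes above also make VirtualMachineConfigInfo.ToConfigSpec allocate DeviceChange and CpuFeatureMask only when the source config actually carries devices or CPU feature masks, instead of always emitting zero-length slices. A minimal sketch of the resulting behavior, assuming the patched package; "example-vm" is an arbitrary name:

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	// A config with no devices and no CPU feature masks...
	ci := types.VirtualMachineConfigInfo{Name: "example-vm"}
	cs := ci.ToConfigSpec()

	// ...now produces nil slices instead of empty, non-nil ones.
	fmt.Println(cs.DeviceChange == nil, cs.CpuFeatureMask == nil) // true true
}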
@@ -140,19 +140,19 @@ type AboutInfo struct { // For example, VirtualCenter offers multi-host management. // // Examples of values are: - // - "VirtualCenter" - For a VirtualCenter instance. - // - "HostAgent" - For host agent on an ESX Server or VMware Server host. + // - "VirtualCenter" - For a VirtualCenter instance. + // - "HostAgent" - For host agent on an ESX Server or VMware Server host. ApiType string `xml:"apiType" json:"apiType"` // The version of the API as a dot-separated string. // // For example, "1.0.0". ApiVersion string `xml:"apiVersion" json:"apiVersion"` // A globally unique identifier associated with this service instance. - InstanceUuid string `xml:"instanceUuid,omitempty" json:"instanceUuid,omitempty" vim:"4.0"` + InstanceUuid string `xml:"instanceUuid,omitempty" json:"instanceUuid,omitempty"` // The license product name - LicenseProductName string `xml:"licenseProductName,omitempty" json:"licenseProductName,omitempty" vim:"4.0"` + LicenseProductName string `xml:"licenseProductName,omitempty" json:"licenseProductName,omitempty"` // The license product version - LicenseProductVersion string `xml:"licenseProductVersion,omitempty" json:"licenseProductVersion,omitempty" vim:"4.0"` + LicenseProductVersion string `xml:"licenseProductVersion,omitempty" json:"licenseProductVersion,omitempty"` } func init() { @@ -190,7 +190,7 @@ type AccountUpdatedEvent struct { Spec BaseHostAccountSpec `xml:"spec,typeattr" json:"spec"` Group bool `xml:"group" json:"group"` // The previous account description - PrevDescription string `xml:"prevDescription,omitempty" json:"prevDescription,omitempty" vim:"6.5"` + PrevDescription string `xml:"prevDescription,omitempty" json:"prevDescription,omitempty"` } func init() { @@ -402,7 +402,6 @@ type ActiveDirectoryFault struct { func init() { t["ActiveDirectoryFault"] = reflect.TypeOf((*ActiveDirectoryFault)(nil)).Elem() - minAPIVersionForType["ActiveDirectoryFault"] = "4.1" } type ActiveDirectoryFaultFault BaseActiveDirectoryFault @@ -423,7 +422,6 @@ type ActiveDirectoryProfile struct { func init() { t["ActiveDirectoryProfile"] = reflect.TypeOf((*ActiveDirectoryProfile)(nil)).Elem() - minAPIVersionForType["ActiveDirectoryProfile"] = "4.1" } // An attempt to enable Enhanced VMotion Compatibility on a cluster, or to @@ -440,7 +438,7 @@ type ActiveVMsBlockingEVC struct { EVCConfigFault // The requested EVC mode. - EvcMode string `xml:"evcMode,omitempty" json:"evcMode,omitempty" vim:"4.0"` + EvcMode string `xml:"evcMode,omitempty" json:"evcMode,omitempty"` // Hosts with active virtual machines that are blocking the operation, // because the hosts expose compatibility-relevant CPU features not present // in the baseline of the requested EVC mode. @@ -452,14 +450,13 @@ type ActiveVMsBlockingEVC struct { // beyond those present in the baseline for that EVC mode. // // Refers instances of `HostSystem`. - Host []ManagedObjectReference `xml:"host,omitempty" json:"host,omitempty" vim:"4.0"` + Host []ManagedObjectReference `xml:"host,omitempty" json:"host,omitempty"` // The names of the hosts in the host array. 
- HostName []string `xml:"hostName,omitempty" json:"hostName,omitempty" vim:"4.0"` + HostName []string `xml:"hostName,omitempty" json:"hostName,omitempty"` } func init() { t["ActiveVMsBlockingEVC"] = reflect.TypeOf((*ActiveVMsBlockingEVC)(nil)).Elem() - minAPIVersionForType["ActiveVMsBlockingEVC"] = "2.5u2" } type ActiveVMsBlockingEVCFault ActiveVMsBlockingEVC @@ -504,12 +501,12 @@ type AddCustomFieldDefRequestType struct { Name string `xml:"name" json:"name"` // The managed object type to which this field // will apply - MoType string `xml:"moType,omitempty" json:"moType,omitempty" vim:"2.5"` + MoType string `xml:"moType,omitempty" json:"moType,omitempty"` // Privilege policy to apply to FieldDef being // created - FieldDefPolicy *PrivilegePolicyDef `xml:"fieldDefPolicy,omitempty" json:"fieldDefPolicy,omitempty" vim:"2.5"` + FieldDefPolicy *PrivilegePolicyDef `xml:"fieldDefPolicy,omitempty" json:"fieldDefPolicy,omitempty"` // Privilege policy to apply to instances of field - FieldPolicy *PrivilegePolicyDef `xml:"fieldPolicy,omitempty" json:"fieldPolicy,omitempty" vim:"2.5"` + FieldPolicy *PrivilegePolicyDef `xml:"fieldPolicy,omitempty" json:"fieldPolicy,omitempty"` } func init() { @@ -679,7 +676,7 @@ type AddHostRequestType struct { // Refers instance of `ResourcePool`. ResourcePool *ManagedObjectReference `xml:"resourcePool,omitempty" json:"resourcePool,omitempty"` // Provide a licenseKey or licenseKeyType. See `LicenseManager` - License string `xml:"license,omitempty" json:"license,omitempty" vim:"4.0"` + License string `xml:"license,omitempty" json:"license,omitempty"` } func init() { @@ -896,13 +893,13 @@ type AddStandaloneHostRequestType struct { Spec HostConnectSpec `xml:"spec" json:"spec"` // Optionally specify the configuration for the compute // resource that will be created to contain the host. - CompResSpec BaseComputeResourceConfigSpec `xml:"compResSpec,omitempty,typeattr" json:"compResSpec,omitempty" vim:"2.5"` + CompResSpec BaseComputeResourceConfigSpec `xml:"compResSpec,omitempty,typeattr" json:"compResSpec,omitempty"` // Flag to specify whether or not the host should be // connected as soon as it is added. The host will not // be added if a connection attempt is made and fails. AddConnected bool `xml:"addConnected" json:"addConnected"` // Provide a licenseKey or licenseKeyType. See `LicenseManager` - License string `xml:"license,omitempty" json:"license,omitempty" vim:"4.0"` + License string `xml:"license,omitempty" json:"license,omitempty"` } func init() { @@ -970,7 +967,6 @@ type AdminDisabled struct { func init() { t["AdminDisabled"] = reflect.TypeOf((*AdminDisabled)(nil)).Elem() - minAPIVersionForType["AdminDisabled"] = "2.5" } type AdminDisabledFault AdminDisabled @@ -987,7 +983,6 @@ type AdminNotDisabled struct { func init() { t["AdminNotDisabled"] = reflect.TypeOf((*AdminNotDisabled)(nil)).Elem() - minAPIVersionForType["AdminNotDisabled"] = "2.5" } type AdminNotDisabledFault AdminNotDisabled @@ -1003,7 +998,6 @@ type AdminPasswordNotChangedEvent struct { func init() { t["AdminPasswordNotChangedEvent"] = reflect.TypeOf((*AdminPasswordNotChangedEvent)(nil)).Elem() - minAPIVersionForType["AdminPasswordNotChangedEvent"] = "2.5" } // Virtual machine has a configured memory and/or CPU affinity that will @@ -1056,11 +1050,11 @@ type AgentInstallFailed struct { // The reason why the agent install failed, if known. // // Values should come from `AgentInstallFailedReason_enum`. 
- Reason string `xml:"reason,omitempty" json:"reason,omitempty" vim:"4.0"` + Reason string `xml:"reason,omitempty" json:"reason,omitempty"` // The status code returned by the agent installer, if it was run. - StatusCode int32 `xml:"statusCode,omitempty" json:"statusCode,omitempty" vim:"4.0"` + StatusCode int32 `xml:"statusCode,omitempty" json:"statusCode,omitempty"` // The output (stdout/stderr) from executing the agent installer. - InstallerOutput string `xml:"installerOutput,omitempty" json:"installerOutput,omitempty" vim:"4.0"` + InstallerOutput string `xml:"installerOutput,omitempty" json:"installerOutput,omitempty"` } func init() { @@ -1085,7 +1079,6 @@ type AlarmAcknowledgedEvent struct { func init() { t["AlarmAcknowledgedEvent"] = reflect.TypeOf((*AlarmAcknowledgedEvent)(nil)).Elem() - minAPIVersionForType["AlarmAcknowledgedEvent"] = "5.0" } // Action invoked by triggered alarm. @@ -1127,7 +1120,6 @@ type AlarmClearedEvent struct { func init() { t["AlarmClearedEvent"] = reflect.TypeOf((*AlarmClearedEvent)(nil)).Elem() - minAPIVersionForType["AlarmClearedEvent"] = "5.0" } // This event records the creation of an alarm. @@ -1158,11 +1150,11 @@ type AlarmDescription struct { VirtualMachinePowerState []BaseElementDescription `xml:"virtualMachinePowerState,typeattr" json:"virtualMachinePowerState"` // `DatastoreSummary.accessible` and // `description` - DatastoreConnectionState []BaseElementDescription `xml:"datastoreConnectionState,omitempty,typeattr" json:"datastoreConnectionState,omitempty" vim:"4.0"` + DatastoreConnectionState []BaseElementDescription `xml:"datastoreConnectionState,omitempty,typeattr" json:"datastoreConnectionState,omitempty"` // *Host System Power State enum description* - HostSystemPowerState []BaseElementDescription `xml:"hostSystemPowerState,omitempty,typeattr" json:"hostSystemPowerState,omitempty" vim:"4.0"` + HostSystemPowerState []BaseElementDescription `xml:"hostSystemPowerState,omitempty,typeattr" json:"hostSystemPowerState,omitempty"` // *Guest Heartbeat Status enum description* - VirtualMachineGuestHeartbeatStatus []BaseElementDescription `xml:"virtualMachineGuestHeartbeatStatus,omitempty,typeattr" json:"virtualMachineGuestHeartbeatStatus,omitempty" vim:"4.0"` + VirtualMachineGuestHeartbeatStatus []BaseElementDescription `xml:"virtualMachineGuestHeartbeatStatus,omitempty,typeattr" json:"virtualMachineGuestHeartbeatStatus,omitempty"` // *ManagedEntity Status enum description* EntityStatus []BaseElementDescription `xml:"entityStatus,typeattr" json:"entityStatus"` // Action class descriptions for an alarm. @@ -1257,7 +1249,6 @@ type AlarmFilterSpec struct { func init() { t["AlarmFilterSpec"] = reflect.TypeOf((*AlarmFilterSpec)(nil)).Elem() - minAPIVersionForType["AlarmFilterSpec"] = "6.7" } // Attributes of an alarm. @@ -1293,7 +1284,7 @@ type AlarmReconfiguredEvent struct { // The entity with which the alarm is registered. Entity ManagedEntityEventArgument `xml:"entity" json:"entity"` // The configuration values changed during the reconfiguration. - ConfigChanges *ChangesInfoEventArgument `xml:"configChanges,omitempty" json:"configChanges,omitempty" vim:"6.5"` + ConfigChanges *ChangesInfoEventArgument `xml:"configChanges,omitempty" json:"configChanges,omitempty"` } func init() { @@ -1418,7 +1409,7 @@ type AlarmSpec struct { // allowed, i.e. when reconfiguring, the name passed in the new AlarmSpec // should be equal to either the systemName or its localized version (the // current name in the Alarm's info). 
- SystemName string `xml:"systemName,omitempty" json:"systemName,omitempty" vim:"5.0"` + SystemName string `xml:"systemName,omitempty" json:"systemName,omitempty"` // Description of the alarm. Description string `xml:"description" json:"description"` // Flag to indicate whether or not the alarm is enabled or disabled. @@ -1429,7 +1420,7 @@ type AlarmSpec struct { Action BaseAlarmAction `xml:"action,omitempty,typeattr" json:"action,omitempty"` // Frequency in seconds, which specifies how often appropriate actions // should repeat when an alarm does not change state. - ActionFrequency int32 `xml:"actionFrequency,omitempty" json:"actionFrequency,omitempty" vim:"4.0"` + ActionFrequency int32 `xml:"actionFrequency,omitempty" json:"actionFrequency,omitempty"` // Tolerance and maximum frequency settings. Setting *AlarmSetting `xml:"setting,omitempty" json:"setting,omitempty"` } @@ -1470,26 +1461,26 @@ type AlarmState struct { Time time.Time `xml:"time" json:"time"` // Flag to indicate if the alarm's actions have been acknowledged for the // associated ManagedEntity. - Acknowledged *bool `xml:"acknowledged" json:"acknowledged,omitempty" vim:"4.0"` + Acknowledged *bool `xml:"acknowledged" json:"acknowledged,omitempty"` // The user who acknowledged this triggering. // // If the triggering has not // been acknowledged, then the value is not valid. - AcknowledgedByUser string `xml:"acknowledgedByUser,omitempty" json:"acknowledgedByUser,omitempty" vim:"4.0"` + AcknowledgedByUser string `xml:"acknowledgedByUser,omitempty" json:"acknowledgedByUser,omitempty"` // The time this triggering was acknowledged. // // If the triggering has not // been acknowledged, then the value is not valid. - AcknowledgedTime *time.Time `xml:"acknowledgedTime" json:"acknowledgedTime,omitempty" vim:"4.0"` + AcknowledgedTime *time.Time `xml:"acknowledgedTime" json:"acknowledgedTime,omitempty"` // Contains the key of the event that has triggered the alarm. // // The value // is set only for event based alarms. The value is not set for gray or // manually reset alarms (via vim.AlarmManager.setAlarmStatus). - EventKey int32 `xml:"eventKey,omitempty" json:"eventKey,omitempty" vim:"6.0"` + EventKey int32 `xml:"eventKey,omitempty" json:"eventKey,omitempty"` // Flag to indicate if the alarm is disabled for the associated // ManagedEntity. - Disabled *bool `xml:"disabled" json:"disabled,omitempty" vim:"6.9.1"` + Disabled *bool `xml:"disabled" json:"disabled,omitempty"` } func init() { @@ -1529,7 +1520,7 @@ type AlarmTriggeringAction struct { // Indicates on which transitions this action executes and repeats. // // This is optional only for backwards compatibility. - TransitionSpecs []AlarmTriggeringActionTransitionSpec `xml:"transitionSpecs,omitempty" json:"transitionSpecs,omitempty" vim:"4.0"` + TransitionSpecs []AlarmTriggeringActionTransitionSpec `xml:"transitionSpecs,omitempty" json:"transitionSpecs,omitempty"` // Deprecated as of vSphere API 4.0, use // `AlarmTriggeringActionTransitionSpec` . 
// @@ -1590,7 +1581,6 @@ type AlarmTriggeringActionTransitionSpec struct { func init() { t["AlarmTriggeringActionTransitionSpec"] = reflect.TypeOf((*AlarmTriggeringActionTransitionSpec)(nil)).Elem() - minAPIVersionForType["AlarmTriggeringActionTransitionSpec"] = "4.0" } // This event records that the previously unlicensed virtual machines on @@ -1607,7 +1597,6 @@ type AllVirtualMachinesLicensedEvent struct { func init() { t["AllVirtualMachinesLicensedEvent"] = reflect.TypeOf((*AllVirtualMachinesLicensedEvent)(nil)).Elem() - minAPIVersionForType["AllVirtualMachinesLicensedEvent"] = "2.5" } type AllocateIpv4Address AllocateIpv4AddressRequestType @@ -1757,11 +1746,11 @@ type AndAlarmExpression struct { AlarmExpression // List of alarm expressions that define the overall status of the alarm. - // - The state of the alarm expression is gray if all subexpressions are gray. - // Otherwise, gray subexpressions are ignored. - // - The state is red if all subexpressions are red. - // - Otherwise, the state is yellow if all subexpressions are red or yellow. - // - Otherwise, the state of the alarm expression is green. + // - The state of the alarm expression is gray if all subexpressions are gray. + // Otherwise, gray subexpressions are ignored. + // - The state is red if all subexpressions are red. + // - Otherwise, the state is yellow if all subexpressions are red or yellow. + // - Otherwise, the state of the alarm expression is green. Expression []BaseAlarmExpression `xml:"expression,typeattr" json:"expression"` } @@ -1800,7 +1789,6 @@ type AnswerFile struct { func init() { t["AnswerFile"] = reflect.TypeOf((*AnswerFile)(nil)).Elem() - minAPIVersionForType["AnswerFile"] = "5.0" } // Base class for host-specific answer file options. @@ -1812,12 +1800,11 @@ type AnswerFileCreateSpec struct { // The default if not specified is "true". // This option should be used with caution, since the resulting answer // file will not be checked for errors. - Validating *bool `xml:"validating" json:"validating,omitempty" vim:"6.0"` + Validating *bool `xml:"validating" json:"validating,omitempty"` } func init() { t["AnswerFileCreateSpec"] = reflect.TypeOf((*AnswerFileCreateSpec)(nil)).Elem() - minAPIVersionForType["AnswerFileCreateSpec"] = "5.0" } // The `AnswerFileOptionsCreateSpec` @@ -1831,7 +1818,6 @@ type AnswerFileOptionsCreateSpec struct { func init() { t["AnswerFileOptionsCreateSpec"] = reflect.TypeOf((*AnswerFileOptionsCreateSpec)(nil)).Elem() - minAPIVersionForType["AnswerFileOptionsCreateSpec"] = "5.0" } // The `AnswerFileSerializedCreateSpec` data object @@ -1845,7 +1831,6 @@ type AnswerFileSerializedCreateSpec struct { func init() { t["AnswerFileSerializedCreateSpec"] = reflect.TypeOf((*AnswerFileSerializedCreateSpec)(nil)).Elem() - minAPIVersionForType["AnswerFileSerializedCreateSpec"] = "5.0" } // The `AnswerFileStatusError` data object describes an answer file @@ -1862,7 +1847,6 @@ type AnswerFileStatusError struct { func init() { t["AnswerFileStatusError"] = reflect.TypeOf((*AnswerFileStatusError)(nil)).Elem() - minAPIVersionForType["AnswerFileStatusError"] = "5.0" } // The `AnswerFileStatusResult` data object shows the validity of the @@ -1887,7 +1871,6 @@ type AnswerFileStatusResult struct { func init() { t["AnswerFileStatusResult"] = reflect.TypeOf((*AnswerFileStatusResult)(nil)).Elem() - minAPIVersionForType["AnswerFileStatusResult"] = "5.0" } // Could not update the answer file as it has invalid inputs. 
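Most of the types.go hunks in this stretch only drop minAPIVersionForType bookkeeping; the init() functions still register every type under its WSDL name. A minimal sketch of what that registry feeds, assuming the package's TypeFunc accessor (not shown in these hunks), which the vim25 XML decoder uses for lookups:

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	// TypeFunc returns a lookup from WSDL type name to reflect.Type, which is
	// how SOAP payloads are decoded back into these generated structs.
	lookup := types.TypeFunc()
	if typ, ok := lookup("AnswerFileStatusResult"); ok {
		fmt.Println(typ) // types.AnswerFileStatusResult
	}
}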
@@ -1900,7 +1883,6 @@ type AnswerFileUpdateFailed struct { func init() { t["AnswerFileUpdateFailed"] = reflect.TypeOf((*AnswerFileUpdateFailed)(nil)).Elem() - minAPIVersionForType["AnswerFileUpdateFailed"] = "5.0" } type AnswerFileUpdateFailedFault AnswerFileUpdateFailed @@ -1922,7 +1904,6 @@ type AnswerFileUpdateFailure struct { func init() { t["AnswerFileUpdateFailure"] = reflect.TypeOf((*AnswerFileUpdateFailure)(nil)).Elem() - minAPIVersionForType["AnswerFileUpdateFailure"] = "5.0" } type AnswerVM AnswerVMRequestType @@ -2042,7 +2023,7 @@ type ApplyHostConfigRequestType struct { // `HostProfile*.*HostProfile.ExecuteHostProfile` // method, contained in the `ProfileExecuteResult` object // returned by the method. - UserInput []ProfileDeferredPolicyOptionParameter `xml:"userInput,omitempty" json:"userInput,omitempty" vim:"5.0"` + UserInput []ProfileDeferredPolicyOptionParameter `xml:"userInput,omitempty" json:"userInput,omitempty"` } func init() { @@ -2086,7 +2067,6 @@ type ApplyHostProfileConfigurationResult struct { func init() { t["ApplyHostProfileConfigurationResult"] = reflect.TypeOf((*ApplyHostProfileConfigurationResult)(nil)).Elem() - minAPIVersionForType["ApplyHostProfileConfigurationResult"] = "6.5" } // The data object that contains the objects needed to remediate a host @@ -2129,7 +2109,6 @@ type ApplyHostProfileConfigurationSpec struct { func init() { t["ApplyHostProfileConfigurationSpec"] = reflect.TypeOf((*ApplyHostProfileConfigurationSpec)(nil)).Elem() - minAPIVersionForType["ApplyHostProfileConfigurationSpec"] = "6.5" } // The `ApplyProfile` data object is the base class for all data objects @@ -2151,35 +2130,34 @@ type ApplyProfile struct { // list. Policy []ProfilePolicy `xml:"policy,omitempty" json:"policy,omitempty"` // Identifies the profile type. - ProfileTypeName string `xml:"profileTypeName,omitempty" json:"profileTypeName,omitempty" vim:"5.0"` + ProfileTypeName string `xml:"profileTypeName,omitempty" json:"profileTypeName,omitempty"` // Profile engine version. - ProfileVersion string `xml:"profileVersion,omitempty" json:"profileVersion,omitempty" vim:"5.0"` + ProfileVersion string `xml:"profileVersion,omitempty" json:"profileVersion,omitempty"` // List of subprofiles for this profile. // // This list can change depending on which profile plug-ins are available in the system. // Subprofiles can be nested to arbitrary depths to represent host capabilities. - Property []ProfileApplyProfileProperty `xml:"property,omitempty" json:"property,omitempty" vim:"5.0"` + Property []ProfileApplyProfileProperty `xml:"property,omitempty" json:"property,omitempty"` // Indicates whether this profile is marked as "favorite". - Favorite *bool `xml:"favorite" json:"favorite,omitempty" vim:"6.5"` + Favorite *bool `xml:"favorite" json:"favorite,omitempty"` // Indicates whether this profile is marked as to-be-merged. - ToBeMerged *bool `xml:"toBeMerged" json:"toBeMerged,omitempty" vim:"6.5"` + ToBeMerged *bool `xml:"toBeMerged" json:"toBeMerged,omitempty"` // Indicates whether the selected array elements, with the current // as one of them, replace the profile array in the target host // profile. - ToReplaceWith *bool `xml:"toReplaceWith" json:"toReplaceWith,omitempty" vim:"6.5"` + ToReplaceWith *bool `xml:"toReplaceWith" json:"toReplaceWith,omitempty"` // Indicates whether this profile is marked as to-be-deleted. 
- ToBeDeleted *bool `xml:"toBeDeleted" json:"toBeDeleted,omitempty" vim:"6.5"` + ToBeDeleted *bool `xml:"toBeDeleted" json:"toBeDeleted,omitempty"` // Indicates that the member variable enabled of this profile // will be copied from source profile to target profiles at host profile // composition. - CopyEnableStatus *bool `xml:"copyEnableStatus" json:"copyEnableStatus,omitempty" vim:"6.5"` + CopyEnableStatus *bool `xml:"copyEnableStatus" json:"copyEnableStatus,omitempty"` // Indicates whether this profile will be displayed or not. - Hidden *bool `xml:"hidden" json:"hidden,omitempty" vim:"6.7"` + Hidden *bool `xml:"hidden" json:"hidden,omitempty"` } func init() { t["ApplyProfile"] = reflect.TypeOf((*ApplyProfile)(nil)).Elem() - minAPIVersionForType["ApplyProfile"] = "4.0" } type ApplyRecommendation ApplyRecommendationRequestType @@ -2273,7 +2251,6 @@ type ApplyStorageRecommendationResult struct { func init() { t["ApplyStorageRecommendationResult"] = reflect.TypeOf((*ApplyStorageRecommendationResult)(nil)).Elem() - minAPIVersionForType["ApplyStorageRecommendationResult"] = "5.0" } type AreAlarmActionsEnabled AreAlarmActionsEnabledRequestType @@ -2454,7 +2431,7 @@ func init() { // A boxed array of `PrimitiveByte`. To be used in `Any` placeholders. type ArrayOfByte struct { - Byte []byte `xml:"byte,omitempty" json:"_value"` + Byte ByteSlice `xml:"byte,omitempty" json:"_value"` } func init() { @@ -2551,6 +2528,16 @@ func init() { t["ArrayOfClusterComputeResourceHostConfigurationInput"] = reflect.TypeOf((*ArrayOfClusterComputeResourceHostConfigurationInput)(nil)).Elem() } +// A boxed array of `ClusterComputeResourceHostEvacuationInfo`. To be used in `Any` placeholders. +type ArrayOfClusterComputeResourceHostEvacuationInfo struct { + ClusterComputeResourceHostEvacuationInfo []ClusterComputeResourceHostEvacuationInfo `xml:"ClusterComputeResourceHostEvacuationInfo,omitempty" json:"_value"` +} + +func init() { + t["ArrayOfClusterComputeResourceHostEvacuationInfo"] = reflect.TypeOf((*ArrayOfClusterComputeResourceHostEvacuationInfo)(nil)).Elem() + minAPIVersionForType["ArrayOfClusterComputeResourceHostEvacuationInfo"] = "8.0.3.0" +} + // A boxed array of `ClusterComputeResourceHostVmkNicInfo`. To be used in `Any` placeholders. type ArrayOfClusterComputeResourceHostVmkNicInfo struct { ClusterComputeResourceHostVmkNicInfo []ClusterComputeResourceHostVmkNicInfo `xml:"ClusterComputeResourceHostVmkNicInfo,omitempty" json:"_value"` @@ -2576,6 +2563,7 @@ type ArrayOfClusterComputeResourceVcsSlots struct { func init() { t["ArrayOfClusterComputeResourceVcsSlots"] = reflect.TypeOf((*ArrayOfClusterComputeResourceVcsSlots)(nil)).Elem() + minAPIVersionForType["ArrayOfClusterComputeResourceVcsSlots"] = "7.0.1.1" } // A boxed array of `ClusterDasAamNodeState`. To be used in `Any` placeholders. @@ -2630,6 +2618,7 @@ type ArrayOfClusterDatastoreUpdateSpec struct { func init() { t["ArrayOfClusterDatastoreUpdateSpec"] = reflect.TypeOf((*ArrayOfClusterDatastoreUpdateSpec)(nil)).Elem() + minAPIVersionForType["ArrayOfClusterDatastoreUpdateSpec"] = "7.0.3.0" } // A boxed array of `ClusterDpmHostConfigInfo`. To be used in `Any` placeholders. @@ -2801,6 +2790,7 @@ type ArrayOfClusterTagCategoryUpdateSpec struct { func init() { t["ArrayOfClusterTagCategoryUpdateSpec"] = reflect.TypeOf((*ArrayOfClusterTagCategoryUpdateSpec)(nil)).Elem() + minAPIVersionForType["ArrayOfClusterTagCategoryUpdateSpec"] = "7.0.3.0" } // A boxed array of `ClusterVmOrchestrationInfo`. To be used in `Any` placeholders. 
@@ -2909,6 +2899,7 @@ type ArrayOfCryptoManagerHostKeyStatus struct { func init() { t["ArrayOfCryptoManagerHostKeyStatus"] = reflect.TypeOf((*ArrayOfCryptoManagerHostKeyStatus)(nil)).Elem() + minAPIVersionForType["ArrayOfCryptoManagerHostKeyStatus"] = "8.0.1.0" } // A boxed array of `CryptoManagerKmipClusterStatus`. To be used in `Any` placeholders. @@ -3017,6 +3008,7 @@ type ArrayOfDVSManagerPhysicalNicsList struct { func init() { t["ArrayOfDVSManagerPhysicalNicsList"] = reflect.TypeOf((*ArrayOfDVSManagerPhysicalNicsList)(nil)).Elem() + minAPIVersionForType["ArrayOfDVSManagerPhysicalNicsList"] = "8.0.0.1" } // A boxed array of `DVSNetworkResourcePool`. To be used in `Any` placeholders. @@ -3107,6 +3099,7 @@ type ArrayOfDesiredSoftwareSpecComponentSpec struct { func init() { t["ArrayOfDesiredSoftwareSpecComponentSpec"] = reflect.TypeOf((*ArrayOfDesiredSoftwareSpecComponentSpec)(nil)).Elem() + minAPIVersionForType["ArrayOfDesiredSoftwareSpecComponentSpec"] = "7.0.2.0" } // A boxed array of `DiagnosticManagerBundleInfo`. To be used in `Any` placeholders. @@ -3181,6 +3174,16 @@ func init() { t["ArrayOfDistributedVirtualSwitchHostMemberConfigSpec"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchHostMemberConfigSpec)(nil)).Elem() } +// A boxed array of `DistributedVirtualSwitchHostMemberHostUplinkState`. To be used in `Any` placeholders. +type ArrayOfDistributedVirtualSwitchHostMemberHostUplinkState struct { + DistributedVirtualSwitchHostMemberHostUplinkState []DistributedVirtualSwitchHostMemberHostUplinkState `xml:"DistributedVirtualSwitchHostMemberHostUplinkState,omitempty" json:"_value"` +} + +func init() { + t["ArrayOfDistributedVirtualSwitchHostMemberHostUplinkState"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchHostMemberHostUplinkState)(nil)).Elem() + minAPIVersionForType["ArrayOfDistributedVirtualSwitchHostMemberHostUplinkState"] = "8.0.3.0" +} + // A boxed array of `DistributedVirtualSwitchHostMemberPnicSpec`. To be used in `Any` placeholders. type ArrayOfDistributedVirtualSwitchHostMemberPnicSpec struct { DistributedVirtualSwitchHostMemberPnicSpec []DistributedVirtualSwitchHostMemberPnicSpec `xml:"DistributedVirtualSwitchHostMemberPnicSpec,omitempty" json:"_value"` @@ -3251,6 +3254,7 @@ type ArrayOfDistributedVirtualSwitchNetworkOffloadSpec struct { func init() { t["ArrayOfDistributedVirtualSwitchNetworkOffloadSpec"] = reflect.TypeOf((*ArrayOfDistributedVirtualSwitchNetworkOffloadSpec)(nil)).Elem() + minAPIVersionForType["ArrayOfDistributedVirtualSwitchNetworkOffloadSpec"] = "8.0.0.1" } // A boxed array of `DistributedVirtualSwitchProductSpec`. To be used in `Any` placeholders. @@ -3278,6 +3282,7 @@ type ArrayOfDpuStatusInfo struct { func init() { t["ArrayOfDpuStatusInfo"] = reflect.TypeOf((*ArrayOfDpuStatusInfo)(nil)).Elem() + minAPIVersionForType["ArrayOfDpuStatusInfo"] = "8.0.0.1" } // A boxed array of `DpuStatusInfoOperationalInfo`. To be used in `Any` placeholders. @@ -3287,6 +3292,7 @@ type ArrayOfDpuStatusInfoOperationalInfo struct { func init() { t["ArrayOfDpuStatusInfoOperationalInfo"] = reflect.TypeOf((*ArrayOfDpuStatusInfoOperationalInfo)(nil)).Elem() + minAPIVersionForType["ArrayOfDpuStatusInfoOperationalInfo"] = "8.0.0.1" } // A boxed array of `DvsApplyOperationFaultFaultOnObject`. To be used in `Any` placeholders. @@ -3647,6 +3653,7 @@ type ArrayOfFeatureEVCMode struct { func init() { t["ArrayOfFeatureEVCMode"] = reflect.TypeOf((*ArrayOfFeatureEVCMode)(nil)).Elem() + minAPIVersionForType["ArrayOfFeatureEVCMode"] = "7.0.1.0" } // A boxed array of `FileInfo`. 
To be used in `Any` placeholders. @@ -3665,6 +3672,7 @@ type ArrayOfFileLockInfo struct { func init() { t["ArrayOfFileLockInfo"] = reflect.TypeOf((*ArrayOfFileLockInfo)(nil)).Elem() + minAPIVersionForType["ArrayOfFileLockInfo"] = "8.0.2.0" } // A boxed array of `FileQuery`. To be used in `Any` placeholders. @@ -4097,6 +4105,7 @@ type ArrayOfHostDvxClass struct { func init() { t["ArrayOfHostDvxClass"] = reflect.TypeOf((*ArrayOfHostDvxClass)(nil)).Elem() + minAPIVersionForType["ArrayOfHostDvxClass"] = "8.0.0.1" } // A boxed array of `HostEventArgument`. To be used in `Any` placeholders. @@ -4367,6 +4376,7 @@ type ArrayOfHostMemoryTierInfo struct { func init() { t["ArrayOfHostMemoryTierInfo"] = reflect.TypeOf((*ArrayOfHostMemoryTierInfo)(nil)).Elem() + minAPIVersionForType["ArrayOfHostMemoryTierInfo"] = "7.0.3.0" } // A boxed array of `HostMultipathInfoLogicalUnit`. To be used in `Any` placeholders. @@ -4549,6 +4559,16 @@ func init() { t["ArrayOfHostOpaqueSwitchPhysicalNicZone"] = reflect.TypeOf((*ArrayOfHostOpaqueSwitchPhysicalNicZone)(nil)).Elem() } +// A boxed array of `HostPartialMaintenanceModeRuntimeInfo`. To be used in `Any` placeholders. +type ArrayOfHostPartialMaintenanceModeRuntimeInfo struct { + HostPartialMaintenanceModeRuntimeInfo []HostPartialMaintenanceModeRuntimeInfo `xml:"HostPartialMaintenanceModeRuntimeInfo,omitempty" json:"_value"` +} + +func init() { + t["ArrayOfHostPartialMaintenanceModeRuntimeInfo"] = reflect.TypeOf((*ArrayOfHostPartialMaintenanceModeRuntimeInfo)(nil)).Elem() + minAPIVersionForType["ArrayOfHostPartialMaintenanceModeRuntimeInfo"] = "8.0.3.0" +} + // A boxed array of `HostPatchManagerStatus`. To be used in `Any` placeholders. type ArrayOfHostPatchManagerStatus struct { HostPatchManagerStatus []HostPatchManagerStatus `xml:"HostPatchManagerStatus,omitempty" json:"_value"` @@ -4781,6 +4801,7 @@ type ArrayOfHostPtpConfigPtpPort struct { func init() { t["ArrayOfHostPtpConfigPtpPort"] = reflect.TypeOf((*ArrayOfHostPtpConfigPtpPort)(nil)).Elem() + minAPIVersionForType["ArrayOfHostPtpConfigPtpPort"] = "7.0.3.0" } // A boxed array of `HostQualifiedName`. To be used in `Any` placeholders. @@ -4790,6 +4811,7 @@ type ArrayOfHostQualifiedName struct { func init() { t["ArrayOfHostQualifiedName"] = reflect.TypeOf((*ArrayOfHostQualifiedName)(nil)).Elem() + minAPIVersionForType["ArrayOfHostQualifiedName"] = "7.0.3.0" } // A boxed array of `HostRdmaDevice`. To be used in `Any` placeholders. @@ -5024,6 +5046,7 @@ type ArrayOfHostTrustAuthorityAttestationInfo struct { func init() { t["ArrayOfHostTrustAuthorityAttestationInfo"] = reflect.TypeOf((*ArrayOfHostTrustAuthorityAttestationInfo)(nil)).Elem() + minAPIVersionForType["ArrayOfHostTrustAuthorityAttestationInfo"] = "7.0.1.0" } // A boxed array of `HostUnresolvedVmfsExtent`. To be used in `Any` placeholders. @@ -5186,6 +5209,7 @@ type ArrayOfHostVvolNQN struct { func init() { t["ArrayOfHostVvolNQN"] = reflect.TypeOf((*ArrayOfHostVvolNQN)(nil)).Elem() + minAPIVersionForType["ArrayOfHostVvolNQN"] = "8.0.2.0" } // A boxed array of `HostVvolVolumeHostVvolNQN`. To be used in `Any` placeholders. @@ -5195,6 +5219,7 @@ type ArrayOfHostVvolVolumeHostVvolNQN struct { func init() { t["ArrayOfHostVvolVolumeHostVvolNQN"] = reflect.TypeOf((*ArrayOfHostVvolVolumeHostVvolNQN)(nil)).Elem() + minAPIVersionForType["ArrayOfHostVvolVolumeHostVvolNQN"] = "8.0.2.0" } // A boxed array of `HttpNfcLeaseDatastoreLeaseInfo`. To be used in `Any` placeholders. 
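The ArrayOf* additions here follow the convention spelled out in their comments: each boxed struct wraps a slice so an array-valued property can travel inside an Any placeholder, for example as a DynamicProperty.Val. A minimal, hypothetical unboxing sketch, assuming the standard ManagedObjectReference boxed type from the same package; "host-42" is an invented identifier:

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

// hostsFromAny unwraps a boxed ManagedObjectReference array; any other
// dynamic value yields nil.
func hostsFromAny(val types.AnyType) []types.ManagedObjectReference {
	if boxed, ok := val.(types.ArrayOfManagedObjectReference); ok {
		return boxed.ManagedObjectReference
	}
	return nil
}

func main() {
	boxed := types.ArrayOfManagedObjectReference{
		ManagedObjectReference: []types.ManagedObjectReference{
			{Type: "HostSystem", Value: "host-42"},
		},
	}
	fmt.Println(hostsFromAny(boxed)) // [HostSystem:host-42]
}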
@@ -5240,6 +5265,7 @@ type ArrayOfHttpNfcLeaseProbeResult struct { func init() { t["ArrayOfHttpNfcLeaseProbeResult"] = reflect.TypeOf((*ArrayOfHttpNfcLeaseProbeResult)(nil)).Elem() + minAPIVersionForType["ArrayOfHttpNfcLeaseProbeResult"] = "7.0.2.0" } // A boxed array of `HttpNfcLeaseSourceFile`. To be used in `Any` placeholders. @@ -5600,6 +5626,7 @@ type ArrayOfNoPermissionEntityPrivileges struct { func init() { t["ArrayOfNoPermissionEntityPrivileges"] = reflect.TypeOf((*ArrayOfNoPermissionEntityPrivileges)(nil)).Elem() + minAPIVersionForType["ArrayOfNoPermissionEntityPrivileges"] = "7.0.3.2" } // A boxed array of `NsxHostVNicProfile`. To be used in `Any` placeholders. @@ -6589,6 +6616,7 @@ type ArrayOfVASAStorageArrayDiscoverySvcInfo struct { func init() { t["ArrayOfVASAStorageArrayDiscoverySvcInfo"] = reflect.TypeOf((*ArrayOfVASAStorageArrayDiscoverySvcInfo)(nil)).Elem() + minAPIVersionForType["ArrayOfVASAStorageArrayDiscoverySvcInfo"] = "8.0.0.0" } // A boxed array of `VAppCloneSpecNetworkMappingPair`. To be used in `Any` placeholders. @@ -6886,6 +6914,7 @@ type ArrayOfVirtualMachineBaseIndependentFilterSpec struct { func init() { t["ArrayOfVirtualMachineBaseIndependentFilterSpec"] = reflect.TypeOf((*ArrayOfVirtualMachineBaseIndependentFilterSpec)(nil)).Elem() + minAPIVersionForType["ArrayOfVirtualMachineBaseIndependentFilterSpec"] = "7.0.2.1" } // A boxed array of `VirtualMachineBootOptionsBootableDevice`. To be used in `Any` placeholders. @@ -6913,6 +6942,7 @@ type ArrayOfVirtualMachineCertThumbprint struct { func init() { t["ArrayOfVirtualMachineCertThumbprint"] = reflect.TypeOf((*ArrayOfVirtualMachineCertThumbprint)(nil)).Elem() + minAPIVersionForType["ArrayOfVirtualMachineCertThumbprint"] = "7.0.3.1" } // A boxed array of `VirtualMachineConfigInfoDatastoreUrlPair`. To be used in `Any` placeholders. @@ -6949,6 +6979,7 @@ type ArrayOfVirtualMachineConnection struct { func init() { t["ArrayOfVirtualMachineConnection"] = reflect.TypeOf((*ArrayOfVirtualMachineConnection)(nil)).Elem() + minAPIVersionForType["ArrayOfVirtualMachineConnection"] = "7.0.1.0" } // A boxed array of `VirtualMachineCpuIdInfoSpec`. To be used in `Any` placeholders. @@ -7003,6 +7034,7 @@ type ArrayOfVirtualMachineDvxClassInfo struct { func init() { t["ArrayOfVirtualMachineDvxClassInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineDvxClassInfo)(nil)).Elem() + minAPIVersionForType["ArrayOfVirtualMachineDvxClassInfo"] = "8.0.0.1" } // A boxed array of `VirtualMachineDynamicPassthroughInfo`. To be used in `Any` placeholders. @@ -7219,6 +7251,7 @@ type ArrayOfVirtualMachineQuickStatsMemoryTierStats struct { func init() { t["ArrayOfVirtualMachineQuickStatsMemoryTierStats"] = reflect.TypeOf((*ArrayOfVirtualMachineQuickStatsMemoryTierStats)(nil)).Elem() + minAPIVersionForType["ArrayOfVirtualMachineQuickStatsMemoryTierStats"] = "7.0.3.0" } // A boxed array of `VirtualMachineRelocateSpecDiskLocator`. To be used in `Any` placeholders. @@ -7336,6 +7369,7 @@ type ArrayOfVirtualMachineVMotionStunTimeInfo struct { func init() { t["ArrayOfVirtualMachineVMotionStunTimeInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineVMotionStunTimeInfo)(nil)).Elem() + minAPIVersionForType["ArrayOfVirtualMachineVMotionStunTimeInfo"] = "8.0.2.0" } // A boxed array of `VirtualMachineVcpuConfig`. To be used in `Any` placeholders. 
@@ -7354,6 +7388,7 @@ type ArrayOfVirtualMachineVendorDeviceGroupInfo struct { func init() { t["ArrayOfVirtualMachineVendorDeviceGroupInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineVendorDeviceGroupInfo)(nil)).Elem() + minAPIVersionForType["ArrayOfVirtualMachineVendorDeviceGroupInfo"] = "8.0.0.1" } // A boxed array of `VirtualMachineVendorDeviceGroupInfoComponentDeviceInfo`. To be used in `Any` placeholders. @@ -7363,6 +7398,7 @@ type ArrayOfVirtualMachineVendorDeviceGroupInfoComponentDeviceInfo struct { func init() { t["ArrayOfVirtualMachineVendorDeviceGroupInfoComponentDeviceInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineVendorDeviceGroupInfoComponentDeviceInfo)(nil)).Elem() + minAPIVersionForType["ArrayOfVirtualMachineVendorDeviceGroupInfoComponentDeviceInfo"] = "8.0.0.1" } // A boxed array of `VirtualMachineVgpuDeviceInfo`. To be used in `Any` placeholders. @@ -7372,6 +7408,7 @@ type ArrayOfVirtualMachineVgpuDeviceInfo struct { func init() { t["ArrayOfVirtualMachineVgpuDeviceInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineVgpuDeviceInfo)(nil)).Elem() + minAPIVersionForType["ArrayOfVirtualMachineVgpuDeviceInfo"] = "7.0.3.0" } // A boxed array of `VirtualMachineVgpuProfileInfo`. To be used in `Any` placeholders. @@ -7381,6 +7418,7 @@ type ArrayOfVirtualMachineVgpuProfileInfo struct { func init() { t["ArrayOfVirtualMachineVgpuProfileInfo"] = reflect.TypeOf((*ArrayOfVirtualMachineVgpuProfileInfo)(nil)).Elem() + minAPIVersionForType["ArrayOfVirtualMachineVgpuProfileInfo"] = "7.0.3.0" } // A boxed array of `VirtualMachineVirtualDeviceGroupsDeviceGroup`. To be used in `Any` placeholders. @@ -7863,7 +7901,6 @@ type AuthenticationProfile struct { func init() { t["AuthenticationProfile"] = reflect.TypeOf((*AuthenticationProfile)(nil)).Elem() - minAPIVersionForType["AuthenticationProfile"] = "4.1" } // Static strings for authorization. @@ -8017,12 +8054,12 @@ type AutoStartPowerInfo struct { // powered on at any time. Machines with a -1 value are typically powered on and // off after all virtual machines with positive startOrder values. Failure to // meet the following requirements results in an InvalidArgument exception: - // - startOrder must be set to -1 if startAction is set to none - // - startOrder must be -1 or positive integers. Values such as 0 or - // \-2 are not valid. - // - startOrder is relative to other virtual machines in the autostart - // sequence. Hence specifying a startOrder of 4 when there are only 3 - // virtual machines in the Autostart sequence is not valid. + // - startOrder must be set to -1 if startAction is set to none + // - startOrder must be -1 or positive integers. Values such as 0 or + // \-2 are not valid. + // - startOrder is relative to other virtual machines in the autostart + // sequence. Hence specifying a startOrder of 4 when there are only 3 + // virtual machines in the Autostart sequence is not valid. 
// // If a newly established or changed startOrder value for a virtual machine // matches an existing startOrder value, the newly applied value takes @@ -8116,7 +8153,6 @@ type BackupBlobReadFailure struct { func init() { t["BackupBlobReadFailure"] = reflect.TypeOf((*BackupBlobReadFailure)(nil)).Elem() - minAPIVersionForType["BackupBlobReadFailure"] = "5.1" } type BackupBlobReadFailureFault BackupBlobReadFailure @@ -8139,7 +8175,6 @@ type BackupBlobWriteFailure struct { func init() { t["BackupBlobWriteFailure"] = reflect.TypeOf((*BackupBlobWriteFailure)(nil)).Elem() - minAPIVersionForType["BackupBlobWriteFailure"] = "5.1" } type BackupBlobWriteFailureFault BackupBlobWriteFailure @@ -8200,19 +8235,19 @@ type BaseConfigInfo struct { // Choice of the deletion behavior of this virtual storage object. // // If not set, the default value is false. - KeepAfterDeleteVm *bool `xml:"keepAfterDeleteVm" json:"keepAfterDeleteVm,omitempty" vim:"6.7"` + KeepAfterDeleteVm *bool `xml:"keepAfterDeleteVm" json:"keepAfterDeleteVm,omitempty"` // Is virtual storage object relocation disabled. // // If not set, the default value is false. - RelocationDisabled *bool `xml:"relocationDisabled" json:"relocationDisabled,omitempty" vim:"6.7"` + RelocationDisabled *bool `xml:"relocationDisabled" json:"relocationDisabled,omitempty"` // Is virtual storage object supports native snapshot. // // If not set, the default value is false. - NativeSnapshotSupported *bool `xml:"nativeSnapshotSupported" json:"nativeSnapshotSupported,omitempty" vim:"6.7"` + NativeSnapshotSupported *bool `xml:"nativeSnapshotSupported" json:"nativeSnapshotSupported,omitempty"` // If Virtua storage object has changed block tracking enabled. // // If not set, the default value is false. - ChangedBlockTrackingEnabled *bool `xml:"changedBlockTrackingEnabled" json:"changedBlockTrackingEnabled,omitempty" vim:"6.7"` + ChangedBlockTrackingEnabled *bool `xml:"changedBlockTrackingEnabled" json:"changedBlockTrackingEnabled,omitempty"` // Backing of this object. Backing BaseBaseConfigInfoBackingInfo `xml:"backing,typeattr" json:"backing"` // Metadata associated with the FCD if available. @@ -8225,12 +8260,11 @@ type BaseConfigInfo struct { // // See `IoFilterInfo.id`. // The client cannot modify this information on a virtual machine. - Iofilter []string `xml:"iofilter,omitempty" json:"iofilter,omitempty" vim:"6.7"` + Iofilter []string `xml:"iofilter,omitempty" json:"iofilter,omitempty"` } func init() { t["BaseConfigInfo"] = reflect.TypeOf((*BaseConfigInfo)(nil)).Elem() - minAPIVersionForType["BaseConfigInfo"] = "6.5" } // The data object type is a base type of backing of a virtual @@ -8246,7 +8280,6 @@ type BaseConfigInfoBackingInfo struct { func init() { t["BaseConfigInfoBackingInfo"] = reflect.TypeOf((*BaseConfigInfoBackingInfo)(nil)).Elem() - minAPIVersionForType["BaseConfigInfoBackingInfo"] = "6.5" } // The data object type for disk file backing of a virtual storage @@ -8266,7 +8299,6 @@ type BaseConfigInfoDiskFileBackingInfo struct { func init() { t["BaseConfigInfoDiskFileBackingInfo"] = reflect.TypeOf((*BaseConfigInfoDiskFileBackingInfo)(nil)).Elem() - minAPIVersionForType["BaseConfigInfoDiskFileBackingInfo"] = "6.5" } // Information for file backing of a virtual storage @@ -8300,12 +8332,11 @@ type BaseConfigInfoFileBackingInfo struct { // `BaseConfigInfoFileBackingInfo.parent` is set. DeltaSizeInMB int64 `xml:"deltaSizeInMB,omitempty" json:"deltaSizeInMB,omitempty"` // key id used to encrypt the backing disk. 
- KeyId *CryptoKeyId `xml:"keyId,omitempty" json:"keyId,omitempty" vim:"7.0"` + KeyId *CryptoKeyId `xml:"keyId,omitempty" json:"keyId,omitempty"` } func init() { t["BaseConfigInfoFileBackingInfo"] = reflect.TypeOf((*BaseConfigInfoFileBackingInfo)(nil)).Elem() - minAPIVersionForType["BaseConfigInfoFileBackingInfo"] = "6.5" } // This data object type contains information about raw device mapping. @@ -8325,7 +8356,6 @@ type BaseConfigInfoRawDiskMappingBackingInfo struct { func init() { t["BaseConfigInfoRawDiskMappingBackingInfo"] = reflect.TypeOf((*BaseConfigInfoRawDiskMappingBackingInfo)(nil)).Elem() - minAPIVersionForType["BaseConfigInfoRawDiskMappingBackingInfo"] = "6.5" } // The parameters of `Folder.BatchAddHostsToCluster_Task`. @@ -8436,7 +8466,6 @@ type BatchResult struct { func init() { t["BatchResult"] = reflect.TypeOf((*BatchResult)(nil)).Elem() - minAPIVersionForType["BatchResult"] = "6.0" } type BindVnic BindVnicRequestType @@ -8470,7 +8499,6 @@ type BlockedByFirewall struct { func init() { t["BlockedByFirewall"] = reflect.TypeOf((*BlockedByFirewall)(nil)).Elem() - minAPIVersionForType["BlockedByFirewall"] = "4.1" } type BlockedByFirewallFault BlockedByFirewall @@ -8507,7 +8535,6 @@ type BoolPolicy struct { func init() { t["BoolPolicy"] = reflect.TypeOf((*BoolPolicy)(nil)).Elem() - minAPIVersionForType["BoolPolicy"] = "4.0" } type BrowseDiagnosticLog BrowseDiagnosticLogRequestType @@ -8556,7 +8583,6 @@ type CAMServerRefusedConnection struct { func init() { t["CAMServerRefusedConnection"] = reflect.TypeOf((*CAMServerRefusedConnection)(nil)).Elem() - minAPIVersionForType["CAMServerRefusedConnection"] = "5.0" } type CAMServerRefusedConnectionFault CAMServerRefusedConnection @@ -8578,7 +8604,7 @@ type CanProvisionObjectsRequestType struct { Npbs []VsanNewPolicyBatch `xml:"npbs" json:"npbs"` // Optionally populate PolicyCost even though // object cannot be provisioned in the current cluster topology. - IgnoreSatisfiability *bool `xml:"ignoreSatisfiability" json:"ignoreSatisfiability,omitempty" vim:"6.0"` + IgnoreSatisfiability *bool `xml:"ignoreSatisfiability" json:"ignoreSatisfiability,omitempty"` } func init() { @@ -8745,7 +8771,7 @@ type CannotAccessNetwork struct { // A reference to the network that cannot be accessed // // Refers instance of `Network`. 
- Network *ManagedObjectReference `xml:"network,omitempty" json:"network,omitempty" vim:"6.0"` + Network *ManagedObjectReference `xml:"network,omitempty" json:"network,omitempty"` } func init() { @@ -8862,7 +8888,6 @@ type CannotAddHostWithFTVmAsStandalone struct { func init() { t["CannotAddHostWithFTVmAsStandalone"] = reflect.TypeOf((*CannotAddHostWithFTVmAsStandalone)(nil)).Elem() - minAPIVersionForType["CannotAddHostWithFTVmAsStandalone"] = "4.0" } type CannotAddHostWithFTVmAsStandaloneFault CannotAddHostWithFTVmAsStandalone @@ -8879,7 +8904,6 @@ type CannotAddHostWithFTVmToDifferentCluster struct { func init() { t["CannotAddHostWithFTVmToDifferentCluster"] = reflect.TypeOf((*CannotAddHostWithFTVmToDifferentCluster)(nil)).Elem() - minAPIVersionForType["CannotAddHostWithFTVmToDifferentCluster"] = "4.0" } type CannotAddHostWithFTVmToDifferentClusterFault CannotAddHostWithFTVmToDifferentCluster @@ -8895,7 +8919,6 @@ type CannotAddHostWithFTVmToNonHACluster struct { func init() { t["CannotAddHostWithFTVmToNonHACluster"] = reflect.TypeOf((*CannotAddHostWithFTVmToNonHACluster)(nil)).Elem() - minAPIVersionForType["CannotAddHostWithFTVmToNonHACluster"] = "4.0" } type CannotAddHostWithFTVmToNonHAClusterFault CannotAddHostWithFTVmToNonHACluster @@ -8919,7 +8942,6 @@ type CannotChangeDrsBehaviorForFtSecondary struct { func init() { t["CannotChangeDrsBehaviorForFtSecondary"] = reflect.TypeOf((*CannotChangeDrsBehaviorForFtSecondary)(nil)).Elem() - minAPIVersionForType["CannotChangeDrsBehaviorForFtSecondary"] = "4.1" } type CannotChangeDrsBehaviorForFtSecondaryFault CannotChangeDrsBehaviorForFtSecondary @@ -8943,7 +8965,6 @@ type CannotChangeHaSettingsForFtSecondary struct { func init() { t["CannotChangeHaSettingsForFtSecondary"] = reflect.TypeOf((*CannotChangeHaSettingsForFtSecondary)(nil)).Elem() - minAPIVersionForType["CannotChangeHaSettingsForFtSecondary"] = "4.1" } type CannotChangeHaSettingsForFtSecondaryFault CannotChangeHaSettingsForFtSecondary @@ -8967,7 +8988,6 @@ type CannotChangeVsanClusterUuid struct { func init() { t["CannotChangeVsanClusterUuid"] = reflect.TypeOf((*CannotChangeVsanClusterUuid)(nil)).Elem() - minAPIVersionForType["CannotChangeVsanClusterUuid"] = "5.5" } type CannotChangeVsanClusterUuidFault CannotChangeVsanClusterUuid @@ -8988,7 +9008,6 @@ type CannotChangeVsanNodeUuid struct { func init() { t["CannotChangeVsanNodeUuid"] = reflect.TypeOf((*CannotChangeVsanNodeUuid)(nil)).Elem() - minAPIVersionForType["CannotChangeVsanNodeUuid"] = "5.5" } type CannotChangeVsanNodeUuidFault CannotChangeVsanNodeUuid @@ -9011,7 +9030,6 @@ type CannotComputeFTCompatibleHosts struct { func init() { t["CannotComputeFTCompatibleHosts"] = reflect.TypeOf((*CannotComputeFTCompatibleHosts)(nil)).Elem() - minAPIVersionForType["CannotComputeFTCompatibleHosts"] = "6.0" } type CannotComputeFTCompatibleHostsFault CannotComputeFTCompatibleHosts @@ -9028,7 +9046,6 @@ type CannotCreateFile struct { func init() { t["CannotCreateFile"] = reflect.TypeOf((*CannotCreateFile)(nil)).Elem() - minAPIVersionForType["CannotCreateFile"] = "2.5" } type CannotCreateFileFault CannotCreateFile @@ -9077,7 +9094,6 @@ type CannotDisableDrsOnClustersWithVApps struct { func init() { t["CannotDisableDrsOnClustersWithVApps"] = reflect.TypeOf((*CannotDisableDrsOnClustersWithVApps)(nil)).Elem() - minAPIVersionForType["CannotDisableDrsOnClustersWithVApps"] = "4.1" } type CannotDisableDrsOnClustersWithVAppsFault CannotDisableDrsOnClustersWithVApps @@ -9097,7 +9113,6 @@ type CannotDisableSnapshot struct { func init() { 
t["CannotDisableSnapshot"] = reflect.TypeOf((*CannotDisableSnapshot)(nil)).Elem() - minAPIVersionForType["CannotDisableSnapshot"] = "2.5" } type CannotDisableSnapshotFault CannotDisableSnapshot @@ -9117,7 +9132,6 @@ type CannotDisconnectHostWithFaultToleranceVm struct { func init() { t["CannotDisconnectHostWithFaultToleranceVm"] = reflect.TypeOf((*CannotDisconnectHostWithFaultToleranceVm)(nil)).Elem() - minAPIVersionForType["CannotDisconnectHostWithFaultToleranceVm"] = "4.0" } type CannotDisconnectHostWithFaultToleranceVmFault CannotDisconnectHostWithFaultToleranceVm @@ -9149,13 +9163,12 @@ type CannotEnableVmcpForCluster struct { // for enabling vSphere VMCP. // // It can be the following reason. - // - APDTimeout disabled. + // - APDTimeout disabled. Reason string `xml:"reason,omitempty" json:"reason,omitempty"` } func init() { t["CannotEnableVmcpForCluster"] = reflect.TypeOf((*CannotEnableVmcpForCluster)(nil)).Elem() - minAPIVersionForType["CannotEnableVmcpForCluster"] = "6.0" } type CannotEnableVmcpForClusterFault CannotEnableVmcpForCluster @@ -9213,7 +9226,6 @@ type CannotMoveFaultToleranceVm struct { func init() { t["CannotMoveFaultToleranceVm"] = reflect.TypeOf((*CannotMoveFaultToleranceVm)(nil)).Elem() - minAPIVersionForType["CannotMoveFaultToleranceVm"] = "4.0" } type CannotMoveFaultToleranceVmFault CannotMoveFaultToleranceVm @@ -9230,7 +9242,6 @@ type CannotMoveHostWithFaultToleranceVm struct { func init() { t["CannotMoveHostWithFaultToleranceVm"] = reflect.TypeOf((*CannotMoveHostWithFaultToleranceVm)(nil)).Elem() - minAPIVersionForType["CannotMoveHostWithFaultToleranceVm"] = "4.0" } type CannotMoveHostWithFaultToleranceVmFault CannotMoveHostWithFaultToleranceVm @@ -9250,7 +9261,6 @@ type CannotMoveVmWithDeltaDisk struct { func init() { t["CannotMoveVmWithDeltaDisk"] = reflect.TypeOf((*CannotMoveVmWithDeltaDisk)(nil)).Elem() - minAPIVersionForType["CannotMoveVmWithDeltaDisk"] = "5.0" } type CannotMoveVmWithDeltaDiskFault CannotMoveVmWithDeltaDisk @@ -9267,7 +9277,6 @@ type CannotMoveVmWithNativeDeltaDisk struct { func init() { t["CannotMoveVmWithNativeDeltaDisk"] = reflect.TypeOf((*CannotMoveVmWithNativeDeltaDisk)(nil)).Elem() - minAPIVersionForType["CannotMoveVmWithNativeDeltaDisk"] = "5.0" } type CannotMoveVmWithNativeDeltaDiskFault CannotMoveVmWithNativeDeltaDisk @@ -9289,7 +9298,6 @@ type CannotMoveVsanEnabledHost struct { func init() { t["CannotMoveVsanEnabledHost"] = reflect.TypeOf((*CannotMoveVsanEnabledHost)(nil)).Elem() - minAPIVersionForType["CannotMoveVsanEnabledHost"] = "5.5" } type CannotMoveVsanEnabledHostFault BaseCannotMoveVsanEnabledHost @@ -9307,7 +9315,6 @@ type CannotPlaceWithoutPrerequisiteMoves struct { func init() { t["CannotPlaceWithoutPrerequisiteMoves"] = reflect.TypeOf((*CannotPlaceWithoutPrerequisiteMoves)(nil)).Elem() - minAPIVersionForType["CannotPlaceWithoutPrerequisiteMoves"] = "5.1" } type CannotPlaceWithoutPrerequisiteMovesFault CannotPlaceWithoutPrerequisiteMoves @@ -9337,7 +9344,6 @@ type CannotPowerOffVmInCluster struct { func init() { t["CannotPowerOffVmInCluster"] = reflect.TypeOf((*CannotPowerOffVmInCluster)(nil)).Elem() - minAPIVersionForType["CannotPowerOffVmInCluster"] = "5.0" } type CannotPowerOffVmInClusterFault CannotPowerOffVmInCluster @@ -9356,7 +9362,6 @@ type CannotReconfigureVsanWhenHaEnabled struct { func init() { t["CannotReconfigureVsanWhenHaEnabled"] = reflect.TypeOf((*CannotReconfigureVsanWhenHaEnabled)(nil)).Elem() - minAPIVersionForType["CannotReconfigureVsanWhenHaEnabled"] = "5.5" } type 
CannotReconfigureVsanWhenHaEnabledFault CannotReconfigureVsanWhenHaEnabled @@ -9381,12 +9386,11 @@ type CannotUseNetwork struct { // A reference to the network that cannot be used // // Refers instance of `Network`. - Network *ManagedObjectReference `xml:"network,omitempty" json:"network,omitempty" vim:"6.0"` + Network *ManagedObjectReference `xml:"network,omitempty" json:"network,omitempty"` } func init() { t["CannotUseNetwork"] = reflect.TypeOf((*CannotUseNetwork)(nil)).Elem() - minAPIVersionForType["CannotUseNetwork"] = "5.5" } type CannotUseNetworkFault CannotUseNetwork @@ -9422,26 +9426,26 @@ type Capability struct { MultiHostSupported bool `xml:"multiHostSupported" json:"multiHostSupported"` // Flag indicating whether host user accounts should have the option to // be granted shell access - UserShellAccessSupported bool `xml:"userShellAccessSupported" json:"userShellAccessSupported" vim:"2.5"` + UserShellAccessSupported bool `xml:"userShellAccessSupported" json:"userShellAccessSupported"` // All supported Enhanced VMotion Compatibility modes. - SupportedEVCMode []EVCMode `xml:"supportedEVCMode,omitempty" json:"supportedEVCMode,omitempty" vim:"4.0"` + SupportedEVCMode []EVCMode `xml:"supportedEVCMode,omitempty" json:"supportedEVCMode,omitempty"` // All supported Enhanced VMotion Compatibility Graphics modes. SupportedEVCGraphicsMode []FeatureEVCMode `xml:"supportedEVCGraphicsMode,omitempty" json:"supportedEVCGraphicsMode,omitempty" vim:"7.0.1.0"` // Indicates whether network backup and restore feature is supported. - NetworkBackupAndRestoreSupported *bool `xml:"networkBackupAndRestoreSupported" json:"networkBackupAndRestoreSupported,omitempty" vim:"5.1"` + NetworkBackupAndRestoreSupported *bool `xml:"networkBackupAndRestoreSupported" json:"networkBackupAndRestoreSupported,omitempty"` // Is DRS supported for Fault Tolerance VMs without enabling EVC. - FtDrsWithoutEvcSupported *bool `xml:"ftDrsWithoutEvcSupported" json:"ftDrsWithoutEvcSupported,omitempty" vim:"6.7"` + FtDrsWithoutEvcSupported *bool `xml:"ftDrsWithoutEvcSupported" json:"ftDrsWithoutEvcSupported,omitempty"` // Specifies if the workflow for setting up a HCI cluster is supported. - HciWorkflowSupported *bool `xml:"hciWorkflowSupported" json:"hciWorkflowSupported,omitempty" vim:"6.7.1"` + HciWorkflowSupported *bool `xml:"hciWorkflowSupported" json:"hciWorkflowSupported,omitempty"` // Specifies the supported compute policy version. - ComputePolicyVersion int32 `xml:"computePolicyVersion,omitempty" json:"computePolicyVersion,omitempty" vim:"6.8.7"` + ComputePolicyVersion int32 `xml:"computePolicyVersion,omitempty" json:"computePolicyVersion,omitempty"` ClusterPlacementSupported *bool `xml:"clusterPlacementSupported" json:"clusterPlacementSupported,omitempty"` // Specifies if lifecycle management of a Cluster is supported. - LifecycleManagementSupported *bool `xml:"lifecycleManagementSupported" json:"lifecycleManagementSupported,omitempty" vim:"7.0"` + LifecycleManagementSupported *bool `xml:"lifecycleManagementSupported" json:"lifecycleManagementSupported,omitempty"` // Specifies if host seeding for a cluster is supported. HostSeedingSupported *bool `xml:"hostSeedingSupported" json:"hostSeedingSupported,omitempty" vim:"7.0.2.0"` // Specifies if scalable shares for resource pools is supported. 
- ScalableSharesSupported *bool `xml:"scalableSharesSupported" json:"scalableSharesSupported,omitempty" vim:"7.0"` + ScalableSharesSupported *bool `xml:"scalableSharesSupported" json:"scalableSharesSupported,omitempty"` // Specifies if highly available distributed clustering service is supported. HadcsSupported *bool `xml:"hadcsSupported" json:"hadcsSupported,omitempty" vim:"7.0.1.1"` // Specifies if desired configuration management platform is supported @@ -9724,7 +9728,6 @@ type ChangesInfoEventArgument struct { func init() { t["ChangesInfoEventArgument"] = reflect.TypeOf((*ChangesInfoEventArgument)(nil)).Elem() - minAPIVersionForType["ChangesInfoEventArgument"] = "6.5" } // The parameters of `ClusterEVCManager.CheckAddHostEvc_Task`. @@ -9864,24 +9867,24 @@ type CheckComplianceRequestType struct { // // E represents if Entity is specified. // - // P ^P - // --------------------------------------------------- - // | Check compliance | Profiles associated | - // E| of each entity | with the specified | - // | against each of the | entity will be used | - // | profiles specified. | for checking | - // | | compliance. | - // | | | - // | | | - // --------------------------------------------------- - // | All entities | InvalidArgument | - // | associated with the | Exception is thrown. | - // | profile are checked. | | - // ^E| | | - // | | | - // | | | - // | | | - // --------------------------------------------------- + // P ^P + // --------------------------------------------------- + // | Check compliance | Profiles associated | + // E| of each entity | with the specified | + // | against each of the | entity will be used | + // | profiles specified. | for checking | + // | | compliance. | + // | | | + // | | | + // --------------------------------------------------- + // | All entities | InvalidArgument | + // | associated with the | Exception is thrown. | + // | profile are checked. | | + // ^E| | | + // | | | + // | | | + // | | | + // --------------------------------------------------- // // Refers instances of `Profile`. Profile []ManagedObjectReference `xml:"profile,omitempty" json:"profile,omitempty"` @@ -9981,10 +9984,10 @@ type CheckForUpdatesRequestType struct { This ManagedObjectReference `xml:"_this" json:"-"` // The data version currently known to the client. The value // must be either - // - the special initial version (an empty string) - // - a data version returned from `PropertyCollector.CheckForUpdates` or `PropertyCollector.WaitForUpdates` by the same `PropertyCollector` on the same session. - // - a non-truncated data version returned from `PropertyCollector.WaitForUpdatesEx` by the same `PropertyCollector` on the same - // session. + // - the special initial version (an empty string) + // - a data version returned from `PropertyCollector.CheckForUpdates` or `PropertyCollector.WaitForUpdates` by the same `PropertyCollector` on the same session. + // - a non-truncated data version returned from `PropertyCollector.WaitForUpdatesEx` by the same `PropertyCollector` on the same + // session. Version string `xml:"version,omitempty" json:"version,omitempty"` } @@ -10258,7 +10261,6 @@ type CheckResult struct { func init() { t["CheckResult"] = reflect.TypeOf((*CheckResult)(nil)).Elem() - minAPIVersionForType["CheckResult"] = "4.0" } // The parameters of `VirtualMachineCompatibilityChecker.CheckVmConfig_Task`. 
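The reflowed comment above spells out the contract for the PropertyCollector CheckForUpdates version argument: start with the empty string, then pass back only a version previously returned by the same collector on the same session. A minimal govmomi client sketch of that loop follows; it is illustrative only (not part of the vendored change), assumes an authenticated *vim25.Client with a property filter already created, and trims error handling.

package example // illustrative sketch, not part of the vendored change

import (
	"context"

	"github.com/vmware/govmomi/property"
	"github.com/vmware/govmomi/vim25"
	"github.com/vmware/govmomi/vim25/methods"
	"github.com/vmware/govmomi/vim25/types"
)

// pollUpdates drives CheckForUpdates with the version contract documented above:
// the special initial version is the empty string, and each subsequent call
// passes back the version returned by the previous one.
func pollUpdates(ctx context.Context, c *vim25.Client) error {
	pc := property.DefaultCollector(c)
	// A PropertyFilter must already exist on pc for any updates to be reported.
	version := "" // special initial version
	for {
		res, err := methods.CheckForUpdates(ctx, c, &types.CheckForUpdates{
			This:    pc.Reference(),
			Version: version,
		})
		if err != nil {
			return err
		}
		if res.Returnval == nil {
			return nil // no changes since the supplied version
		}
		// ... consume res.Returnval.FilterSet here ...
		version = res.Returnval.Version
	}
}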
@@ -10442,7 +10444,6 @@ type ClockSkew struct { func init() { t["ClockSkew"] = reflect.TypeOf((*ClockSkew)(nil)).Elem() - minAPIVersionForType["ClockSkew"] = "4.1" } type ClockSkewFault ClockSkew @@ -10461,7 +10462,6 @@ type CloneFromSnapshotNotSupported struct { func init() { t["CloneFromSnapshotNotSupported"] = reflect.TypeOf((*CloneFromSnapshotNotSupported)(nil)).Elem() - minAPIVersionForType["CloneFromSnapshotNotSupported"] = "4.0" } type CloneFromSnapshotNotSupportedFault CloneFromSnapshotNotSupported @@ -10624,7 +10624,6 @@ type ClusterAction struct { func init() { t["ClusterAction"] = reflect.TypeOf((*ClusterAction)(nil)).Elem() - minAPIVersionForType["ClusterAction"] = "2.5" } // Base class for all action history. @@ -10639,7 +10638,6 @@ type ClusterActionHistory struct { func init() { t["ClusterActionHistory"] = reflect.TypeOf((*ClusterActionHistory)(nil)).Elem() - minAPIVersionForType["ClusterActionHistory"] = "2.5" } // The `ClusterAffinityRuleSpec` data object defines a set @@ -10694,7 +10692,6 @@ type ClusterAttemptedVmInfo struct { func init() { t["ClusterAttemptedVmInfo"] = reflect.TypeOf((*ClusterAttemptedVmInfo)(nil)).Elem() - minAPIVersionForType["ClusterAttemptedVmInfo"] = "2.5" } // Describes an action for the initial placement of a virtual machine in a @@ -10773,7 +10770,6 @@ type ClusterComplianceCheckedEvent struct { func init() { t["ClusterComplianceCheckedEvent"] = reflect.TypeOf((*ClusterComplianceCheckedEvent)(nil)).Elem() - minAPIVersionForType["ClusterComplianceCheckedEvent"] = "4.0" } // ClusterConfigResult is the result returned for the `ClusterComputeResource.ConfigureHCI_Task` @@ -10791,7 +10787,29 @@ type ClusterComputeResourceClusterConfigResult struct { func init() { t["ClusterComputeResourceClusterConfigResult"] = reflect.TypeOf((*ClusterComputeResourceClusterConfigResult)(nil)).Elem() - minAPIVersionForType["ClusterComputeResourceClusterConfigResult"] = "6.7.1" +} + +// The encryption mode policy for a cluster. +type ClusterComputeResourceCryptoModePolicy struct { + DynamicData + + // The host key identifier. + // + // When set, all hosts in the cluster will use this key when enabling + // the crypto safe mode. Only one of `ClusterComputeResourceCryptoModePolicy.keyId` and + // `ClusterComputeResourceCryptoModePolicy.providerId` may be set. + KeyId *CryptoKeyId `xml:"keyId,omitempty" json:"keyId,omitempty"` + // The host key provider identifier. + // + // When set, all hosts in the cluster will use a key from the specified + // key provider when enabling the crypto safe mode. Only one of + // `ClusterComputeResourceCryptoModePolicy.keyId` and `ClusterComputeResourceCryptoModePolicy.providerId` may be set. + ProviderId *KeyProviderId `xml:"providerId,omitempty" json:"providerId,omitempty"` +} + +func init() { + t["ClusterComputeResourceCryptoModePolicy"] = reflect.TypeOf((*ClusterComputeResourceCryptoModePolicy)(nil)).Elem() + minAPIVersionForType["ClusterComputeResourceCryptoModePolicy"] = "8.0.3.0" } // Describes the validations applicable to the network settings. 
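The hunk above adds ClusterComputeResourceCryptoModePolicy (minimum API version 8.0.3.0), which later in this diff is exposed as the optional Policy field of ClusterCryptoConfigInfo. A hedged sketch of how a caller might wire it into a cluster reconfigure spec follows; the "example-kms" provider id is a placeholder, and only one of KeyId or ProviderId may be set.

package example // illustrative sketch, not part of the vendored change

import "github.com/vmware/govmomi/vim25/types"

// clusterCryptoSpec builds a ClusterConfigSpecEx that pins all hosts in the
// cluster to a specific key provider when crypto safe mode is force-enabled.
// "example-kms" is a placeholder key-provider id.
func clusterCryptoSpec() *types.ClusterConfigSpecEx {
	return &types.ClusterConfigSpecEx{
		CryptoConfig: &types.ClusterCryptoConfigInfo{
			CryptoMode: string(types.ClusterCryptoConfigInfoCryptoModeForceEnable),
			Policy: &types.ClusterComputeResourceCryptoModePolicy{
				// Only one of KeyId and ProviderId may be set.
				ProviderId: &types.KeyProviderId{Id: "example-kms"},
			},
		},
	}
}

In practice such a spec would be passed to a cluster reconfigure task; when Policy is left unset, host keys are generated from the current default key provider, as the field documentation added later in this diff notes.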
@@ -10810,7 +10828,6 @@ type ClusterComputeResourceDVSConfigurationValidation struct { func init() { t["ClusterComputeResourceDVSConfigurationValidation"] = reflect.TypeOf((*ClusterComputeResourceDVSConfigurationValidation)(nil)).Elem() - minAPIVersionForType["ClusterComputeResourceDVSConfigurationValidation"] = "6.7.1" } // Contains reference to the DVS, list of physical nics attached to it, @@ -10831,7 +10848,6 @@ type ClusterComputeResourceDVSSetting struct { func init() { t["ClusterComputeResourceDVSSetting"] = reflect.TypeOf((*ClusterComputeResourceDVSSetting)(nil)).Elem() - minAPIVersionForType["ClusterComputeResourceDVSSetting"] = "6.7.1" } type ClusterComputeResourceDVSSettingDVPortgroupToServiceMapping struct { @@ -10874,7 +10890,6 @@ type ClusterComputeResourceDvsProfile struct { func init() { t["ClusterComputeResourceDvsProfile"] = reflect.TypeOf((*ClusterComputeResourceDvsProfile)(nil)).Elem() - minAPIVersionForType["ClusterComputeResourceDvsProfile"] = "6.7.1" } type ClusterComputeResourceDvsProfileDVPortgroupSpecToServiceMapping struct { @@ -10926,7 +10941,6 @@ type ClusterComputeResourceHCIConfigInfo struct { func init() { t["ClusterComputeResourceHCIConfigInfo"] = reflect.TypeOf((*ClusterComputeResourceHCIConfigInfo)(nil)).Elem() - minAPIVersionForType["ClusterComputeResourceHCIConfigInfo"] = "6.7.1" } // Specification to configure the cluster. @@ -10956,13 +10970,13 @@ type ClusterComputeResourceHCIConfigSpec struct { func init() { t["ClusterComputeResourceHCIConfigSpec"] = reflect.TypeOf((*ClusterComputeResourceHCIConfigSpec)(nil)).Elem() - minAPIVersionForType["ClusterComputeResourceHCIConfigSpec"] = "6.7.1" } // Host configuration input to configure hosts in a cluster. type ClusterComputeResourceHostConfigurationInput struct { DynamicData + // Refers instance of `HostSystem`. Host ManagedObjectReference `xml:"host" json:"host"` HostVmkNics []ClusterComputeResourceHostVmkNicInfo `xml:"hostVmkNics,omitempty" json:"hostVmkNics,omitempty"` // To apply configuration on the host, the host is expected to be in @@ -10975,7 +10989,6 @@ type ClusterComputeResourceHostConfigurationInput struct { func init() { t["ClusterComputeResourceHostConfigurationInput"] = reflect.TypeOf((*ClusterComputeResourceHostConfigurationInput)(nil)).Elem() - minAPIVersionForType["ClusterComputeResourceHostConfigurationInput"] = "6.7.1" } // HostConfigurationProfile describes the configuration of services @@ -10991,7 +11004,6 @@ type ClusterComputeResourceHostConfigurationProfile struct { func init() { t["ClusterComputeResourceHostConfigurationProfile"] = reflect.TypeOf((*ClusterComputeResourceHostConfigurationProfile)(nil)).Elem() - minAPIVersionForType["ClusterComputeResourceHostConfigurationProfile"] = "6.7.1" } // Describes the validations applicable to the settings on the host. @@ -11015,7 +11027,35 @@ type ClusterComputeResourceHostConfigurationValidation struct { func init() { t["ClusterComputeResourceHostConfigurationValidation"] = reflect.TypeOf((*ClusterComputeResourceHostConfigurationValidation)(nil)).Elem() - minAPIVersionForType["ClusterComputeResourceHostConfigurationValidation"] = "6.7.1" +} + +// This data object describes what evacuation actions should be made for a given host. +type ClusterComputeResourceHostEvacuationInfo struct { + DynamicData + + // Candidate host to be put into maintenance mode. + // + // Refers instance of `HostSystem`. + Host ManagedObjectReference `xml:"host" json:"host"` + // Specifies the list of required actions. 
+ // + // Depending on the specified option values passed, additional + // actions such as ones related to evacuation of specific objects, + // additional memory reservation or allowing/disallowing certain groups + // of operations may be taken when entering the desired flavor of + // maintenance mode. The list of supported options and values may vary + // based on the version of the ESXi host and Virtual Center. + // + // If unset, a default list of actions will be assumed based on the + // selected flavor of maintenance mode as specified by the + // `ClusterComputeResourceMaintenanceInfo.partialMMId` field. See `HostPartialMaintenanceModeId_enum` + // for further information about individual flavors. + Action []BaseOptionValue `xml:"action,omitempty,typeattr" json:"action,omitempty"` +} + +func init() { + t["ClusterComputeResourceHostEvacuationInfo"] = reflect.TypeOf((*ClusterComputeResourceHostEvacuationInfo)(nil)).Elem() + minAPIVersionForType["ClusterComputeResourceHostEvacuationInfo"] = "8.0.3.0" } // This data object describes how a vmknic on a host must be configured. @@ -11033,7 +11073,25 @@ type ClusterComputeResourceHostVmkNicInfo struct { func init() { t["ClusterComputeResourceHostVmkNicInfo"] = reflect.TypeOf((*ClusterComputeResourceHostVmkNicInfo)(nil)).Elem() - minAPIVersionForType["ClusterComputeResourceHostVmkNicInfo"] = "6.7.1" +} + +// This data object describes how a given array of hosts will be put into maintenance mode. +type ClusterComputeResourceMaintenanceInfo struct { + DynamicData + + // Indicates the flavor of maintenance mode requested. + // + // If set, specifies the desired flavor of partial + // maintenance mode. Otherwise, full maintenance mode is assumed. + // See `HostPartialMaintenanceModeId_enum` for supported values. + PartialMMId string `xml:"partialMMId,omitempty" json:"partialMMId,omitempty"` + // Evaucation information for each host + HostEvacInfo []ClusterComputeResourceHostEvacuationInfo `xml:"hostEvacInfo,omitempty" json:"hostEvacInfo,omitempty"` +} + +func init() { + t["ClusterComputeResourceMaintenanceInfo"] = reflect.TypeOf((*ClusterComputeResourceMaintenanceInfo)(nil)).Elem() + minAPIVersionForType["ClusterComputeResourceMaintenanceInfo"] = "8.0.3.0" } // The `ClusterComputeResourceSummary` data object @@ -11056,44 +11114,44 @@ type ClusterComputeResourceSummary struct { // // The actual type of admissionControlInfo will depend on what kind of // `ClusterDasAdmissionControlPolicy` was used to configure the cluster. - AdmissionControlInfo BaseClusterDasAdmissionControlInfo `xml:"admissionControlInfo,omitempty,typeattr" json:"admissionControlInfo,omitempty" vim:"4.0"` + AdmissionControlInfo BaseClusterDasAdmissionControlInfo `xml:"admissionControlInfo,omitempty,typeattr" json:"admissionControlInfo,omitempty"` // Total number of migrations with VMotion that have been done internal to this // cluster. NumVmotions int32 `xml:"numVmotions" json:"numVmotions"` // The target balance, in terms of standard deviation, for a DRS cluster. // // Units are thousandths. For example, 12 represents 0.012. - TargetBalance int32 `xml:"targetBalance,omitempty" json:"targetBalance,omitempty" vim:"4.0"` + TargetBalance int32 `xml:"targetBalance,omitempty" json:"targetBalance,omitempty"` // The current balance, in terms of standard deviation, for a DRS cluster. // // Units are thousandths. For example, 12 represents 0.012. 
- CurrentBalance int32 `xml:"currentBalance,omitempty" json:"currentBalance,omitempty" vim:"4.0"` + CurrentBalance int32 `xml:"currentBalance,omitempty" json:"currentBalance,omitempty"` // The DRS score of this cluster, in percentage. - DrsScore int32 `xml:"drsScore,omitempty" json:"drsScore,omitempty" vim:"7.0"` + DrsScore int32 `xml:"drsScore,omitempty" json:"drsScore,omitempty"` // The number of VMs in this cluster corresponding to each DRS score // bucket. // // The buckets are defined as follows: - // - 0% - 20% - // - 21% - 40% - // - 41% - 60% - // - 61% - 80% - // - 81% - 100% - NumVmsPerDrsScoreBucket []int32 `xml:"numVmsPerDrsScoreBucket,omitempty" json:"numVmsPerDrsScoreBucket,omitempty" vim:"7.0"` + // - 0% - 20% + // - 21% - 40% + // - 41% - 60% + // - 61% - 80% + // - 81% - 100% + NumVmsPerDrsScoreBucket []int32 `xml:"numVmsPerDrsScoreBucket,omitempty" json:"numVmsPerDrsScoreBucket,omitempty"` // The current usage summary for a DRS cluster. - UsageSummary *ClusterUsageSummary `xml:"usageSummary,omitempty" json:"usageSummary,omitempty" vim:"6.0"` + UsageSummary *ClusterUsageSummary `xml:"usageSummary,omitempty" json:"usageSummary,omitempty"` // The Enhanced VMotion Compatibility mode that is currently in effect // for all hosts in this cluster; unset if no EVC mode is active. // // See also `Capability.supportedEVCMode`. - CurrentEVCModeKey string `xml:"currentEVCModeKey,omitempty" json:"currentEVCModeKey,omitempty" vim:"4.0"` + CurrentEVCModeKey string `xml:"currentEVCModeKey,omitempty" json:"currentEVCModeKey,omitempty"` // The Enhanced VMotion Compatibility Graphics mode that is currently in // effect for all hosts in this cluster; unset if no EVC mode is active. // // See also `Capability.supportedEVCGraphicsMode`. CurrentEVCGraphicsModeKey string `xml:"currentEVCGraphicsModeKey,omitempty" json:"currentEVCGraphicsModeKey,omitempty" vim:"7.0.1.0"` // Data pertaining to DAS. - DasData BaseClusterDasData `xml:"dasData,omitempty,typeattr" json:"dasData,omitempty" vim:"5.0"` + DasData BaseClusterDasData `xml:"dasData,omitempty,typeattr" json:"dasData,omitempty"` // Configuration pertinent to state of the cluster maintenance mode. // // Valid values are enumerated by the `ClusterMaintenanceModeStatus` @@ -11131,7 +11189,6 @@ type ClusterComputeResourceVCProfile struct { func init() { t["ClusterComputeResourceVCProfile"] = reflect.TypeOf((*ClusterComputeResourceVCProfile)(nil)).Elem() - minAPIVersionForType["ClusterComputeResourceVCProfile"] = "6.7.1" } // Describes the validation results. @@ -11144,7 +11201,6 @@ type ClusterComputeResourceValidationResultBase struct { func init() { t["ClusterComputeResourceValidationResultBase"] = reflect.TypeOf((*ClusterComputeResourceValidationResultBase)(nil)).Elem() - minAPIVersionForType["ClusterComputeResourceValidationResultBase"] = "6.7.1" } type ClusterComputeResourceVcsSlots struct { @@ -11166,6 +11222,7 @@ type ClusterComputeResourceVcsSlots struct { func init() { t["ClusterComputeResourceVcsSlots"] = reflect.TypeOf((*ClusterComputeResourceVcsSlots)(nil)).Elem() + minAPIVersionForType["ClusterComputeResourceVcsSlots"] = "7.0.1.1" } // Deprecated as of VI API 2.5, use `ClusterConfigInfoEx`. @@ -11235,7 +11292,7 @@ type ClusterConfigInfoEx struct { // Cluster-wide rules. Rule []BaseClusterRuleInfo `xml:"rule,omitempty,typeattr" json:"rule,omitempty"` // Cluster-wide configuration of VM orchestration. 
- Orchestration *ClusterOrchestrationInfo `xml:"orchestration,omitempty" json:"orchestration,omitempty" vim:"6.5"` + Orchestration *ClusterOrchestrationInfo `xml:"orchestration,omitempty" json:"orchestration,omitempty"` // List of virtual machine configurations that apply during cluster wide // VM orchestration. // @@ -11243,7 +11300,7 @@ type ClusterConfigInfoEx struct { // // If a virtual machine is not specified in this array, the service uses // the default settings for that virtual machine. - VmOrchestration []ClusterVmOrchestrationInfo `xml:"vmOrchestration,omitempty" json:"vmOrchestration,omitempty" vim:"6.5"` + VmOrchestration []ClusterVmOrchestrationInfo `xml:"vmOrchestration,omitempty" json:"vmOrchestration,omitempty"` // Cluster-wide configuration of the VMware DPM service. DpmConfigInfo *ClusterDpmConfigInfo `xml:"dpmConfigInfo,omitempty" json:"dpmConfigInfo,omitempty"` // List of host configurations for the VMware DPM @@ -11255,27 +11312,26 @@ type ClusterConfigInfoEx struct { // the cluster default settings for that host. DpmHostConfig []ClusterDpmHostConfigInfo `xml:"dpmHostConfig,omitempty" json:"dpmHostConfig,omitempty"` // Cluster-wide configuration of the VMware VSAN service. - VsanConfigInfo *VsanClusterConfigInfo `xml:"vsanConfigInfo,omitempty" json:"vsanConfigInfo,omitempty" vim:"5.5"` + VsanConfigInfo *VsanClusterConfigInfo `xml:"vsanConfigInfo,omitempty" json:"vsanConfigInfo,omitempty"` // List of host configurations for the VMware VSAN service. // // Each entry applies to one host. // // If a host is not specified in this array, the service uses // the cluster default settings for that host. - VsanHostConfig []VsanHostConfigInfo `xml:"vsanHostConfig,omitempty" json:"vsanHostConfig,omitempty" vim:"5.5"` + VsanHostConfig []VsanHostConfigInfo `xml:"vsanHostConfig,omitempty" json:"vsanHostConfig,omitempty"` // Cluster-wide groups. - Group []BaseClusterGroupInfo `xml:"group,omitempty,typeattr" json:"group,omitempty" vim:"4.1"` + Group []BaseClusterGroupInfo `xml:"group,omitempty,typeattr" json:"group,omitempty"` // Cluster-wide configuration of the VMware InfraUpdateHA service. - InfraUpdateHaConfig *ClusterInfraUpdateHaConfigInfo `xml:"infraUpdateHaConfig,omitempty" json:"infraUpdateHaConfig,omitempty" vim:"6.5"` + InfraUpdateHaConfig *ClusterInfraUpdateHaConfigInfo `xml:"infraUpdateHaConfig,omitempty" json:"infraUpdateHaConfig,omitempty"` // Cluster-wide configuration of the ProactiveDRS service. - ProactiveDrsConfig *ClusterProactiveDrsConfigInfo `xml:"proactiveDrsConfig,omitempty" json:"proactiveDrsConfig,omitempty" vim:"6.5"` + ProactiveDrsConfig *ClusterProactiveDrsConfigInfo `xml:"proactiveDrsConfig,omitempty" json:"proactiveDrsConfig,omitempty"` // Cluster-wide configuration of the encryption mode. - CryptoConfig *ClusterCryptoConfigInfo `xml:"cryptoConfig,omitempty" json:"cryptoConfig,omitempty" vim:"7.0"` + CryptoConfig *ClusterCryptoConfigInfo `xml:"cryptoConfig,omitempty" json:"cryptoConfig,omitempty"` } func init() { t["ClusterConfigInfoEx"] = reflect.TypeOf((*ClusterConfigInfoEx)(nil)).Elem() - minAPIVersionForType["ClusterConfigInfoEx"] = "2.5" } // Deprecated as of VI API 2.5, use `ClusterConfigSpecEx`. @@ -11530,13 +11586,13 @@ type ClusterConfigSpecEx struct { // Cluster affinity and anti-affinity rule configuration. RulesSpec []ClusterRuleSpec `xml:"rulesSpec,omitempty" json:"rulesSpec,omitempty"` // Cluster configuration of VM orchestration. 
- Orchestration *ClusterOrchestrationInfo `xml:"orchestration,omitempty" json:"orchestration,omitempty" vim:"6.5"` + Orchestration *ClusterOrchestrationInfo `xml:"orchestration,omitempty" json:"orchestration,omitempty"` // List of specific VM configurations that apply during cluster wide // VM orchestration. // // Each entry applies to one virtual machine, and // overrides the cluster default settings. - VmOrchestrationSpec []ClusterVmOrchestrationSpec `xml:"vmOrchestrationSpec,omitempty" json:"vmOrchestrationSpec,omitempty" vim:"6.5"` + VmOrchestrationSpec []ClusterVmOrchestrationSpec `xml:"vmOrchestrationSpec,omitempty" json:"vmOrchestrationSpec,omitempty"` // DPM configuration; includes default settings for hosts. DpmConfig *ClusterDpmConfigInfo `xml:"dpmConfig,omitempty" json:"dpmConfig,omitempty"` // DPM configuration for individual hosts. @@ -11552,35 +11608,34 @@ type ClusterConfigSpecEx struct { // by using cluster reconfiguration task id as // `TaskInfo.parentTaskKey`, and should be monitored // and tracked separatedly. - VsanConfig *VsanClusterConfigInfo `xml:"vsanConfig,omitempty" json:"vsanConfig,omitempty" vim:"5.5"` + VsanConfig *VsanClusterConfigInfo `xml:"vsanConfig,omitempty" json:"vsanConfig,omitempty"` // VSAN configuration for individual hosts. // // The entries in this array override the cluster default settings // as specified in `VsanClusterConfigInfo`. - VsanHostConfigSpec []VsanHostConfigInfo `xml:"vsanHostConfigSpec,omitempty" json:"vsanHostConfigSpec,omitempty" vim:"5.5"` + VsanHostConfigSpec []VsanHostConfigInfo `xml:"vsanHostConfigSpec,omitempty" json:"vsanHostConfigSpec,omitempty"` // Cluster-wide group configuration. // // The array contains one or more group specification objects. // A group specification object contains a virtual machine group // (`ClusterVmGroup`) or a host group (`ClusterHostGroup`). // Groups can be related; see `ClusterVmHostRuleInfo`. - GroupSpec []ClusterGroupSpec `xml:"groupSpec,omitempty" json:"groupSpec,omitempty" vim:"4.1"` + GroupSpec []ClusterGroupSpec `xml:"groupSpec,omitempty" json:"groupSpec,omitempty"` // InfraUpdateHA configuration. - InfraUpdateHaConfig *ClusterInfraUpdateHaConfigInfo `xml:"infraUpdateHaConfig,omitempty" json:"infraUpdateHaConfig,omitempty" vim:"6.5"` + InfraUpdateHaConfig *ClusterInfraUpdateHaConfigInfo `xml:"infraUpdateHaConfig,omitempty" json:"infraUpdateHaConfig,omitempty"` // ProactiveDrs configuration. - ProactiveDrsConfig *ClusterProactiveDrsConfigInfo `xml:"proactiveDrsConfig,omitempty" json:"proactiveDrsConfig,omitempty" vim:"6.5"` + ProactiveDrsConfig *ClusterProactiveDrsConfigInfo `xml:"proactiveDrsConfig,omitempty" json:"proactiveDrsConfig,omitempty"` // Flag to place the cluster in the HCI workflow during cluster creation. // // This flag is specified only at the time of cluster creation. // A cluster cannot be reconfigured to place it in the HCI workflow. - InHciWorkflow *bool `xml:"inHciWorkflow" json:"inHciWorkflow,omitempty" vim:"6.7.1"` + InHciWorkflow *bool `xml:"inHciWorkflow" json:"inHciWorkflow,omitempty"` // Cluster-wide configuration of encryption mode. - CryptoConfig *ClusterCryptoConfigInfo `xml:"cryptoConfig,omitempty" json:"cryptoConfig,omitempty" vim:"7.0"` + CryptoConfig *ClusterCryptoConfigInfo `xml:"cryptoConfig,omitempty" json:"cryptoConfig,omitempty"` } func init() { t["ClusterConfigSpecEx"] = reflect.TypeOf((*ClusterConfigSpecEx)(nil)).Elem() - minAPIVersionForType["ClusterConfigSpecEx"] = "2.5" } // This event records when a cluster is created. 
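For the maintenance-mode additions earlier in this diff (ClusterComputeResourceMaintenanceInfo and ClusterComputeResourceHostEvacuationInfo, both 8.0.3.0) and the new optional Info argument that ClusterEnterMaintenanceModeRequestType gains further down, a rough usage sketch follows. It is illustrative only: the This and Host fields are assumed from the existing request type, and the evacuation option key shown is a placeholder rather than a value defined by this change.

package example // illustrative sketch, not part of the vendored change

import "github.com/vmware/govmomi/vim25/types"

// enterMaintenanceRequest populates the new Info argument (vim 8.0.3.0) when
// asking DRS to evacuate a host. Leaving PartialMMId unset requests full
// maintenance mode; "exampleEvacuationOption" is a placeholder action key.
func enterMaintenanceRequest(cluster, host types.ManagedObjectReference) *types.ClusterEnterMaintenanceMode {
	return &types.ClusterEnterMaintenanceMode{
		This: cluster,
		Host: []types.ManagedObjectReference{host},
		Info: &types.ClusterComputeResourceMaintenanceInfo{
			HostEvacInfo: []types.ClusterComputeResourceHostEvacuationInfo{{
				Host: host,
				Action: []types.BaseOptionValue{
					&types.OptionValue{Key: "exampleEvacuationOption", Value: "true"},
				},
			}},
		},
	}
}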
@@ -11602,6 +11657,11 @@ type ClusterCryptoConfigInfo struct { // // See `ClusterCryptoConfigInfoCryptoMode_enum` for supported values. CryptoMode string `xml:"cryptoMode,omitempty" json:"cryptoMode,omitempty"` + // The encryption mode policy for the cluster. + // + // When unset, host keys will be automatically generated using the current + // default key provider. + Policy *ClusterComputeResourceCryptoModePolicy `xml:"policy,omitempty" json:"policy,omitempty" vim:"8.0.3.0"` } func init() { @@ -11656,7 +11716,6 @@ type ClusterDasAamHostInfo struct { func init() { t["ClusterDasAamHostInfo"] = reflect.TypeOf((*ClusterDasAamHostInfo)(nil)).Elem() - minAPIVersionForType["ClusterDasAamHostInfo"] = "4.0" } // Deprecated as of vSphere API 5.0, this object is no longer returned by @@ -11708,7 +11767,6 @@ type ClusterDasAamNodeState struct { func init() { t["ClusterDasAamNodeState"] = reflect.TypeOf((*ClusterDasAamNodeState)(nil)).Elem() - minAPIVersionForType["ClusterDasAamNodeState"] = "4.0" } // Base class for admission control related information of a vSphere HA cluster. @@ -11718,7 +11776,6 @@ type ClusterDasAdmissionControlInfo struct { func init() { t["ClusterDasAdmissionControlInfo"] = reflect.TypeOf((*ClusterDasAdmissionControlInfo)(nil)).Elem() - minAPIVersionForType["ClusterDasAdmissionControlInfo"] = "4.0" } // Base class for specifying how admission control should be done for vSphere HA. @@ -11727,7 +11784,7 @@ type ClusterDasAdmissionControlPolicy struct { // Percentage of resource reduction that a cluster of VMs can tolerate // in case of a failover. - ResourceReductionToToleratePercent *int32 `xml:"resourceReductionToToleratePercent" json:"resourceReductionToToleratePercent,omitempty" vim:"6.5"` + ResourceReductionToToleratePercent *int32 `xml:"resourceReductionToToleratePercent" json:"resourceReductionToToleratePercent,omitempty"` // Flag that determines whether strict admission control for persistent // memory is enabled. // @@ -11737,15 +11794,14 @@ type ClusterDasAdmissionControlPolicy struct { // When you use persistent memory admission control, the following // operations are prevented, if doing so would violate the // `ClusterDasConfigInfo.admissionControlEnabled`. - // - Creating a virtual machine with persistent memory. - // - Adding a virtual persistent memory device to a virtual machine. - // - Increasing the capacity of a virtual persistent memory device. + // - Creating a virtual machine with persistent memory. + // - Adding a virtual persistent memory device to a virtual machine. + // - Increasing the capacity of a virtual persistent memory device. PMemAdmissionControlEnabled *bool `xml:"pMemAdmissionControlEnabled" json:"pMemAdmissionControlEnabled,omitempty" vim:"7.0.2.0"` } func init() { t["ClusterDasAdmissionControlPolicy"] = reflect.TypeOf((*ClusterDasAdmissionControlPolicy)(nil)).Elem() - minAPIVersionForType["ClusterDasAdmissionControlPolicy"] = "4.0" } // Base class for advanced runtime information related to the high @@ -11756,15 +11812,14 @@ type ClusterDasAdvancedRuntimeInfo struct { // The information pertaining to the HA agents on the hosts DasHostInfo BaseClusterDasHostInfo `xml:"dasHostInfo,omitempty,typeattr" json:"dasHostInfo,omitempty"` // Whether HA VM Component Protection can be enabled for the cluster. 
- VmcpSupported *ClusterDasAdvancedRuntimeInfoVmcpCapabilityInfo `xml:"vmcpSupported,omitempty" json:"vmcpSupported,omitempty" vim:"6.0"` + VmcpSupported *ClusterDasAdvancedRuntimeInfoVmcpCapabilityInfo `xml:"vmcpSupported,omitempty" json:"vmcpSupported,omitempty"` // The map of a datastore to the set of hosts that are using // the datastore for storage heartbeating. - HeartbeatDatastoreInfo []DasHeartbeatDatastoreInfo `xml:"heartbeatDatastoreInfo,omitempty" json:"heartbeatDatastoreInfo,omitempty" vim:"5.0"` + HeartbeatDatastoreInfo []DasHeartbeatDatastoreInfo `xml:"heartbeatDatastoreInfo,omitempty" json:"heartbeatDatastoreInfo,omitempty"` } func init() { t["ClusterDasAdvancedRuntimeInfo"] = reflect.TypeOf((*ClusterDasAdvancedRuntimeInfo)(nil)).Elem() - minAPIVersionForType["ClusterDasAdvancedRuntimeInfo"] = "4.0" } // Class for capability to support VM Component Protection @@ -11783,7 +11838,6 @@ type ClusterDasAdvancedRuntimeInfoVmcpCapabilityInfo struct { func init() { t["ClusterDasAdvancedRuntimeInfoVmcpCapabilityInfo"] = reflect.TypeOf((*ClusterDasAdvancedRuntimeInfoVmcpCapabilityInfo)(nil)).Elem() - minAPIVersionForType["ClusterDasAdvancedRuntimeInfoVmcpCapabilityInfo"] = "6.0" } // The `ClusterDasConfigInfo` data object contains configuration data @@ -11810,7 +11864,7 @@ type ClusterDasConfigInfo struct { // The Service level specified for the cluster determines // the possible monitoring settings that you can use for individual virtual machines. // See `ClusterVmToolsMonitoringSettings*.*ClusterVmToolsMonitoringSettings.vmMonitoring`. - VmMonitoring string `xml:"vmMonitoring,omitempty" json:"vmMonitoring,omitempty" vim:"4.0"` + VmMonitoring string `xml:"vmMonitoring,omitempty" json:"vmMonitoring,omitempty"` // Determines whether HA restarts virtual machines after a host fails. // // The default value is @@ -11830,7 +11884,7 @@ type ClusterDasConfigInfo struct { // The rest of the cluster operations follow normal processing. // No configuration information is lost and re-enabling the service // is a quick operation. - HostMonitoring string `xml:"hostMonitoring,omitempty" json:"hostMonitoring,omitempty" vim:"4.0"` + HostMonitoring string `xml:"hostMonitoring,omitempty" json:"hostMonitoring,omitempty"` // This property indicates if vSphere HA VM Component Protection service // is enabled. // @@ -11846,7 +11900,7 @@ type ClusterDasConfigInfo struct { // by `ClusterVmComponentProtectionSettings` which is referenced by both cluster // level configuration (`ClusterDasConfigInfo.defaultVmSettings`) and per-VM // override `ClusterConfigInfoEx.dasVmConfig`. - VmComponentProtecting string `xml:"vmComponentProtecting,omitempty" json:"vmComponentProtecting,omitempty" vim:"6.0"` + VmComponentProtecting string `xml:"vmComponentProtecting,omitempty" json:"vmComponentProtecting,omitempty"` // Deprecated as of vSphere API 4.0, use // `ClusterFailoverLevelAdmissionControlPolicy` to set // `ClusterDasConfigInfo.admissionControlPolicy`. @@ -11861,54 +11915,54 @@ type ClusterDasConfigInfo struct { // Virtual machine admission control policy for vSphere HA. // // The policies specify resource availability for failover support. - // - Failover host admission policy - // `ClusterFailoverHostAdmissionControlPolicy` - - // specify one or more dedicated failover hosts. - // - Failover level policy - // `ClusterFailoverLevelAdmissionControlPolicy` - - // the limit of host failures for which resources are reserved. - // When you use the failover level policy, - // HA partitions resources into slots. 
A slot represents the minimum - // CPU and memory resources that are required to support - // any powered on virtual machine in the cluster. - // To retrieve information about partitioned resources, use the - // `ClusterComputeResource.RetrieveDasAdvancedRuntimeInfo` - // method. - // - Resources admission policy - // `ClusterFailoverResourcesAdmissionControlPolicy` - - // CPU and memory resources reserved for failover support. - // When you use the resources policy, you can reserve - // a percentage of the aggregate cluster resource for failover. - AdmissionControlPolicy BaseClusterDasAdmissionControlPolicy `xml:"admissionControlPolicy,omitempty,typeattr" json:"admissionControlPolicy,omitempty" vim:"4.0"` + // - Failover host admission policy + // `ClusterFailoverHostAdmissionControlPolicy` - + // specify one or more dedicated failover hosts. + // - Failover level policy + // `ClusterFailoverLevelAdmissionControlPolicy` - + // the limit of host failures for which resources are reserved. + // When you use the failover level policy, + // HA partitions resources into slots. A slot represents the minimum + // CPU and memory resources that are required to support + // any powered on virtual machine in the cluster. + // To retrieve information about partitioned resources, use the + // `ClusterComputeResource.RetrieveDasAdvancedRuntimeInfo` + // method. + // - Resources admission policy + // `ClusterFailoverResourcesAdmissionControlPolicy` - + // CPU and memory resources reserved for failover support. + // When you use the resources policy, you can reserve + // a percentage of the aggregate cluster resource for failover. + AdmissionControlPolicy BaseClusterDasAdmissionControlPolicy `xml:"admissionControlPolicy,omitempty,typeattr" json:"admissionControlPolicy,omitempty"` // Flag that determines whether strict admission control is enabled. // // When you use admission control, the following operations are // prevented, if doing so would violate the `ClusterDasConfigInfo.admissionControlPolicy`. - // - Powering on a virtual machine in the cluster. - // - Migrating a virtual machine into the cluster. - // - Increasing the CPU or memory reservation of powered-on - // virtual machines in the cluster. + // - Powering on a virtual machine in the cluster. + // - Migrating a virtual machine into the cluster. + // - Increasing the CPU or memory reservation of powered-on + // virtual machines in the cluster. // // With admission control disabled, there is no assurance that // all virtual machines in the HA cluster can be restarted after // a host failure. VMware recommends that you do not disable // admission control, but you might need to do so temporarily, // for the following reasons: - // - If you need to violate the failover constraints when there - // are not enough resources to support them (for example, - // if you are placing hosts in standby mode to test them - // for use with DPM). - // - If an automated process needs to take actions that might - // temporarily violate the failover constraints (for example, - // as part of an upgrade directed by VMware Update Manager). - // - If you need to perform testing or maintenance operations. + // - If you need to violate the failover constraints when there + // are not enough resources to support them (for example, + // if you are placing hosts in standby mode to test them + // for use with DPM). 
+ // - If an automated process needs to take actions that might + // temporarily violate the failover constraints (for example, + // as part of an upgrade directed by VMware Update Manager). + // - If you need to perform testing or maintenance operations. AdmissionControlEnabled *bool `xml:"admissionControlEnabled" json:"admissionControlEnabled,omitempty"` // Cluster-wide defaults for virtual machine HA settings. // // When a virtual machine has no HA configuration // (`ClusterDasVmConfigSpec`), it uses the values // specified here. - DefaultVmSettings *ClusterDasVmSettings `xml:"defaultVmSettings,omitempty" json:"defaultVmSettings,omitempty" vim:"2.5"` + DefaultVmSettings *ClusterDasVmSettings `xml:"defaultVmSettings,omitempty" json:"defaultVmSettings,omitempty"` // Advanced settings. Option []BaseOptionValue `xml:"option,omitempty,typeattr" json:"option,omitempty"` // The list of preferred datastores to use for storage heartbeating. @@ -11928,14 +11982,14 @@ type ClusterDasConfigInfo struct { // `ClusterDasAdvancedRuntimeInfo.heartbeatDatastoreInfo`. // // Refers instances of `Datastore`. - HeartbeatDatastore []ManagedObjectReference `xml:"heartbeatDatastore,omitempty" json:"heartbeatDatastore,omitempty" vim:"5.0"` + HeartbeatDatastore []ManagedObjectReference `xml:"heartbeatDatastore,omitempty" json:"heartbeatDatastore,omitempty"` // The policy on what datastores will be used by vCenter Server to choose // heartbeat datastores. // // See `ClusterDasConfigInfoHBDatastoreCandidate_enum` for all options. // The default value is // `allFeasibleDsWithUserPreference`. - HBDatastoreCandidatePolicy string `xml:"hBDatastoreCandidatePolicy,omitempty" json:"hBDatastoreCandidatePolicy,omitempty" vim:"5.0"` + HBDatastoreCandidatePolicy string `xml:"hBDatastoreCandidatePolicy,omitempty" json:"hBDatastoreCandidatePolicy,omitempty"` } func init() { @@ -11949,7 +12003,6 @@ type ClusterDasData struct { func init() { t["ClusterDasData"] = reflect.TypeOf((*ClusterDasData)(nil)).Elem() - minAPIVersionForType["ClusterDasData"] = "5.0" } // This class contains the summary of the data that DAS needs/uses. @@ -11970,7 +12023,6 @@ type ClusterDasDataSummary struct { func init() { t["ClusterDasDataSummary"] = reflect.TypeOf((*ClusterDasDataSummary)(nil)).Elem() - minAPIVersionForType["ClusterDasDataSummary"] = "5.0" } // Advanced runtime information related to the high availability service @@ -12016,12 +12068,11 @@ type ClusterDasFailoverLevelAdvancedRuntimeInfo struct { HostSlots []ClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots `xml:"hostSlots,omitempty" json:"hostSlots,omitempty"` // The list of virtual machines whose reservations and memory overhead are not // satisfied by a single slot. 
- VmsRequiringMultipleSlots []ClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots `xml:"vmsRequiringMultipleSlots,omitempty" json:"vmsRequiringMultipleSlots,omitempty" vim:"5.1"` + VmsRequiringMultipleSlots []ClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots `xml:"vmsRequiringMultipleSlots,omitempty" json:"vmsRequiringMultipleSlots,omitempty"` } func init() { t["ClusterDasFailoverLevelAdvancedRuntimeInfo"] = reflect.TypeOf((*ClusterDasFailoverLevelAdvancedRuntimeInfo)(nil)).Elem() - minAPIVersionForType["ClusterDasFailoverLevelAdvancedRuntimeInfo"] = "4.0" } type ClusterDasFailoverLevelAdvancedRuntimeInfoHostSlots struct { @@ -12065,7 +12116,6 @@ type ClusterDasFailoverLevelAdvancedRuntimeInfoSlotInfo struct { func init() { t["ClusterDasFailoverLevelAdvancedRuntimeInfoSlotInfo"] = reflect.TypeOf((*ClusterDasFailoverLevelAdvancedRuntimeInfoSlotInfo)(nil)).Elem() - minAPIVersionForType["ClusterDasFailoverLevelAdvancedRuntimeInfoSlotInfo"] = "4.0" } type ClusterDasFailoverLevelAdvancedRuntimeInfoVmSlots struct { @@ -12136,7 +12186,6 @@ type ClusterDasFdmHostState struct { func init() { t["ClusterDasFdmHostState"] = reflect.TypeOf((*ClusterDasFdmHostState)(nil)).Elem() - minAPIVersionForType["ClusterDasFdmHostState"] = "5.0" } // HA specific advanced information pertaining to the hosts in the cluster. @@ -12146,7 +12195,6 @@ type ClusterDasHostInfo struct { func init() { t["ClusterDasHostInfo"] = reflect.TypeOf((*ClusterDasHostInfo)(nil)).Elem() - minAPIVersionForType["ClusterDasHostInfo"] = "4.0" } // A host recommendation for a virtual machine managed by the VMware @@ -12168,7 +12216,6 @@ type ClusterDasHostRecommendation struct { func init() { t["ClusterDasHostRecommendation"] = reflect.TypeOf((*ClusterDasHostRecommendation)(nil)).Elem() - minAPIVersionForType["ClusterDasHostRecommendation"] = "4.0" } // The `ClusterDasVmConfigInfo` data object contains @@ -12215,7 +12262,7 @@ type ClusterDasVmConfigInfo struct { // // Values specified in this object override the cluster-wide // defaults for virtual machines (`ClusterDasConfigInfo.defaultVmSettings`). - DasSettings *ClusterDasVmSettings `xml:"dasSettings,omitempty" json:"dasSettings,omitempty" vim:"2.5"` + DasSettings *ClusterDasVmSettings `xml:"dasSettings,omitempty" json:"dasSettings,omitempty"` } func init() { @@ -12271,7 +12318,7 @@ type ClusterDasVmSettings struct { // Timeout specified in seconds. To use cluster setting for a VM override, // set to -1 in per-VM. // setting. - RestartPriorityTimeout int32 `xml:"restartPriorityTimeout,omitempty" json:"restartPriorityTimeout,omitempty" vim:"6.5"` + RestartPriorityTimeout int32 `xml:"restartPriorityTimeout,omitempty" json:"restartPriorityTimeout,omitempty"` // Indicates whether or not the virtual machine should be powered off if a // host determines that it is isolated from the rest of the compute // resource. @@ -12282,20 +12329,20 @@ type ClusterDasVmSettings struct { // See also `ClusterDasVmSettingsIsolationResponse_enum`. IsolationResponse string `xml:"isolationResponse,omitempty" json:"isolationResponse,omitempty"` // Configuration for the VM Health Monitoring Service. - VmToolsMonitoringSettings *ClusterVmToolsMonitoringSettings `xml:"vmToolsMonitoringSettings,omitempty" json:"vmToolsMonitoringSettings,omitempty" vim:"4.0"` + VmToolsMonitoringSettings *ClusterVmToolsMonitoringSettings `xml:"vmToolsMonitoringSettings,omitempty" json:"vmToolsMonitoringSettings,omitempty"` // Configuration for the VM Component Protection Service. 
- VmComponentProtectionSettings *ClusterVmComponentProtectionSettings `xml:"vmComponentProtectionSettings,omitempty" json:"vmComponentProtectionSettings,omitempty" vim:"6.0"` + VmComponentProtectionSettings *ClusterVmComponentProtectionSettings `xml:"vmComponentProtectionSettings,omitempty" json:"vmComponentProtectionSettings,omitempty"` } func init() { t["ClusterDasVmSettings"] = reflect.TypeOf((*ClusterDasVmSettings)(nil)).Elem() - minAPIVersionForType["ClusterDasVmSettings"] = "2.5" } // An incremental update to a Datastore list. type ClusterDatastoreUpdateSpec struct { ArrayUpdateSpec + // Refers instance of `Datastore`. Datastore *ManagedObjectReference `xml:"datastore,omitempty" json:"datastore,omitempty"` } @@ -12338,7 +12385,6 @@ type ClusterDependencyRuleInfo struct { func init() { t["ClusterDependencyRuleInfo"] = reflect.TypeOf((*ClusterDependencyRuleInfo)(nil)).Elem() - minAPIVersionForType["ClusterDependencyRuleInfo"] = "6.5" } // This event records when a cluster is destroyed. @@ -12373,7 +12419,7 @@ type ClusterDpmConfigInfo struct { // // Ratings vary from 1 to 5. This setting applies // to both manual and automated (@link DpmBehavior) DPM clusters. - HostPowerActionRate int32 `xml:"hostPowerActionRate,omitempty" json:"hostPowerActionRate,omitempty" vim:"4.0"` + HostPowerActionRate int32 `xml:"hostPowerActionRate,omitempty" json:"hostPowerActionRate,omitempty"` // Deprecated as of vSphere API 4.1, use // `ClusterDrsConfigInfo.option`. // @@ -12383,7 +12429,6 @@ type ClusterDpmConfigInfo struct { func init() { t["ClusterDpmConfigInfo"] = reflect.TypeOf((*ClusterDpmConfigInfo)(nil)).Elem() - minAPIVersionForType["ClusterDpmConfigInfo"] = "2.5" } // DPM configuration for a single host. @@ -12415,7 +12460,6 @@ type ClusterDpmHostConfigInfo struct { func init() { t["ClusterDpmHostConfigInfo"] = reflect.TypeOf((*ClusterDpmHostConfigInfo)(nil)).Elem() - minAPIVersionForType["ClusterDpmHostConfigInfo"] = "2.5" } // The `ClusterDpmHostConfigSpec` data object provides information @@ -12461,7 +12505,6 @@ type ClusterDpmHostConfigSpec struct { func init() { t["ClusterDpmHostConfigSpec"] = reflect.TypeOf((*ClusterDpmHostConfigSpec)(nil)).Elem() - minAPIVersionForType["ClusterDpmHostConfigSpec"] = "2.5" } // The `ClusterDrsConfigInfo` data object contains configuration information @@ -12507,7 +12550,7 @@ type ClusterDrsConfigInfo struct { // you cannot override the virtual machine setting // (`ClusterConfigSpecEx.drsVmConfigSpec`) // for Fault Tolerance virtual machines. - EnableVmBehaviorOverrides *bool `xml:"enableVmBehaviorOverrides" json:"enableVmBehaviorOverrides,omitempty" vim:"4.0"` + EnableVmBehaviorOverrides *bool `xml:"enableVmBehaviorOverrides" json:"enableVmBehaviorOverrides,omitempty"` // Specifies the cluster-wide default DRS behavior for virtual machines. // // You can override the default behavior for a virtual machine @@ -12535,7 +12578,7 @@ type ClusterDrsConfigInfo struct { // is equivalent to setting the // `ResourceConfigSpec.scaleDescendantsShares` on the root // resource pool. - ScaleDescendantsShares string `xml:"scaleDescendantsShares,omitempty" json:"scaleDescendantsShares,omitempty" vim:"7.0"` + ScaleDescendantsShares string `xml:"scaleDescendantsShares,omitempty" json:"scaleDescendantsShares,omitempty"` // Advanced settings. 
Option []BaseOptionValue `xml:"option,omitempty,typeattr" json:"option,omitempty"` } @@ -12560,7 +12603,6 @@ type ClusterDrsFaults struct { func init() { t["ClusterDrsFaults"] = reflect.TypeOf((*ClusterDrsFaults)(nil)).Elem() - minAPIVersionForType["ClusterDrsFaults"] = "4.0" } // The faults generated by storage DRS when it tries to move a virtual disk. @@ -12577,7 +12619,6 @@ type ClusterDrsFaultsFaultsByVirtualDisk struct { func init() { t["ClusterDrsFaultsFaultsByVirtualDisk"] = reflect.TypeOf((*ClusterDrsFaultsFaultsByVirtualDisk)(nil)).Elem() - minAPIVersionForType["ClusterDrsFaultsFaultsByVirtualDisk"] = "5.0" } // FaultsByVm is the faults generated by DRS when it tries to @@ -12597,7 +12638,6 @@ type ClusterDrsFaultsFaultsByVm struct { func init() { t["ClusterDrsFaultsFaultsByVm"] = reflect.TypeOf((*ClusterDrsFaultsFaultsByVm)(nil)).Elem() - minAPIVersionForType["ClusterDrsFaultsFaultsByVm"] = "4.0" } // Describes a single virtual machine migration. @@ -12827,7 +12867,8 @@ type ClusterEnterMaintenanceModeRequestType struct { // An array of `OptionValue` // options for this query. The specified options override the // advanced options in `ClusterDrsConfigInfo`. - Option []BaseOptionValue `xml:"option,omitempty,typeattr" json:"option,omitempty"` + Option []BaseOptionValue `xml:"option,omitempty,typeattr" json:"option,omitempty"` + Info *ClusterComputeResourceMaintenanceInfo `xml:"info,omitempty" json:"info,omitempty" vim:"8.0.3.0"` } func init() { @@ -12860,7 +12901,6 @@ type ClusterEnterMaintenanceResult struct { func init() { t["ClusterEnterMaintenanceResult"] = reflect.TypeOf((*ClusterEnterMaintenanceResult)(nil)).Elem() - minAPIVersionForType["ClusterEnterMaintenanceResult"] = "5.0" } // These are cluster events. @@ -12883,7 +12923,6 @@ type ClusterFailoverHostAdmissionControlInfo struct { func init() { t["ClusterFailoverHostAdmissionControlInfo"] = reflect.TypeOf((*ClusterFailoverHostAdmissionControlInfo)(nil)).Elem() - minAPIVersionForType["ClusterFailoverHostAdmissionControlInfo"] = "4.0" } // Data object containing the status of a failover host. @@ -12907,7 +12946,6 @@ type ClusterFailoverHostAdmissionControlInfoHostStatus struct { func init() { t["ClusterFailoverHostAdmissionControlInfoHostStatus"] = reflect.TypeOf((*ClusterFailoverHostAdmissionControlInfoHostStatus)(nil)).Elem() - minAPIVersionForType["ClusterFailoverHostAdmissionControlInfoHostStatus"] = "4.0" } // The `ClusterFailoverHostAdmissionControlPolicy` dedicates @@ -12939,12 +12977,11 @@ type ClusterFailoverHostAdmissionControlPolicy struct { // sufficient resources to restart virtual machines on available hosts. // // If not set, we assume 1. 
- FailoverLevel int32 `xml:"failoverLevel,omitempty" json:"failoverLevel,omitempty" vim:"6.5"` + FailoverLevel int32 `xml:"failoverLevel,omitempty" json:"failoverLevel,omitempty"` } func init() { t["ClusterFailoverHostAdmissionControlPolicy"] = reflect.TypeOf((*ClusterFailoverHostAdmissionControlPolicy)(nil)).Elem() - minAPIVersionForType["ClusterFailoverHostAdmissionControlPolicy"] = "4.0" } // The current admission control related information if the cluster was @@ -12963,7 +13000,6 @@ type ClusterFailoverLevelAdmissionControlInfo struct { func init() { t["ClusterFailoverLevelAdmissionControlInfo"] = reflect.TypeOf((*ClusterFailoverLevelAdmissionControlInfo)(nil)).Elem() - minAPIVersionForType["ClusterFailoverLevelAdmissionControlInfo"] = "4.0" } // The `ClusterFailoverLevelAdmissionControlPolicy` @@ -13029,12 +13065,11 @@ type ClusterFailoverLevelAdmissionControlPolicy struct { // If left unset, the slot is // computed using the maximum reservations and memory overhead of any // powered on virtual machine in the cluster. - SlotPolicy BaseClusterSlotPolicy `xml:"slotPolicy,omitempty,typeattr" json:"slotPolicy,omitempty" vim:"5.1"` + SlotPolicy BaseClusterSlotPolicy `xml:"slotPolicy,omitempty,typeattr" json:"slotPolicy,omitempty"` } func init() { t["ClusterFailoverLevelAdmissionControlPolicy"] = reflect.TypeOf((*ClusterFailoverLevelAdmissionControlPolicy)(nil)).Elem() - minAPIVersionForType["ClusterFailoverLevelAdmissionControlPolicy"] = "4.0" } // The current admission control related information if the cluster was configured @@ -13053,7 +13088,6 @@ type ClusterFailoverResourcesAdmissionControlInfo struct { func init() { t["ClusterFailoverResourcesAdmissionControlInfo"] = reflect.TypeOf((*ClusterFailoverResourcesAdmissionControlInfo)(nil)).Elem() - minAPIVersionForType["ClusterFailoverResourcesAdmissionControlInfo"] = "4.0" } // The `ClusterFailoverResourcesAdmissionControlPolicy` @@ -13095,7 +13129,7 @@ type ClusterFailoverResourcesAdmissionControlPolicy struct { // sufficient resources to restart virtual machines on available hosts. // // If not set, we assume 1. - FailoverLevel int32 `xml:"failoverLevel,omitempty" json:"failoverLevel,omitempty" vim:"6.5"` + FailoverLevel int32 `xml:"failoverLevel,omitempty" json:"failoverLevel,omitempty"` // Flag to enable user input values for // `ClusterFailoverResourcesAdmissionControlPolicy.cpuFailoverResourcesPercent` // and @@ -13106,7 +13140,7 @@ type ClusterFailoverResourcesAdmissionControlPolicy struct { // // If users want to override the percentage values, // they must disable the auto-compute by setting this field to false. - AutoComputePercentages *bool `xml:"autoComputePercentages" json:"autoComputePercentages,omitempty" vim:"6.5"` + AutoComputePercentages *bool `xml:"autoComputePercentages" json:"autoComputePercentages,omitempty"` // Percentage of persistent memory resources in the cluster to reserve for // the failover. 
// @@ -13124,7 +13158,6 @@ type ClusterFailoverResourcesAdmissionControlPolicy struct { func init() { t["ClusterFailoverResourcesAdmissionControlPolicy"] = reflect.TypeOf((*ClusterFailoverResourcesAdmissionControlPolicy)(nil)).Elem() - minAPIVersionForType["ClusterFailoverResourcesAdmissionControlPolicy"] = "4.0" } // This policy allows setting a fixed slot size @@ -13139,7 +13172,55 @@ type ClusterFixedSizeSlotPolicy struct { func init() { t["ClusterFixedSizeSlotPolicy"] = reflect.TypeOf((*ClusterFixedSizeSlotPolicy)(nil)).Elem() - minAPIVersionForType["ClusterFixedSizeSlotPolicy"] = "5.1" +} + +// An `ClusterFtVmHostRuleInfo` object provides control of the +// placement of virtual machines across two host groups. The virtual machines +// and hosts referenced by an FT VM-Host rule must be in the same cluster. +// +// An FT VM-Host rule identifies the following groups. +// - A virtual machine group name (`ClusterVmGroup`). +// - An array of two host groups (`ClusterHostGroup`). +// +// `ClusterFtVmHostRuleInfo` stores only the names of the relevant +// virtual machine and host groups. The group contents are stored in +// the virtual machine and host group objects. +// +// When employing this rule, take care to ensure that the specified +// host groups have sufficient resources to support the requirements +// of all VMs specified. +type ClusterFtVmHostRuleInfo struct { + ClusterRuleInfo + + // Virtual machine group name + // (`ClusterVmGroup*.*ClusterGroupInfo.name`). + // + // The named virtual machine group may have zero or more VMs. + // A virtual machine in this group may be a normal virtual machine + // or a fault tolerant primary virtual machine; it cannot + // be a fault tolerant secondary virtual machine. + // + // Control of FT secondary virtual machines is implied by the presence + // of the primary FT virtual machine. + // + // A virtual machine in this group should not be referenced in any other + // FT VM-Host rule or VM-Host rule `ClusterVmHostRuleInfo`. + VmGroupName string `xml:"vmGroupName" json:"vmGroupName"` + // Array of two Host Groups (`ClusterHostGroup`). + // + // The hostGroup array must have two host groups. Each host group in the + // hostGroup array will have a set of hosts. For each Fault Tolerance primary + // VM that is part of VmGroup, the primary and secondary VMs would be placed + // on hosts that are not part of the same host group. + // + // The members of each host group should be disjoint from the members + // of all other host group specified. + HostGroupName []string `xml:"hostGroupName,omitempty" json:"hostGroupName,omitempty"` +} + +func init() { + t["ClusterFtVmHostRuleInfo"] = reflect.TypeOf((*ClusterFtVmHostRuleInfo)(nil)).Elem() + minAPIVersionForType["ClusterFtVmHostRuleInfo"] = "8.0.3.0" } // `ClusterGroupInfo` is the base type for all virtual machine @@ -13153,17 +13234,16 @@ type ClusterGroupInfo struct { // Unique name of the group. Name string `xml:"name" json:"name"` // Flag to indicate whether the group is created by the user or the system. - UserCreated *bool `xml:"userCreated" json:"userCreated,omitempty" vim:"5.0"` + UserCreated *bool `xml:"userCreated" json:"userCreated,omitempty"` // Unique ID for the group. // // uniqueID is unique within a cluster. // Groups residing in different clusters might share a uniqueID. 
- UniqueID string `xml:"uniqueID,omitempty" json:"uniqueID,omitempty" vim:"6.0"` + UniqueID string `xml:"uniqueID,omitempty" json:"uniqueID,omitempty"` } func init() { t["ClusterGroupInfo"] = reflect.TypeOf((*ClusterGroupInfo)(nil)).Elem() - minAPIVersionForType["ClusterGroupInfo"] = "4.1" } // An incremental update to the cluster-wide groups. @@ -13175,7 +13255,6 @@ type ClusterGroupSpec struct { func init() { t["ClusterGroupSpec"] = reflect.TypeOf((*ClusterGroupSpec)(nil)).Elem() - minAPIVersionForType["ClusterGroupSpec"] = "4.1" } // The `ClusterHostGroup` data object identifies hosts for VM-Host rules. @@ -13196,7 +13275,6 @@ type ClusterHostGroup struct { func init() { t["ClusterHostGroup"] = reflect.TypeOf((*ClusterHostGroup)(nil)).Elem() - minAPIVersionForType["ClusterHostGroup"] = "4.1" } // Describes a HostSystem's quarantine or maintenance mode change action. @@ -13212,7 +13290,6 @@ type ClusterHostInfraUpdateHaModeAction struct { func init() { t["ClusterHostInfraUpdateHaModeAction"] = reflect.TypeOf((*ClusterHostInfraUpdateHaModeAction)(nil)).Elem() - minAPIVersionForType["ClusterHostInfraUpdateHaModeAction"] = "6.5" } // Describes a single host power action. @@ -13246,7 +13323,6 @@ type ClusterHostPowerAction struct { func init() { t["ClusterHostPowerAction"] = reflect.TypeOf((*ClusterHostPowerAction)(nil)).Elem() - minAPIVersionForType["ClusterHostPowerAction"] = "2.5" } // A DRS recommended host for either powering on, resuming or @@ -13313,7 +13389,6 @@ type ClusterInfraUpdateHaConfigInfo struct { func init() { t["ClusterInfraUpdateHaConfigInfo"] = reflect.TypeOf((*ClusterInfraUpdateHaConfigInfo)(nil)).Elem() - minAPIVersionForType["ClusterInfraUpdateHaConfigInfo"] = "6.5" } // Describes an initial placement of a single virtual machine @@ -13333,7 +13408,6 @@ type ClusterInitialPlacementAction struct { func init() { t["ClusterInitialPlacementAction"] = reflect.TypeOf((*ClusterInitialPlacementAction)(nil)).Elem() - minAPIVersionForType["ClusterInitialPlacementAction"] = "2.5" } // Information about an IO Filter on a compute resource. @@ -13352,12 +13426,11 @@ type ClusterIoFilterInfo struct { // The URL of the VIB package that the IO Filter is installed from. // // The property is unset if the information is not available. - VibUrl string `xml:"vibUrl,omitempty" json:"vibUrl,omitempty" vim:"6.5"` + VibUrl string `xml:"vibUrl,omitempty" json:"vibUrl,omitempty"` } func init() { t["ClusterIoFilterInfo"] = reflect.TypeOf((*ClusterIoFilterInfo)(nil)).Elem() - minAPIVersionForType["ClusterIoFilterInfo"] = "6.0" } // Describes a single VM migration action. @@ -13370,7 +13443,6 @@ type ClusterMigrationAction struct { func init() { t["ClusterMigrationAction"] = reflect.TypeOf((*ClusterMigrationAction)(nil)).Elem() - minAPIVersionForType["ClusterMigrationAction"] = "2.5" } // The Cluster network config spec allows specification of @@ -13397,7 +13469,6 @@ type ClusterNetworkConfigSpec struct { func init() { t["ClusterNetworkConfigSpec"] = reflect.TypeOf((*ClusterNetworkConfigSpec)(nil)).Elem() - minAPIVersionForType["ClusterNetworkConfigSpec"] = "6.5" } // This data class reports one virtual machine powerOn failure. @@ -13414,7 +13485,6 @@ type ClusterNotAttemptedVmInfo struct { func init() { t["ClusterNotAttemptedVmInfo"] = reflect.TypeOf((*ClusterNotAttemptedVmInfo)(nil)).Elem() - minAPIVersionForType["ClusterNotAttemptedVmInfo"] = "2.5" } // vSphere cluster VM orchestration settings. 
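The ClusterFtVmHostRuleInfo type introduced in the hunk above (8.0.3.0) plugs into the existing cluster rule machinery. A minimal sketch, assuming the usual pattern of adding a rule through ClusterConfigSpecEx.RulesSpec; the group names "ft-vms", "rack-a" and "rack-b" are hypothetical and must already exist as ClusterVmGroup / ClusterHostGroup entries in the cluster:

package example

import "github.com/vmware/govmomi/vim25/types"

// ftRuleSpec builds a cluster reconfigure spec that adds one FT VM-Host rule.
// The rule and group names are placeholders for illustration only.
func ftRuleSpec() *types.ClusterConfigSpecEx {
	rule := &types.ClusterFtVmHostRuleInfo{
		ClusterRuleInfo: types.ClusterRuleInfo{
			Name:    "ft-anti-affinity",
			Enabled: types.NewBool(true),
		},
		VmGroupName:   "ft-vms",
		HostGroupName: []string{"rack-a", "rack-b"}, // exactly two host groups
	}
	return &types.ClusterConfigSpecEx{
		RulesSpec: []types.ClusterRuleSpec{{
			ArrayUpdateSpec: types.ArrayUpdateSpec{Operation: types.ArrayUpdateOperationAdd},
			Info:            rule,
		}},
	}
}

The spec would then be passed to the cluster's ReconfigureComputeResource_Task call in the usual way.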
@@ -13438,7 +13508,6 @@ type ClusterOrchestrationInfo struct { func init() { t["ClusterOrchestrationInfo"] = reflect.TypeOf((*ClusterOrchestrationInfo)(nil)).Elem() - minAPIVersionForType["ClusterOrchestrationInfo"] = "6.5" } // This event records when a cluster's host capacity cannot satisfy resource @@ -13470,7 +13539,6 @@ type ClusterPowerOnVmResult struct { func init() { t["ClusterPowerOnVmResult"] = reflect.TypeOf((*ClusterPowerOnVmResult)(nil)).Elem() - minAPIVersionForType["ClusterPowerOnVmResult"] = "2.5" } // The `ClusterPreemptibleVmPairInfo` data object contains the monitored and the @@ -13563,7 +13631,6 @@ type ClusterProfileCompleteConfigSpec struct { func init() { t["ClusterProfileCompleteConfigSpec"] = reflect.TypeOf((*ClusterProfileCompleteConfigSpec)(nil)).Elem() - minAPIVersionForType["ClusterProfileCompleteConfigSpec"] = "4.0" } type ClusterProfileConfigInfo struct { @@ -13594,7 +13661,6 @@ type ClusterProfileConfigServiceCreateSpec struct { func init() { t["ClusterProfileConfigServiceCreateSpec"] = reflect.TypeOf((*ClusterProfileConfigServiceCreateSpec)(nil)).Elem() - minAPIVersionForType["ClusterProfileConfigServiceCreateSpec"] = "4.0" } // DataObject which is a baseclass for other configuration @@ -13605,7 +13671,6 @@ type ClusterProfileConfigSpec struct { func init() { t["ClusterProfileConfigSpec"] = reflect.TypeOf((*ClusterProfileConfigSpec)(nil)).Elem() - minAPIVersionForType["ClusterProfileConfigSpec"] = "4.0" } // Base class for Cluster CreateSpecs @@ -13615,7 +13680,6 @@ type ClusterProfileCreateSpec struct { func init() { t["ClusterProfileCreateSpec"] = reflect.TypeOf((*ClusterProfileCreateSpec)(nil)).Elem() - minAPIVersionForType["ClusterProfileCreateSpec"] = "4.0" } // Recommendation is the base class for any packaged group of @@ -13644,9 +13708,9 @@ type ClusterRecommendation struct { ReasonText string `xml:"reasonText" json:"reasonText"` // Text that provides warnings about potential adverse implications of // applying this recommendation - WarningText string `xml:"warningText,omitempty" json:"warningText,omitempty" vim:"6.0"` + WarningText string `xml:"warningText,omitempty" json:"warningText,omitempty"` // Warning about potential adverse implications of applying a recommendation - WarningDetails *LocalizableMessage `xml:"warningDetails,omitempty" json:"warningDetails,omitempty" vim:"6.0"` + WarningDetails *LocalizableMessage `xml:"warningDetails,omitempty" json:"warningDetails,omitempty"` // This recommendation may depend on some other recommendations. // // The prerequisite recommendations are listed by their keys. @@ -13659,7 +13723,6 @@ type ClusterRecommendation struct { func init() { t["ClusterRecommendation"] = reflect.TypeOf((*ClusterRecommendation)(nil)).Elem() - minAPIVersionForType["ClusterRecommendation"] = "2.5" } // This event records when a cluster is reconfigured. @@ -13667,7 +13730,7 @@ type ClusterReconfiguredEvent struct { ClusterEvent // The configuration values changed during the reconfiguration. 
- ConfigChanges *ChangesInfoEventArgument `xml:"configChanges,omitempty" json:"configChanges,omitempty" vim:"6.5"` + ConfigChanges *ChangesInfoEventArgument `xml:"configChanges,omitempty" json:"configChanges,omitempty"` } func init() { @@ -13691,7 +13754,6 @@ type ClusterResourceUsageSummary struct { func init() { t["ClusterResourceUsageSummary"] = reflect.TypeOf((*ClusterResourceUsageSummary)(nil)).Elem() - minAPIVersionForType["ClusterResourceUsageSummary"] = "6.0" } // The `ClusterRuleInfo` data object is the base type for affinity @@ -13779,27 +13841,27 @@ type ClusterRuleInfo struct { // Flag to indicate whether compliance with this rule is mandatory or optional. // // The default value is false (optional). - // - A mandatory rule will prevent a virtual machine from being powered on - // or migrated to a host that does not satisfy the rule. - // - An optional rule specifies a preference. DRS takes an optional rule - // into consideration when it places a virtual machine in the cluster. - // DRS will act on an optional rule as long as it does not impact - // the ability of the host to satisfy current CPU or memory requirements - // for virtual machines on the system. (As long as the operation does not - // cause any host to be more than 100% utilized.) - Mandatory *bool `xml:"mandatory" json:"mandatory,omitempty" vim:"4.1"` + // - A mandatory rule will prevent a virtual machine from being powered on + // or migrated to a host that does not satisfy the rule. + // - An optional rule specifies a preference. DRS takes an optional rule + // into consideration when it places a virtual machine in the cluster. + // DRS will act on an optional rule as long as it does not impact + // the ability of the host to satisfy current CPU or memory requirements + // for virtual machines on the system. (As long as the operation does not + // cause any host to be more than 100% utilized.) + Mandatory *bool `xml:"mandatory" json:"mandatory,omitempty"` // Flag to indicate whether the rule is created by the user or the system. - UserCreated *bool `xml:"userCreated" json:"userCreated,omitempty" vim:"4.1"` + UserCreated *bool `xml:"userCreated" json:"userCreated,omitempty"` // Flag to indicate whether or not the placement of Virtual Machines is currently // in compliance with this rule. // // The Server does not currently use this property. - InCompliance *bool `xml:"inCompliance" json:"inCompliance,omitempty" vim:"4.1"` + InCompliance *bool `xml:"inCompliance" json:"inCompliance,omitempty"` // UUID for the rule. // // When adding a new rule, do not specify this // property. The Server will assign the key. - RuleUuid string `xml:"ruleUuid,omitempty" json:"ruleUuid,omitempty" vim:"6.0"` + RuleUuid string `xml:"ruleUuid,omitempty" json:"ruleUuid,omitempty"` } func init() { @@ -13835,7 +13897,6 @@ type ClusterSlotPolicy struct { func init() { t["ClusterSlotPolicy"] = reflect.TypeOf((*ClusterSlotPolicy)(nil)).Elem() - minAPIVersionForType["ClusterSlotPolicy"] = "5.1" } // This event records when a cluster's overall status changed. @@ -13957,7 +14018,6 @@ type ClusterUsageSummary struct { func init() { t["ClusterUsageSummary"] = reflect.TypeOf((*ClusterUsageSummary)(nil)).Elem() - minAPIVersionForType["ClusterUsageSummary"] = "6.0" } // vSphere HA Virtual Machine Component Protection Service settings. 
@@ -13988,19 +14048,19 @@ type ClusterVmComponentProtectionSettings struct { // When an APD condition happens and the host begins timing out I/Os // (@link vim.host.MountInfo.InaccessibleReason#AllPathsDown\_Timeout}, VM Component // Protection service will react based on the specific value of this property: - // - `**disabled**`, no reaction, i.e., no - // VM failover and no event reporting for the failures. - // - `**warning**`, service will issue events, - // alarms and/or config issues for component failures. - // - `**restartConservative**`, service will - // terminate the impacted VMs after a preconfigured time interval - // (`ClusterVmComponentProtectionSettings.vmTerminateDelayForAPDSec`) if they are to be restarted. - // - `**restartAggressive**`, service might - // terminate the impacted VMs after a preconfigured time interval - // (`ClusterVmComponentProtectionSettings.vmTerminateDelayForAPDSec`). In some cases, a VM is terminated - // even if it may not able to be restarted or lose Fault Tolerance redundancy. - // - `**clusterDefault**`, service will implement - // cluster default. + // - `**disabled**`, no reaction, i.e., no + // VM failover and no event reporting for the failures. + // - `**warning**`, service will issue events, + // alarms and/or config issues for component failures. + // - `**restartConservative**`, service will + // terminate the impacted VMs after a preconfigured time interval + // (`ClusterVmComponentProtectionSettings.vmTerminateDelayForAPDSec`) if they are to be restarted. + // - `**restartAggressive**`, service might + // terminate the impacted VMs after a preconfigured time interval + // (`ClusterVmComponentProtectionSettings.vmTerminateDelayForAPDSec`). In some cases, a VM is terminated + // even if it may not able to be restarted or lose Fault Tolerance redundancy. + // - `**clusterDefault**`, service will implement + // cluster default. VmStorageProtectionForAPD string `xml:"vmStorageProtectionForAPD,omitempty" json:"vmStorageProtectionForAPD,omitempty"` // This property indicates if APD timeout will be enabled for all the hosts // in the cluster when vSphere HA is configured. @@ -14059,7 +14119,6 @@ type ClusterVmComponentProtectionSettings struct { func init() { t["ClusterVmComponentProtectionSettings"] = reflect.TypeOf((*ClusterVmComponentProtectionSettings)(nil)).Elem() - minAPIVersionForType["ClusterVmComponentProtectionSettings"] = "6.0" } // The `ClusterVmGroup` data object identifies virtual machines @@ -14086,7 +14145,6 @@ type ClusterVmGroup struct { func init() { t["ClusterVmGroup"] = reflect.TypeOf((*ClusterVmGroup)(nil)).Elem() - minAPIVersionForType["ClusterVmGroup"] = "4.1" } // A `ClusterVmHostRuleInfo` object identifies virtual machines @@ -14132,7 +14190,6 @@ type ClusterVmHostRuleInfo struct { func init() { t["ClusterVmHostRuleInfo"] = reflect.TypeOf((*ClusterVmHostRuleInfo)(nil)).Elem() - minAPIVersionForType["ClusterVmHostRuleInfo"] = "4.1" } // The `ClusterVmOrchestrationInfo` data object contains the orchestration @@ -14157,7 +14214,6 @@ type ClusterVmOrchestrationInfo struct { func init() { t["ClusterVmOrchestrationInfo"] = reflect.TypeOf((*ClusterVmOrchestrationInfo)(nil)).Elem() - minAPIVersionForType["ClusterVmOrchestrationInfo"] = "6.5" } // An incremental update to the per-VM orchestration config. 
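The APD reactions enumerated in the ClusterVmComponentProtectionSettings comment above map onto plain string fields. A small sketch using the field names as they appear in that hunk; the 300-second delay is an arbitrary illustration, not a recommended value:

package example

import "github.com/vmware/govmomi/vim25/types"

// vmcpSettings terminates APD-affected VMs after a fixed delay so that HA can
// restart them elsewhere; the string value is one of the enum options listed
// in the comment above ("disabled", "warning", "restartConservative", ...).
func vmcpSettings() *types.ClusterVmComponentProtectionSettings {
	return &types.ClusterVmComponentProtectionSettings{
		VmStorageProtectionForAPD: "restartConservative",
		VmTerminateDelayForAPDSec: 300, // seconds before an impacted VM is terminated
	}
}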
@@ -14169,7 +14225,6 @@ type ClusterVmOrchestrationSpec struct { func init() { t["ClusterVmOrchestrationSpec"] = reflect.TypeOf((*ClusterVmOrchestrationSpec)(nil)).Elem() - minAPIVersionForType["ClusterVmOrchestrationSpec"] = "6.5" } // VM readiness policy specifies when a VM is deemed ready. @@ -14199,7 +14254,6 @@ type ClusterVmReadiness struct { func init() { t["ClusterVmReadiness"] = reflect.TypeOf((*ClusterVmReadiness)(nil)).Elem() - minAPIVersionForType["ClusterVmReadiness"] = "6.5" } // The `ClusterVmToolsMonitoringSettings` data object contains @@ -14228,9 +14282,9 @@ type ClusterVmToolsMonitoringSettings struct { // // Specify a string value corresponding to one of the // following `ClusterDasConfigInfoVmMonitoringState_enum` values: - // - vmMonitoringDisabled (the default value) - // - vmMonitoringOnly - // - vmAndAppMonitoring + // - vmMonitoringDisabled (the default value) + // - vmMonitoringOnly + // - vmAndAppMonitoring // // The individual VMware Tools setting for virtual machine monitoring depends on // the HA Virtual Machine Health Monitoring Service level that is @@ -14238,12 +14292,12 @@ type ClusterVmToolsMonitoringSettings struct { // (`ClusterDasConfigInfo*.*ClusterDasConfigInfo.vmMonitoring`). // The following list indicates the supported VMware Tools vmMonitoring values // according to the cluster configuration. - // - If the cluster configuration specifies vmMonitoringDisabled, - // the Service is disabled and the HA Service ignores the VMware Tools monitoring setting. - // - If the cluster configuration specifies vmMonitoringOnly, - // the Service supports vmMonitoringOnly or vmMonitoringDisabled only. - // - If the cluster configuration specifies vmAndAppMonitoring, - // you can use any of the `ClusterDasConfigInfoVmMonitoringState_enum` values. + // - If the cluster configuration specifies vmMonitoringDisabled, + // the Service is disabled and the HA Service ignores the VMware Tools monitoring setting. + // - If the cluster configuration specifies vmMonitoringOnly, + // the Service supports vmMonitoringOnly or vmMonitoringDisabled only. + // - If the cluster configuration specifies vmAndAppMonitoring, + // you can use any of the `ClusterDasConfigInfoVmMonitoringState_enum` values. // // The `ClusterVmToolsMonitoringSettings.clusterSettings` value has no // effect on the constraint imposed by the HA Virtual Machine Health Monitoring Service @@ -14254,7 +14308,7 @@ type ClusterVmToolsMonitoringSettings struct { // currently configured type of virtual machine monitoring. // You can use these events even if monitoring is being disabled // or set to vmMonitoringOnly. - VmMonitoring string `xml:"vmMonitoring,omitempty" json:"vmMonitoring,omitempty" vim:"4.1"` + VmMonitoring string `xml:"vmMonitoring,omitempty" json:"vmMonitoring,omitempty"` // Flag indicating whether to use the cluster settings or the per VM settings. // // The default value is true. 
@@ -14296,7 +14350,6 @@ type ClusterVmToolsMonitoringSettings struct { func init() { t["ClusterVmToolsMonitoringSettings"] = reflect.TypeOf((*ClusterVmToolsMonitoringSettings)(nil)).Elem() - minAPIVersionForType["ClusterVmToolsMonitoringSettings"] = "4.0" } // The distributed virtual switch received a reconfiguration request to @@ -14310,7 +14363,6 @@ type CollectorAddressUnset struct { func init() { t["CollectorAddressUnset"] = reflect.TypeOf((*CollectorAddressUnset)(nil)).Elem() - minAPIVersionForType["CollectorAddressUnset"] = "5.1" } type CollectorAddressUnsetFault CollectorAddressUnset @@ -14333,7 +14385,7 @@ type ComplianceFailure struct { // If complianceStatus is non-compliant, failureValues will // contain values of the non-compliant fields on the host and // in the profile. - FailureValues []ComplianceFailureComplianceFailureValues `xml:"failureValues,omitempty" json:"failureValues,omitempty" vim:"6.5"` + FailureValues []ComplianceFailureComplianceFailureValues `xml:"failureValues,omitempty" json:"failureValues,omitempty"` } func init() { @@ -14374,7 +14426,6 @@ type ComplianceLocator struct { func init() { t["ComplianceLocator"] = reflect.TypeOf((*ComplianceLocator)(nil)).Elem() - minAPIVersionForType["ComplianceLocator"] = "4.0" } // DataObject contains the verifications that need to be done @@ -14390,7 +14441,6 @@ type ComplianceProfile struct { func init() { t["ComplianceProfile"] = reflect.TypeOf((*ComplianceProfile)(nil)).Elem() - minAPIVersionForType["ComplianceProfile"] = "4.0" } // DataObject representing the result from a ComplianceCheck @@ -14420,13 +14470,14 @@ type ComplianceResult struct { func init() { t["ComplianceResult"] = reflect.TypeOf((*ComplianceResult)(nil)).Elem() - minAPIVersionForType["ComplianceResult"] = "4.0" } // The parameters of `HostProfileManager.CompositeHostProfile_Task`. type CompositeHostProfileRequestType struct { - This ManagedObjectReference `xml:"_this" json:"-"` - Source ManagedObjectReference `xml:"source" json:"source"` + This ManagedObjectReference `xml:"_this" json:"-"` + // Refers instance of `Profile`. + Source ManagedObjectReference `xml:"source" json:"source"` + // Refers instances of `Profile`. Targets []ManagedObjectReference `xml:"targets,omitempty" json:"targets,omitempty"` ToBeMerged *HostApplyProfile `xml:"toBeMerged,omitempty" json:"toBeMerged,omitempty"` ToBeReplacedWith *HostApplyProfile `xml:"toBeReplacedWith,omitempty" json:"toBeReplacedWith,omitempty"` @@ -14473,7 +14524,6 @@ type CompositePolicyOption struct { func init() { t["CompositePolicyOption"] = reflect.TypeOf((*CompositePolicyOption)(nil)).Elem() - minAPIVersionForType["CompositePolicyOption"] = "4.0" } type ComputeDiskPartitionInfo ComputeDiskPartitionInfoRequestType @@ -14501,7 +14551,7 @@ type ComputeDiskPartitionInfoForResizeRequestType struct { // computed from the block range. // If partitionFormat is not specified, the existing partitionFormat // on disk is used, if the disk is not blank and mbr otherwise. - PartitionFormat string `xml:"partitionFormat,omitempty" json:"partitionFormat,omitempty" vim:"5.0"` + PartitionFormat string `xml:"partitionFormat,omitempty" json:"partitionFormat,omitempty"` } func init() { @@ -14523,7 +14573,7 @@ type ComputeDiskPartitionInfoRequestType struct { // computed from the block range. // If partitionFormat is not specified, the existing partitionFormat // on disk is used, if the disk is not blank and mbr otherwise. 
- PartitionFormat string `xml:"partitionFormat,omitempty" json:"partitionFormat,omitempty" vim:"5.0"` + PartitionFormat string `xml:"partitionFormat,omitempty" json:"partitionFormat,omitempty"` } func init() { @@ -14546,17 +14596,17 @@ type ComputeResourceConfigInfo struct { // property; the default is "vmDirectory". This setting will be honored // for each virtual machine within the compute resource for which the // following is true: - // - The virtual machine is executing on a host that has the - // `perVmSwapFiles` capability. - // - The virtual machine configuration's - // `swapPlacement` property is set - // to "inherit". + // - The virtual machine is executing on a host that has the + // `perVmSwapFiles` capability. + // - The virtual machine configuration's + // `swapPlacement` property is set + // to "inherit". // // See also `VirtualMachineConfigInfoSwapPlacementType_enum`. VmSwapPlacement string `xml:"vmSwapPlacement" json:"vmSwapPlacement"` // Flag indicating whether or not the SPBM(Storage Policy Based Management) // feature is enabled on this compute resource - SpbmEnabled *bool `xml:"spbmEnabled" json:"spbmEnabled,omitempty" vim:"5.0"` + SpbmEnabled *bool `xml:"spbmEnabled" json:"spbmEnabled,omitempty"` // Key for Default Hardware Version used on this compute resource // in the format of `VirtualMachineConfigOptionDescriptor.key`. // @@ -14564,7 +14614,7 @@ type ComputeResourceConfigInfo struct { // `VirtualMachineConfigOptionDescriptor.defaultConfigOption` returned // by `ComputeResource.environmentBrowser` of this object and all its children // with this field unset. - DefaultHardwareVersionKey string `xml:"defaultHardwareVersionKey,omitempty" json:"defaultHardwareVersionKey,omitempty" vim:"5.1"` + DefaultHardwareVersionKey string `xml:"defaultHardwareVersionKey,omitempty" json:"defaultHardwareVersionKey,omitempty"` // Key for Maximum Hardware Version used on this compute resource // in the format of `VirtualMachineConfigOptionDescriptor.key`. // @@ -14577,7 +14627,6 @@ type ComputeResourceConfigInfo struct { func init() { t["ComputeResourceConfigInfo"] = reflect.TypeOf((*ComputeResourceConfigInfo)(nil)).Elem() - minAPIVersionForType["ComputeResourceConfigInfo"] = "2.5" } // Changes to apply to the compute resource configuration. @@ -14597,7 +14646,7 @@ type ComputeResourceConfigSpec struct { VmSwapPlacement string `xml:"vmSwapPlacement,omitempty" json:"vmSwapPlacement,omitempty"` // Flag indicating whether or not the SPBM(Storage Policy Based Management) // feature is enabled on this compute resource - SpbmEnabled *bool `xml:"spbmEnabled" json:"spbmEnabled,omitempty" vim:"5.0"` + SpbmEnabled *bool `xml:"spbmEnabled" json:"spbmEnabled,omitempty"` // Key for Default Hardware Version to be used on this compute resource // in the format of `VirtualMachineConfigOptionDescriptor.key`. // @@ -14605,12 +14654,12 @@ type ComputeResourceConfigSpec struct { // `VirtualMachineConfigOptionDescriptor.defaultConfigOption` returned // by `ComputeResource.environmentBrowser` of this object and all its children // with this field unset. - DefaultHardwareVersionKey string `xml:"defaultHardwareVersionKey,omitempty" json:"defaultHardwareVersionKey,omitempty" vim:"5.1"` + DefaultHardwareVersionKey string `xml:"defaultHardwareVersionKey,omitempty" json:"defaultHardwareVersionKey,omitempty"` // Desired software spec for the set of physical compute resources. // // This // parameter is only supported in vim.Folder#createClusterEx operation. 
- DesiredSoftwareSpec *DesiredSoftwareSpec `xml:"desiredSoftwareSpec,omitempty" json:"desiredSoftwareSpec,omitempty" vim:"7.0"` + DesiredSoftwareSpec *DesiredSoftwareSpec `xml:"desiredSoftwareSpec,omitempty" json:"desiredSoftwareSpec,omitempty"` // Key for Maximum Hardware Version to be used on this compute resource // in the format of `VirtualMachineConfigOptionDescriptor.key`. // @@ -14627,11 +14676,12 @@ type ComputeResourceConfigSpec struct { // default. This parameter is only supported in `Folder.CreateClusterEx` // operation. EnableConfigManager *bool `xml:"enableConfigManager" json:"enableConfigManager,omitempty" vim:"7.0.3.1"` + // Specification for the host seeding operation. + HostSeedSpec *ComputeResourceHostSeedSpec `xml:"hostSeedSpec,omitempty" json:"hostSeedSpec,omitempty" vim:"8.0.3.0"` } func init() { t["ComputeResourceConfigSpec"] = reflect.TypeOf((*ComputeResourceConfigSpec)(nil)).Elem() - minAPIVersionForType["ComputeResourceConfigSpec"] = "2.5" } // The event argument is a ComputeResource object. @@ -14654,13 +14704,25 @@ func init() { type ComputeResourceHostSPBMLicenseInfo struct { DynamicData + // Refers instance of `HostSystem`. Host ManagedObjectReference `xml:"host" json:"host"` LicenseState ComputeResourceHostSPBMLicenseInfoHostSPBMLicenseState `xml:"licenseState" json:"licenseState"` } func init() { t["ComputeResourceHostSPBMLicenseInfo"] = reflect.TypeOf((*ComputeResourceHostSPBMLicenseInfo)(nil)).Elem() - minAPIVersionForType["ComputeResourceHostSPBMLicenseInfo"] = "5.0" +} + +type ComputeResourceHostSeedSpec struct { + DynamicData + + // Specification for the seed host. + SingleHostSpec ComputeResourceHostSeedSpecSingleHostSpec `xml:"singleHostSpec" json:"singleHostSpec"` +} + +func init() { + t["ComputeResourceHostSeedSpec"] = reflect.TypeOf((*ComputeResourceHostSeedSpec)(nil)).Elem() + minAPIVersionForType["ComputeResourceHostSeedSpec"] = "8.0.3.0" } // This data object contains a specification for a single candidate host @@ -14787,22 +14849,22 @@ type ConfigTarget struct { // Maximum number of CPUs available on a single host. // // For standalone hosts, this value will be the same as numCpus. - MaxCpusPerHost int32 `xml:"maxCpusPerHost,omitempty" json:"maxCpusPerHost,omitempty" vim:"7.0"` + MaxCpusPerHost int32 `xml:"maxCpusPerHost,omitempty" json:"maxCpusPerHost,omitempty"` // Presence of System Management Controller, indicates the host is // Apple hardware, and thus capable of running Mac OS guest as VM. - SmcPresent *bool `xml:"smcPresent" json:"smcPresent,omitempty" vim:"5.0"` + SmcPresent *bool `xml:"smcPresent" json:"smcPresent,omitempty"` // List of datastores available for virtual disks and associated storage. Datastore []VirtualMachineDatastoreInfo `xml:"datastore,omitempty" json:"datastore,omitempty"` // List of networks available for virtual network adapters. Network []VirtualMachineNetworkInfo `xml:"network,omitempty" json:"network,omitempty"` // List of opaque networks available for virtual network adapters. - OpaqueNetwork []OpaqueNetworkTargetInfo `xml:"opaqueNetwork,omitempty" json:"opaqueNetwork,omitempty" vim:"5.5"` + OpaqueNetwork []OpaqueNetworkTargetInfo `xml:"opaqueNetwork,omitempty" json:"opaqueNetwork,omitempty"` // List of networks available from DistributedVirtualSwitch for virtual // network adapters. 
- DistributedVirtualPortgroup []DistributedVirtualPortgroupInfo `xml:"distributedVirtualPortgroup,omitempty" json:"distributedVirtualPortgroup,omitempty" vim:"4.0"` + DistributedVirtualPortgroup []DistributedVirtualPortgroupInfo `xml:"distributedVirtualPortgroup,omitempty" json:"distributedVirtualPortgroup,omitempty"` // List of distributed virtual switch available for virtual network // adapters. - DistributedVirtualSwitch []DistributedVirtualSwitchInfo `xml:"distributedVirtualSwitch,omitempty" json:"distributedVirtualSwitch,omitempty" vim:"4.0"` + DistributedVirtualSwitch []DistributedVirtualSwitchInfo `xml:"distributedVirtualSwitch,omitempty" json:"distributedVirtualSwitch,omitempty"` // List of CD-ROM devices available for use by virtual CD-ROMs. // // Used for @@ -14822,13 +14884,13 @@ type ConfigTarget struct { // // Used for // `VirtualSoundCardDeviceBackingInfo`. - Sound []VirtualMachineSoundInfo `xml:"sound,omitempty" json:"sound,omitempty" vim:"2.5"` + Sound []VirtualMachineSoundInfo `xml:"sound,omitempty" json:"sound,omitempty"` // List of USB devices on the host that are available to support // virtualization. // // Used for // `VirtualUSBUSBBackingInfo`. - Usb []VirtualMachineUsbInfo `xml:"usb,omitempty" json:"usb,omitempty" vim:"2.5"` + Usb []VirtualMachineUsbInfo `xml:"usb,omitempty" json:"usb,omitempty"` // List of floppy devices available for use by virtual floppies. // // Used for @@ -14851,7 +14913,7 @@ type ConfigTarget struct { // `GuestOsDescriptor.supportedMaxMemMB`. When invoked on the // cluster, maximum size that can be created on at least one host // in the cluster is reported. - SupportedMaxMemMB int32 `xml:"supportedMaxMemMB,omitempty" json:"supportedMaxMemMB,omitempty" vim:"7.0"` + SupportedMaxMemMB int32 `xml:"supportedMaxMemMB,omitempty" json:"supportedMaxMemMB,omitempty"` // Information about the current available resources on the current resource pool // for a virtual machine. // @@ -14864,26 +14926,26 @@ type ConfigTarget struct { // virtual machine. AutoVmotion *bool `xml:"autoVmotion" json:"autoVmotion,omitempty"` // List of generic PCI devices. - PciPassthrough []BaseVirtualMachinePciPassthroughInfo `xml:"pciPassthrough,omitempty,typeattr" json:"pciPassthrough,omitempty" vim:"4.0"` + PciPassthrough []BaseVirtualMachinePciPassthroughInfo `xml:"pciPassthrough,omitempty,typeattr" json:"pciPassthrough,omitempty"` // List of SRIOV devices. - Sriov []VirtualMachineSriovInfo `xml:"sriov,omitempty" json:"sriov,omitempty" vim:"5.5"` + Sriov []VirtualMachineSriovInfo `xml:"sriov,omitempty" json:"sriov,omitempty"` // List of vFlash modules. - VFlashModule []VirtualMachineVFlashModuleInfo `xml:"vFlashModule,omitempty" json:"vFlashModule,omitempty" vim:"5.5"` + VFlashModule []VirtualMachineVFlashModuleInfo `xml:"vFlashModule,omitempty" json:"vFlashModule,omitempty"` // List of shared GPU passthrough types. - SharedGpuPassthroughTypes []VirtualMachinePciSharedGpuPassthroughInfo `xml:"sharedGpuPassthroughTypes,omitempty" json:"sharedGpuPassthroughTypes,omitempty" vim:"6.0"` + SharedGpuPassthroughTypes []VirtualMachinePciSharedGpuPassthroughInfo `xml:"sharedGpuPassthroughTypes,omitempty" json:"sharedGpuPassthroughTypes,omitempty"` // Maximum available persistent memory reservation on a compute resource // in MB. 
- AvailablePersistentMemoryReservationMB int64 `xml:"availablePersistentMemoryReservationMB,omitempty" json:"availablePersistentMemoryReservationMB,omitempty" vim:"6.7"` + AvailablePersistentMemoryReservationMB int64 `xml:"availablePersistentMemoryReservationMB,omitempty" json:"availablePersistentMemoryReservationMB,omitempty"` // List of Dynamic DirectPath PCI devices. - DynamicPassthrough []VirtualMachineDynamicPassthroughInfo `xml:"dynamicPassthrough,omitempty" json:"dynamicPassthrough,omitempty" vim:"7.0"` + DynamicPassthrough []VirtualMachineDynamicPassthroughInfo `xml:"dynamicPassthrough,omitempty" json:"dynamicPassthrough,omitempty"` // Intel SGX information. - SgxTargetInfo *VirtualMachineSgxTargetInfo `xml:"sgxTargetInfo,omitempty" json:"sgxTargetInfo,omitempty" vim:"7.0"` + SgxTargetInfo *VirtualMachineSgxTargetInfo `xml:"sgxTargetInfo,omitempty" json:"sgxTargetInfo,omitempty"` // List of host clock resources available to support virtual precision // clock device. // // Used for // `VirtualPrecisionClockSystemClockBackingInfo` - PrecisionClockInfo []VirtualMachinePrecisionClockInfo `xml:"precisionClockInfo,omitempty" json:"precisionClockInfo,omitempty" vim:"7.0"` + PrecisionClockInfo []VirtualMachinePrecisionClockInfo `xml:"precisionClockInfo,omitempty" json:"precisionClockInfo,omitempty"` // Indicates whether the compute resource is capable of running AMD Secure // Encrypted Virtualization (SEV) enabled virtual machines. // @@ -15170,7 +15232,6 @@ type ConflictingConfiguration struct { func init() { t["ConflictingConfiguration"] = reflect.TypeOf((*ConflictingConfiguration)(nil)).Elem() - minAPIVersionForType["ConflictingConfiguration"] = "5.5" } // This class defines the configuration that is in conflict. @@ -15187,7 +15248,6 @@ type ConflictingConfigurationConfig struct { func init() { t["ConflictingConfigurationConfig"] = reflect.TypeOf((*ConflictingConfigurationConfig)(nil)).Elem() - minAPIVersionForType["ConflictingConfigurationConfig"] = "5.5" } type ConflictingConfigurationFault ConflictingConfiguration @@ -15210,7 +15270,6 @@ type ConflictingDatastoreFound struct { func init() { t["ConflictingDatastoreFound"] = reflect.TypeOf((*ConflictingDatastoreFound)(nil)).Elem() - minAPIVersionForType["ConflictingDatastoreFound"] = "5.1" } type ConflictingDatastoreFoundFault ConflictingDatastoreFound @@ -15235,6 +15294,7 @@ type ConnectNvmeControllerExRequestType struct { func init() { t["ConnectNvmeControllerExRequestType"] = reflect.TypeOf((*ConnectNvmeControllerExRequestType)(nil)).Elem() + minAPIVersionForType["ConnectNvmeControllerExRequestType"] = "7.0.3.0" } type ConnectNvmeControllerEx_Task ConnectNvmeControllerExRequestType @@ -15474,7 +15534,6 @@ type CpuHotPlugNotSupported struct { func init() { t["CpuHotPlugNotSupported"] = reflect.TypeOf((*CpuHotPlugNotSupported)(nil)).Elem() - minAPIVersionForType["CpuHotPlugNotSupported"] = "4.0" } type CpuHotPlugNotSupportedFault CpuHotPlugNotSupported @@ -15509,16 +15568,16 @@ type CpuIncompatible struct { // format. // // The '-' character indicates an unknown value. - RegisterBits string `xml:"registerBits,omitempty" json:"registerBits,omitempty" vim:"2.5"` + RegisterBits string `xml:"registerBits,omitempty" json:"registerBits,omitempty"` // The desired values for the register's bits. // // The 'x' character indicates // don't-care. 
- DesiredBits string `xml:"desiredBits,omitempty" json:"desiredBits,omitempty" vim:"2.5"` + DesiredBits string `xml:"desiredBits,omitempty" json:"desiredBits,omitempty"` // The host that is not compatible with the requirements. // // Refers instance of `HostSystem`. - Host *ManagedObjectReference `xml:"host,omitempty" json:"host,omitempty" vim:"2.5"` + Host *ManagedObjectReference `xml:"host,omitempty" json:"host,omitempty"` } func init() { @@ -15536,7 +15595,7 @@ type CpuIncompatible1ECX struct { // Flag to indicate bit 0 is incompatible. Sse3 bool `xml:"sse3" json:"sse3"` // Flag to indicate bit 1 is incompatible. - Pclmulqdq *bool `xml:"pclmulqdq" json:"pclmulqdq,omitempty" vim:"5.0"` + Pclmulqdq *bool `xml:"pclmulqdq" json:"pclmulqdq,omitempty"` // Flag to indicate bit 9 is incompatible. Ssse3 bool `xml:"ssse3" json:"ssse3"` // Flag to indicate bit 19 is incompatible. @@ -15544,7 +15603,7 @@ type CpuIncompatible1ECX struct { // Flag to indicate bit 20 is incompatible. Sse42 bool `xml:"sse42" json:"sse42"` // Flag to indicate bit 25 is incompatible. - Aes *bool `xml:"aes" json:"aes,omitempty" vim:"5.0"` + Aes *bool `xml:"aes" json:"aes,omitempty"` // Flag to indicate that bits other than 0/1/9/19/20/25 are incompatible. // // I.e. the detected incompatibilities cannot be completely described by @@ -15560,7 +15619,6 @@ type CpuIncompatible1ECX struct { func init() { t["CpuIncompatible1ECX"] = reflect.TypeOf((*CpuIncompatible1ECX)(nil)).Elem() - minAPIVersionForType["CpuIncompatible1ECX"] = "2.5" } type CpuIncompatible1ECXFault CpuIncompatible1ECX @@ -15600,7 +15658,6 @@ type CpuIncompatible81EDX struct { func init() { t["CpuIncompatible81EDX"] = reflect.TypeOf((*CpuIncompatible81EDX)(nil)).Elem() - minAPIVersionForType["CpuIncompatible81EDX"] = "2.5" } type CpuIncompatible81EDXFault CpuIncompatible81EDX @@ -15758,6 +15815,30 @@ type CreateCollectorForTasksResponse struct { Returnval ManagedObjectReference `xml:"returnval" json:"returnval"` } +type CreateCollectorWithInfoFilterForTasks CreateCollectorWithInfoFilterForTasksRequestType + +func init() { + t["CreateCollectorWithInfoFilterForTasks"] = reflect.TypeOf((*CreateCollectorWithInfoFilterForTasks)(nil)).Elem() +} + +// The parameters of `TaskManager.CreateCollectorWithInfoFilterForTasks`. +type CreateCollectorWithInfoFilterForTasksRequestType struct { + This ManagedObjectReference `xml:"_this" json:"-"` + // The specification for the task query filter. + Filter TaskFilterSpec `xml:"filter" json:"filter"` + // The specification for the task info filter. + InfoFilter *TaskInfoFilterSpec `xml:"infoFilter,omitempty" json:"infoFilter,omitempty"` +} + +func init() { + t["CreateCollectorWithInfoFilterForTasksRequestType"] = reflect.TypeOf((*CreateCollectorWithInfoFilterForTasksRequestType)(nil)).Elem() + minAPIVersionForType["CreateCollectorWithInfoFilterForTasksRequestType"] = "8.0.3.0" +} + +type CreateCollectorWithInfoFilterForTasksResponse struct { + Returnval ManagedObjectReference `xml:"returnval" json:"returnval"` +} + type CreateContainerView CreateContainerViewRequestType func init() { @@ -15793,46 +15874,46 @@ type CreateContainerViewRequestType struct { // Depending on the container type, the server will use the following // properties of the container instance to obtain objects for the // view's object list: - // - `Folder` object - `Folder.childEntity` - // property. - // If recursive is false, the container list includes the reference - // to the child entity in the folder instance. 
- // If recursive is true, the server will follow the child - // folder path(s) to collect additional childEntity references. - // - `ResourcePool` object - `ResourcePool.vm` - // and `ResourcePool.resourcePool` properties. - // If recursive is false, the object list will contain references - // to the virtual machines associated with this resource pool, - // and references to virtual machines associated with the - // immediate child resource pools. If recursive is true, - // the server will follow all child resource pool paths - // extending from the immediate children (and their children, - // and so on) to collect additional references to virtual machines. - // - `ComputeResource` object - `ComputeResource.host` - // and `ComputeResource.resourcePool` properties. - // If recursive is false, the object list will contain references - // to the host systems associated with this compute resource, - // references to virtual machines associated with the - // host systems, and references to virtual machines associated - // with the immediate child resource pools. - // If recursive is true, the server will follow the child - // resource pool paths (and their child resource pool paths, - // and so on) to collect additional references to virtual machines. - // - `Datacenter` object - `Datacenter.vmFolder`, - // `Datacenter.hostFolder`, - // `Datacenter.datastoreFolder`, and - // `Datacenter.networkFolder` properties. - // If recursive is set to false, the server uses the - // immediate child folders for the virtual machines, - // hosts, datastores, and networks associated with this - // datacenter. If recursive is set to true, the server - // will follow the folder paths to collect references - // to additional objects. - // - `HostSystem` object - `HostSystem.vm` - // property. - // The view object list contains references to the virtual machines - // associated with this host system. The value of recursive does not - // affect this behavior. + // - `Folder` object - `Folder.childEntity` + // property. + // If recursive is false, the container list includes the reference + // to the child entity in the folder instance. + // If recursive is true, the server will follow the child + // folder path(s) to collect additional childEntity references. + // - `ResourcePool` object - `ResourcePool.vm` + // and `ResourcePool.resourcePool` properties. + // If recursive is false, the object list will contain references + // to the virtual machines associated with this resource pool, + // and references to virtual machines associated with the + // immediate child resource pools. If recursive is true, + // the server will follow all child resource pool paths + // extending from the immediate children (and their children, + // and so on) to collect additional references to virtual machines. + // - `ComputeResource` object - `ComputeResource.host` + // and `ComputeResource.resourcePool` properties. + // If recursive is false, the object list will contain references + // to the host systems associated with this compute resource, + // references to virtual machines associated with the + // host systems, and references to virtual machines associated + // with the immediate child resource pools. + // If recursive is true, the server will follow the child + // resource pool paths (and their child resource pool paths, + // and so on) to collect additional references to virtual machines. 
+ // - `Datacenter` object - `Datacenter.vmFolder`, + // `Datacenter.hostFolder`, + // `Datacenter.datastoreFolder`, and + // `Datacenter.networkFolder` properties. + // If recursive is set to false, the server uses the + // immediate child folders for the virtual machines, + // hosts, datastores, and networks associated with this + // datacenter. If recursive is set to true, the server + // will follow the folder paths to collect references + // to additional objects. + // - `HostSystem` object - `HostSystem.vm` + // property. + // The view object list contains references to the virtual machines + // associated with this host system. The value of recursive does not + // affect this behavior. Recursive bool `xml:"recursive" json:"recursive"` } @@ -15947,11 +16028,11 @@ type CreateDefaultProfileRequestType struct { // containing data for the named profile. The type name does not have // to be system-defined. A user-defined profile can include various // dynamically-defined profiles. - ProfileTypeName string `xml:"profileTypeName,omitempty" json:"profileTypeName,omitempty" vim:"5.0"` + ProfileTypeName string `xml:"profileTypeName,omitempty" json:"profileTypeName,omitempty"` // Base profile used during the operation. // // Refers instance of `Profile`. - Profile *ManagedObjectReference `xml:"profile,omitempty" json:"profile,omitempty" vim:"5.0"` + Profile *ManagedObjectReference `xml:"profile,omitempty" json:"profile,omitempty"` } func init() { @@ -16816,6 +16897,7 @@ type CreateSoftwareAdapterRequestType struct { func init() { t["CreateSoftwareAdapterRequestType"] = reflect.TypeOf((*CreateSoftwareAdapterRequestType)(nil)).Elem() + minAPIVersionForType["CreateSoftwareAdapterRequestType"] = "7.0.3.0" } type CreateSoftwareAdapterResponse struct { @@ -16862,7 +16944,6 @@ type CreateTaskAction struct { func init() { t["CreateTaskAction"] = reflect.TypeOf((*CreateTaskAction)(nil)).Elem() - minAPIVersionForType["CreateTaskAction"] = "2.5" } // The parameters of `TaskManager.CreateTask`. @@ -16880,11 +16961,11 @@ type CreateTaskRequestType struct { // false otherwise Cancelable bool `xml:"cancelable" json:"cancelable"` // Key of the task that is the parent of this task - ParentTaskKey string `xml:"parentTaskKey,omitempty" json:"parentTaskKey,omitempty" vim:"4.0"` + ParentTaskKey string `xml:"parentTaskKey,omitempty" json:"parentTaskKey,omitempty"` // Activation Id is a client-provided token to link an // API call with a task. When provided, the activationId is added to the // `TaskInfo` - ActivationId string `xml:"activationId,omitempty" json:"activationId,omitempty" vim:"6.0"` + ActivationId string `xml:"activationId,omitempty" json:"activationId,omitempty"` } func init() { @@ -17144,7 +17225,6 @@ type CryptoKeyId struct { func init() { t["CryptoKeyId"] = reflect.TypeOf((*CryptoKeyId)(nil)).Elem() - minAPIVersionForType["CryptoKeyId"] = "6.5" } // Data Object representing a plain text cryptographic key. @@ -17158,7 +17238,6 @@ type CryptoKeyPlain struct { func init() { t["CryptoKeyPlain"] = reflect.TypeOf((*CryptoKeyPlain)(nil)).Elem() - minAPIVersionForType["CryptoKeyPlain"] = "6.5" } // CryptoKeyResult.java -- @@ -17175,7 +17254,6 @@ type CryptoKeyResult struct { func init() { t["CryptoKeyResult"] = reflect.TypeOf((*CryptoKeyResult)(nil)).Elem() - minAPIVersionForType["CryptoKeyResult"] = "6.5" } type CryptoManagerHostDisable CryptoManagerHostDisableRequestType @@ -17227,6 +17305,8 @@ type CryptoManagerHostKeyStatus struct { // // See `CryptoManagerHostKeyManagementType_enum` for valid values. 
ManagementType string `xml:"managementType,omitempty" json:"managementType,omitempty"` + // Whether the provider of the key has been granted access. + AccessGranted *bool `xml:"accessGranted" json:"accessGranted,omitempty" vim:"8.0.3.0"` } func init() { @@ -17289,6 +17369,7 @@ type CryptoManagerKmipCertSignRequest struct { func init() { t["CryptoManagerKmipCertSignRequest"] = reflect.TypeOf((*CryptoManagerKmipCertSignRequest)(nil)).Elem() + minAPIVersionForType["CryptoManagerKmipCertSignRequest"] = "8.0.1.0" } // Basic information of a certificate. @@ -17325,7 +17406,6 @@ type CryptoManagerKmipCertificateInfo struct { func init() { t["CryptoManagerKmipCertificateInfo"] = reflect.TypeOf((*CryptoManagerKmipCertificateInfo)(nil)).Elem() - minAPIVersionForType["CryptoManagerKmipCertificateInfo"] = "6.5" } // Status of a KMIP cluster. @@ -17335,11 +17415,11 @@ type CryptoManagerKmipClusterStatus struct { // The ID of the KMIP cluster. ClusterId KeyProviderId `xml:"clusterId" json:"clusterId"` // KMS cluster overall status. - OverallStatus ManagedEntityStatus `xml:"overallStatus,omitempty" json:"overallStatus,omitempty" vim:"7.0"` + OverallStatus ManagedEntityStatus `xml:"overallStatus,omitempty" json:"overallStatus,omitempty"` // Key provider management type. // // See `KmipClusterInfoKmsManagementType_enum` for valid values. - ManagementType string `xml:"managementType,omitempty" json:"managementType,omitempty" vim:"7.0"` + ManagementType string `xml:"managementType,omitempty" json:"managementType,omitempty"` // Status of the KMIP servers in this cluster. Servers []CryptoManagerKmipServerStatus `xml:"servers" json:"servers"` // The basic information about the client's certificate. @@ -17348,7 +17428,6 @@ type CryptoManagerKmipClusterStatus struct { func init() { t["CryptoManagerKmipClusterStatus"] = reflect.TypeOf((*CryptoManagerKmipClusterStatus)(nil)).Elem() - minAPIVersionForType["CryptoManagerKmipClusterStatus"] = "6.5" } // Status of a Crypto key @@ -17377,7 +17456,6 @@ type CryptoManagerKmipCryptoKeyStatus struct { func init() { t["CryptoManagerKmipCryptoKeyStatus"] = reflect.TypeOf((*CryptoManagerKmipCryptoKeyStatus)(nil)).Elem() - minAPIVersionForType["CryptoManagerKmipCryptoKeyStatus"] = "6.7.2" } // Crypto key custom attribute spec @@ -17407,7 +17485,6 @@ type CryptoManagerKmipServerCertInfo struct { func init() { t["CryptoManagerKmipServerCertInfo"] = reflect.TypeOf((*CryptoManagerKmipServerCertInfo)(nil)).Elem() - minAPIVersionForType["CryptoManagerKmipServerCertInfo"] = "6.5" } // Status of a KMIP server. 
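The reindented CreateContainerView comment above describes how recursive traversal widens the view's object list. A short usage sketch with govmomi's view helper package, retrieving every VirtualMachine under the root folder; the retrieved property ("name") is illustrative:

package example

import (
	"context"

	"github.com/vmware/govmomi/view"
	"github.com/vmware/govmomi/vim25"
	"github.com/vmware/govmomi/vim25/mo"
)

// listVMs builds a recursive ContainerView rooted at the service root folder,
// so the view's object list contains every VirtualMachine in the inventory,
// then retrieves just the "name" property for each of them.
func listVMs(ctx context.Context, c *vim25.Client) ([]mo.VirtualMachine, error) {
	v, err := view.NewManager(c).CreateContainerView(ctx, c.ServiceContent.RootFolder, []string{"VirtualMachine"}, true)
	if err != nil {
		return nil, err
	}
	defer v.Destroy(ctx)

	var vms []mo.VirtualMachine
	err = v.Retrieve(ctx, []string{"VirtualMachine"}, []string{"name"}, &vms)
	return vms, err
}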
@@ -17430,7 +17507,6 @@ type CryptoManagerKmipServerStatus struct { func init() { t["CryptoManagerKmipServerStatus"] = reflect.TypeOf((*CryptoManagerKmipServerStatus)(nil)).Elem() - minAPIVersionForType["CryptoManagerKmipServerStatus"] = "6.5" } // This data object type encapsulates virtual machine or disk encryption @@ -17441,7 +17517,6 @@ type CryptoSpec struct { func init() { t["CryptoSpec"] = reflect.TypeOf((*CryptoSpec)(nil)).Elem() - minAPIVersionForType["CryptoSpec"] = "6.5" } // This data object type encapsulates virtual machine or disk encryption @@ -17452,7 +17527,6 @@ type CryptoSpecDecrypt struct { func init() { t["CryptoSpecDecrypt"] = reflect.TypeOf((*CryptoSpecDecrypt)(nil)).Elem() - minAPIVersionForType["CryptoSpecDecrypt"] = "6.5" } // This data object type encapsulates virtual machine or disk cryptographic @@ -17465,7 +17539,6 @@ type CryptoSpecDeepRecrypt struct { func init() { t["CryptoSpecDeepRecrypt"] = reflect.TypeOf((*CryptoSpecDeepRecrypt)(nil)).Elem() - minAPIVersionForType["CryptoSpecDeepRecrypt"] = "6.5" } // This data object type encapsulates virtual machine or disk cryptohraphic @@ -17478,7 +17551,6 @@ type CryptoSpecEncrypt struct { func init() { t["CryptoSpecEncrypt"] = reflect.TypeOf((*CryptoSpecEncrypt)(nil)).Elem() - minAPIVersionForType["CryptoSpecEncrypt"] = "6.5" } // This data object type indicates that the encryption settings of the @@ -17489,7 +17561,6 @@ type CryptoSpecNoOp struct { func init() { t["CryptoSpecNoOp"] = reflect.TypeOf((*CryptoSpecNoOp)(nil)).Elem() - minAPIVersionForType["CryptoSpecNoOp"] = "6.5" } // This data object type indicates that the operation requires keys to be sent @@ -17504,7 +17575,6 @@ type CryptoSpecRegister struct { func init() { t["CryptoSpecRegister"] = reflect.TypeOf((*CryptoSpecRegister)(nil)).Elem() - minAPIVersionForType["CryptoSpecRegister"] = "6.5" } // This data object type encapsulates virtual machine or disk cryptographic @@ -17517,7 +17587,6 @@ type CryptoSpecShallowRecrypt struct { func init() { t["CryptoSpecShallowRecrypt"] = reflect.TypeOf((*CryptoSpecShallowRecrypt)(nil)).Elem() - minAPIVersionForType["CryptoSpecShallowRecrypt"] = "6.5" } type CryptoUnlockRequestType struct { @@ -17574,11 +17643,11 @@ type CustomFieldDef struct { // // If not specified, // the field is valid for all managed objects. - ManagedObjectType string `xml:"managedObjectType,omitempty" json:"managedObjectType,omitempty" vim:"2.5"` + ManagedObjectType string `xml:"managedObjectType,omitempty" json:"managedObjectType,omitempty"` // The set of privileges to apply on this field definition - FieldDefPrivileges *PrivilegePolicyDef `xml:"fieldDefPrivileges,omitempty" json:"fieldDefPrivileges,omitempty" vim:"2.5"` + FieldDefPrivileges *PrivilegePolicyDef `xml:"fieldDefPrivileges,omitempty" json:"fieldDefPrivileges,omitempty"` // The set of privileges to apply on instances of this field - FieldInstancePrivileges *PrivilegePolicyDef `xml:"fieldInstancePrivileges,omitempty" json:"fieldInstancePrivileges,omitempty" vim:"2.5"` + FieldInstancePrivileges *PrivilegePolicyDef `xml:"fieldInstancePrivileges,omitempty" json:"fieldInstancePrivileges,omitempty"` } func init() { @@ -17674,7 +17743,7 @@ type CustomFieldValueChangedEvent struct { // The new value that was set. Value string `xml:"value" json:"value"` // The previous service state. 
- PrevState string `xml:"prevState,omitempty" json:"prevState,omitempty" vim:"6.5"` + PrevState string `xml:"prevState,omitempty" json:"prevState,omitempty"` } func init() { @@ -17718,7 +17787,6 @@ type CustomizationAutoIpV6Generator struct { func init() { t["CustomizationAutoIpV6Generator"] = reflect.TypeOf((*CustomizationAutoIpV6Generator)(nil)).Elem() - minAPIVersionForType["CustomizationAutoIpV6Generator"] = "4.0" } // Guest customization settings to customize a Linux guest operating @@ -17774,7 +17842,6 @@ type CustomizationCustomIpV6Generator struct { func init() { t["CustomizationCustomIpV6Generator"] = reflect.TypeOf((*CustomizationCustomIpV6Generator)(nil)).Elem() - minAPIVersionForType["CustomizationCustomIpV6Generator"] = "4.0" } // Specifies that the VirtualCenter server will launch an external application to @@ -17812,7 +17879,6 @@ type CustomizationDhcpIpV6Generator struct { func init() { t["CustomizationDhcpIpV6Generator"] = reflect.TypeOf((*CustomizationDhcpIpV6Generator)(nil)).Elem() - minAPIVersionForType["CustomizationDhcpIpV6Generator"] = "4.0" } // Base for customization events. @@ -17826,7 +17892,6 @@ type CustomizationEvent struct { func init() { t["CustomizationEvent"] = reflect.TypeOf((*CustomizationEvent)(nil)).Elem() - minAPIVersionForType["CustomizationEvent"] = "2.5" } // The customization sequence in the guest failed. @@ -17834,12 +17899,11 @@ type CustomizationFailed struct { CustomizationEvent // Reason why the customization failed @see CustomizationFailed.ReasonCode . - Reason string `xml:"reason,omitempty" json:"reason,omitempty" vim:"7.0"` + Reason string `xml:"reason,omitempty" json:"reason,omitempty"` } func init() { t["CustomizationFailed"] = reflect.TypeOf((*CustomizationFailed)(nil)).Elem() - minAPIVersionForType["CustomizationFailed"] = "2.5" } // Base for exceptions that can be thrown from the customizer. @@ -17878,7 +17942,6 @@ type CustomizationFixedIpV6 struct { func init() { t["CustomizationFixedIpV6"] = reflect.TypeOf((*CustomizationFixedIpV6)(nil)).Elem() - minAPIVersionForType["CustomizationFixedIpV6"] = "4.0" } // A fixed name. @@ -18005,7 +18068,7 @@ type CustomizationIPSettings struct { Gateway []string `xml:"gateway,omitempty" json:"gateway,omitempty"` // This contains the IpGenerator, subnet mask and gateway info for all // the ipv6 addresses associated with the virtual network adapter. - IpV6Spec *CustomizationIPSettingsIpV6AddressSpec `xml:"ipV6Spec,omitempty" json:"ipV6Spec,omitempty" vim:"4.0"` + IpV6Spec *CustomizationIPSettingsIpV6AddressSpec `xml:"ipV6Spec,omitempty" json:"ipV6Spec,omitempty"` // A list of server IP addresses to use for DNS lookup in a Windows guest operating // system. 
// @@ -18048,7 +18111,6 @@ type CustomizationIPSettingsIpV6AddressSpec struct { func init() { t["CustomizationIPSettingsIpV6AddressSpec"] = reflect.TypeOf((*CustomizationIPSettingsIpV6AddressSpec)(nil)).Elem() - minAPIVersionForType["CustomizationIPSettingsIpV6AddressSpec"] = "4.0" } // The Identification data object type provides information needed to join a workgroup @@ -18120,7 +18182,6 @@ type CustomizationIpV6Generator struct { func init() { t["CustomizationIpV6Generator"] = reflect.TypeOf((*CustomizationIpV6Generator)(nil)).Elem() - minAPIVersionForType["CustomizationIpV6Generator"] = "4.0" } // The LicenseFilePrintData type maps directly to the LicenseFilePrintData key in the @@ -18155,7 +18216,6 @@ type CustomizationLinuxIdentityFailed struct { func init() { t["CustomizationLinuxIdentityFailed"] = reflect.TypeOf((*CustomizationLinuxIdentityFailed)(nil)).Elem() - minAPIVersionForType["CustomizationLinuxIdentityFailed"] = "2.5" } // Base object type for optional operations supported by the customization process for @@ -18190,13 +18250,27 @@ type CustomizationLinuxPrep struct { // other regional designation. // // See the List of supported time zones for different vSphere versions in Linux/Unix systems. - TimeZone string `xml:"timeZone,omitempty" json:"timeZone,omitempty" vim:"4.0"` + TimeZone string `xml:"timeZone,omitempty" json:"timeZone,omitempty"` // Specifies whether the hardware clock is in UTC or local time. - // - True when the hardware clock is in UTC. - // - False when the hardware clock is in local time. - HwClockUTC *bool `xml:"hwClockUTC" json:"hwClockUTC,omitempty" vim:"4.0"` + // - True when the hardware clock is in UTC. + // - False when the hardware clock is in local time. + HwClockUTC *bool `xml:"hwClockUTC" json:"hwClockUTC,omitempty"` // The script to run before and after GOS customization. - ScriptText string `xml:"scriptText,omitempty" json:"scriptText,omitempty" vim:"7.0"` + ScriptText string `xml:"scriptText,omitempty" json:"scriptText,omitempty"` + // The compatible customization method is an identifier of a customization + // strategy which is implementable in a group of Linux operating systems. + // + // This value does not need to be set if your operating system is officially + // supported by VMware guest operating system customization. When using a + // Linux operating system which hasn't been officially supported and it is + // designed to be 100% bug-for-bug compatible with an officially supported + // Linux operating system, it can be customized by an existing customization + // method. + // + // Please set the compatible customization method to a supported string value + // e.g. "GOSC\_METHOD\_1". + // See Supported compatible customization method list. + CompatibleCustomizationMethod string `xml:"compatibleCustomizationMethod,omitempty" json:"compatibleCustomizationMethod,omitempty" vim:"8.0.3.0"` } func init() { @@ -18220,7 +18294,6 @@ type CustomizationNetworkSetupFailed struct { func init() { t["CustomizationNetworkSetupFailed"] = reflect.TypeOf((*CustomizationNetworkSetupFailed)(nil)).Elem() - minAPIVersionForType["CustomizationNetworkSetupFailed"] = "2.5" } // Base object type for optional operations supported by the customization process. 
@@ -18258,7 +18331,6 @@ type CustomizationPending struct { func init() { t["CustomizationPending"] = reflect.TypeOf((*CustomizationPending)(nil)).Elem() - minAPIVersionForType["CustomizationPending"] = "2.5" } type CustomizationPendingFault CustomizationPending @@ -18313,7 +18385,7 @@ type CustomizationSpec struct { // Both the client and the server can use this to determine if // stored passwords can be decrypted by the server or if the passwords need to be // re-entered and re-encrypted before the specification can be used. - EncryptionKey []byte `xml:"encryptionKey,omitempty" json:"encryptionKey,omitempty"` + EncryptionKey ByteSlice `xml:"encryptionKey,omitempty" json:"encryptionKey,omitempty"` } func init() { @@ -18396,7 +18468,6 @@ type CustomizationStartedEvent struct { func init() { t["CustomizationStartedEvent"] = reflect.TypeOf((*CustomizationStartedEvent)(nil)).Elem() - minAPIVersionForType["CustomizationStartedEvent"] = "2.5" } // Use stateless autoconfiguration to configure to ipv6 address @@ -18406,7 +18477,6 @@ type CustomizationStatelessIpV6Generator struct { func init() { t["CustomizationStatelessIpV6Generator"] = reflect.TypeOf((*CustomizationStatelessIpV6Generator)(nil)).Elem() - minAPIVersionForType["CustomizationStatelessIpV6Generator"] = "4.0" } // The customization sequence completed successfully in the guest. @@ -18416,7 +18486,6 @@ type CustomizationSucceeded struct { func init() { t["CustomizationSucceeded"] = reflect.TypeOf((*CustomizationSucceeded)(nil)).Elem() - minAPIVersionForType["CustomizationSucceeded"] = "2.5" } // An object representation of a Windows `sysprep.xml` answer file. @@ -18463,7 +18532,6 @@ type CustomizationSysprepFailed struct { func init() { t["CustomizationSysprepFailed"] = reflect.TypeOf((*CustomizationSysprepFailed)(nil)).Elem() - minAPIVersionForType["CustomizationSysprepFailed"] = "2.5" } // An alternate way to specify the `sysprep.xml` answer file. @@ -18491,7 +18559,6 @@ type CustomizationUnknownFailure struct { func init() { t["CustomizationUnknownFailure"] = reflect.TypeOf((*CustomizationUnknownFailure)(nil)).Elem() - minAPIVersionForType["CustomizationUnknownFailure"] = "2.5" } // The IP address is left unspecified. @@ -18516,7 +18583,6 @@ type CustomizationUnknownIpV6Generator struct { func init() { t["CustomizationUnknownIpV6Generator"] = reflect.TypeOf((*CustomizationUnknownIpV6Generator)(nil)).Elem() - minAPIVersionForType["CustomizationUnknownIpV6Generator"] = "4.0" } // Indicates that the name is not specified in advance. @@ -18606,7 +18672,7 @@ type CustomizationWinOptions struct { // taken after running sysprep. // // Defaults to "reboot". - Reboot CustomizationSysprepRebootOption `xml:"reboot,omitempty" json:"reboot,omitempty" vim:"2.5"` + Reboot CustomizationSysprepRebootOption `xml:"reboot,omitempty" json:"reboot,omitempty"` } func init() { @@ -18696,7 +18762,6 @@ type DVPortConfigInfo struct { func init() { t["DVPortConfigInfo"] = reflect.TypeOf((*DVPortConfigInfo)(nil)).Elem() - minAPIVersionForType["DVPortConfigInfo"] = "4.0" } // Specification to reconfigure a `DistributedVirtualPort`. @@ -18707,8 +18772,8 @@ type DVPortConfigSpec struct { // // The valid values // are: - // - `edit` - // - `remove` + // - `edit` + // - `remove` Operation string `xml:"operation" json:"operation"` // Key of the port to be reconfigured. 
Key string `xml:"key,omitempty" json:"key,omitempty"` @@ -18731,7 +18796,6 @@ type DVPortConfigSpec struct { func init() { t["DVPortConfigSpec"] = reflect.TypeOf((*DVPortConfigSpec)(nil)).Elem() - minAPIVersionForType["DVPortConfigSpec"] = "4.0" } // The virtual machine is configured to use a DVPort, which is not @@ -18746,7 +18810,6 @@ type DVPortNotSupported struct { func init() { t["DVPortNotSupported"] = reflect.TypeOf((*DVPortNotSupported)(nil)).Elem() - minAPIVersionForType["DVPortNotSupported"] = "4.1" } type DVPortNotSupportedFault DVPortNotSupported @@ -18775,7 +18838,7 @@ type DVPortSetting struct { // `DVSFeatureCapability`, // `HostCapability`, `PhysicalNic`, // and `VirtualEthernetCardOption` objects. - VmDirectPathGen2Allowed *BoolPolicy `xml:"vmDirectPathGen2Allowed,omitempty" json:"vmDirectPathGen2Allowed,omitempty" vim:"4.1"` + VmDirectPathGen2Allowed *BoolPolicy `xml:"vmDirectPathGen2Allowed,omitempty" json:"vmDirectPathGen2Allowed,omitempty"` // Network shaping policy for controlling throughput of inbound traffic. InShapingPolicy *DVSTrafficShapingPolicy `xml:"inShapingPolicy,omitempty" json:"inShapingPolicy,omitempty"` // Network shaping policy for controlling throughput of outbound traffic. @@ -18790,14 +18853,13 @@ type DVPortSetting struct { // // The default value for this property is "-1", indicating that // this port is not associated with any network resource pool. - NetworkResourcePoolKey *StringPolicy `xml:"networkResourcePoolKey,omitempty" json:"networkResourcePoolKey,omitempty" vim:"5.0"` + NetworkResourcePoolKey *StringPolicy `xml:"networkResourcePoolKey,omitempty" json:"networkResourcePoolKey,omitempty"` // Configuration for Network Filter Policy. - FilterPolicy *DvsFilterPolicy `xml:"filterPolicy,omitempty" json:"filterPolicy,omitempty" vim:"5.5"` + FilterPolicy *DvsFilterPolicy `xml:"filterPolicy,omitempty" json:"filterPolicy,omitempty"` } func init() { t["DVPortSetting"] = reflect.TypeOf((*DVPortSetting)(nil)).Elem() - minAPIVersionForType["DVPortSetting"] = "4.0" } // The state of a DistributedVirtualPort. @@ -18816,7 +18878,6 @@ type DVPortState struct { func init() { t["DVPortState"] = reflect.TypeOf((*DVPortState)(nil)).Elem() - minAPIVersionForType["DVPortState"] = "4.0" } // The `DVPortStatus` data object @@ -18845,7 +18906,7 @@ type DVPortStatus struct { // The MAC address that is used at this port. MacAddress string `xml:"macAddress,omitempty" json:"macAddress,omitempty"` // Additional information regarding the current status of the port. - StatusDetail string `xml:"statusDetail,omitempty" json:"statusDetail,omitempty" vim:"4.1"` + StatusDetail string `xml:"statusDetail,omitempty" json:"statusDetail,omitempty"` // Deprecated as of vSphere API 8.0. VMDirectPath Gen 2 is no longer // supported and there is no replacement. // @@ -18859,7 +18920,7 @@ type DVPortStatus struct { // If the host software is not capable of VMDirectPath Gen 2, // this property will be unset. See // `HostCapability*.*HostCapability.vmDirectPathGen2Supported`. - VmDirectPathGen2Active *bool `xml:"vmDirectPathGen2Active" json:"vmDirectPathGen2Active,omitempty" vim:"4.1"` + VmDirectPathGen2Active *bool `xml:"vmDirectPathGen2Active" json:"vmDirectPathGen2Active,omitempty"` // Deprecated as of vSphere API 8.0. VMDirectPath Gen 2 is no longer // supported and there is no replacement. // @@ -18878,7 +18939,7 @@ type DVPortStatus struct { // with an additional explanation provided by the platform. // // Note that this list of reasons is not guaranteed to be exhaustive. 
- VmDirectPathGen2InactiveReasonNetwork []string `xml:"vmDirectPathGen2InactiveReasonNetwork,omitempty" json:"vmDirectPathGen2InactiveReasonNetwork,omitempty" vim:"4.1"` + VmDirectPathGen2InactiveReasonNetwork []string `xml:"vmDirectPathGen2InactiveReasonNetwork,omitempty" json:"vmDirectPathGen2InactiveReasonNetwork,omitempty"` // Deprecated as of vSphere API 8.0. VMDirectPath Gen 2 is no longer // supported and there is no replacement. // @@ -18899,7 +18960,7 @@ type DVPortStatus struct { // Note that this list of reasons is not guaranteed to be exhaustive. // // See also `HostCapability.vmDirectPathGen2Supported`. - VmDirectPathGen2InactiveReasonOther []string `xml:"vmDirectPathGen2InactiveReasonOther,omitempty" json:"vmDirectPathGen2InactiveReasonOther,omitempty" vim:"4.1"` + VmDirectPathGen2InactiveReasonOther []string `xml:"vmDirectPathGen2InactiveReasonOther,omitempty" json:"vmDirectPathGen2InactiveReasonOther,omitempty"` // Deprecated as of vSphere API 8.0. VMDirectPath Gen 2 is no longer // supported and there is no replacement. // @@ -18907,12 +18968,11 @@ type DVPortStatus struct { // contain an explanation provided by the platform, beyond the reasons // (if any) listed in `DVPortStatus.vmDirectPathGen2InactiveReasonNetwork` // and/or `DVPortStatus.vmDirectPathGen2InactiveReasonOther`. - VmDirectPathGen2InactiveReasonExtended string `xml:"vmDirectPathGen2InactiveReasonExtended,omitempty" json:"vmDirectPathGen2InactiveReasonExtended,omitempty" vim:"4.1"` + VmDirectPathGen2InactiveReasonExtended string `xml:"vmDirectPathGen2InactiveReasonExtended,omitempty" json:"vmDirectPathGen2InactiveReasonExtended,omitempty"` } func init() { t["DVPortStatus"] = reflect.TypeOf((*DVPortStatus)(nil)).Elem() - minAPIVersionForType["DVPortStatus"] = "4.0" } // The `DVPortgroupConfigInfo` data object defines @@ -18950,7 +19010,7 @@ type DVPortgroupConfigInfo struct { // `DistributedVirtualPortgroup*.*DistributedVirtualPortgroupBackingType_enum` // for possible values. // The default value is "standard" - BackingType string `xml:"backingType,omitempty" json:"backingType,omitempty" vim:"7.0"` + BackingType string `xml:"backingType,omitempty" json:"backingType,omitempty"` // Portgroup policy. Policy BaseDVPortgroupPolicy `xml:"policy,typeattr" json:"policy"` // If set, a name will be automatically generated based on this format @@ -19006,7 +19066,7 @@ type DVPortgroupConfigInfo struct { // likely to be deleted automatically, as a part of auto-shrink step, if there are more // than certain number of free ports. If the portgroup never auto-expanded, then it will // never lose any free ports. - AutoExpand *bool `xml:"autoExpand" json:"autoExpand,omitempty" vim:"5.0"` + AutoExpand *bool `xml:"autoExpand" json:"autoExpand,omitempty"` // The key of virtual NIC network resource pool to be associated with a portgroup. // // The default value for this property is unset, indicating that @@ -19014,22 +19074,21 @@ type DVPortgroupConfigInfo struct { // To clear the value of this property and revert to unset, set the // `DVPortgroupConfigSpec.vmVnicNetworkResourcePoolKey` // to "-1" in an update operation. - VmVnicNetworkResourcePoolKey string `xml:"vmVnicNetworkResourcePoolKey,omitempty" json:"vmVnicNetworkResourcePoolKey,omitempty" vim:"6.0"` + VmVnicNetworkResourcePoolKey string `xml:"vmVnicNetworkResourcePoolKey,omitempty" json:"vmVnicNetworkResourcePoolKey,omitempty"` // Indicates whether the portgroup is an uplink portroup. 
- Uplink *bool `xml:"uplink" json:"uplink,omitempty" vim:"6.5"` + Uplink *bool `xml:"uplink" json:"uplink,omitempty"` // The UUID of transport zone to be associated with a NSX portgroup. - TransportZoneUuid string `xml:"transportZoneUuid,omitempty" json:"transportZoneUuid,omitempty" vim:"7.0"` + TransportZoneUuid string `xml:"transportZoneUuid,omitempty" json:"transportZoneUuid,omitempty"` // The name of transport zone to be associated with a NSX portgroup. - TransportZoneName string `xml:"transportZoneName,omitempty" json:"transportZoneName,omitempty" vim:"7.0"` + TransportZoneName string `xml:"transportZoneName,omitempty" json:"transportZoneName,omitempty"` // The logical switch UUID, which is used by NSX portgroup - LogicalSwitchUuid string `xml:"logicalSwitchUuid,omitempty" json:"logicalSwitchUuid,omitempty" vim:"7.0"` + LogicalSwitchUuid string `xml:"logicalSwitchUuid,omitempty" json:"logicalSwitchUuid,omitempty"` // The segment ID of logical switch - SegmentId string `xml:"segmentId,omitempty" json:"segmentId,omitempty" vim:"7.0"` + SegmentId string `xml:"segmentId,omitempty" json:"segmentId,omitempty"` } func init() { t["DVPortgroupConfigInfo"] = reflect.TypeOf((*DVPortgroupConfigInfo)(nil)).Elem() - minAPIVersionForType["DVPortgroupConfigInfo"] = "4.0" } // The `DVPortgroupConfigSpec` @@ -19083,7 +19142,7 @@ type DVPortgroupConfigSpec struct { // `DistributedVirtualPortgroup*.*DistributedVirtualPortgroupBackingType_enum` // for possible values. // The default value is "standard" - BackingType string `xml:"backingType,omitempty" json:"backingType,omitempty" vim:"7.0"` + BackingType string `xml:"backingType,omitempty" json:"backingType,omitempty"` // Deprecated as of vSphere API 5.5. // // Eligible entities that can connect to the port. @@ -19109,25 +19168,24 @@ type DVPortgroupConfigSpec struct { // likely to be deleted automatically, as a part of auto-shrink step, if there are more // than certain number of free ports. If the portgroup never auto-expanded, then it will // never lose any free ports. - AutoExpand *bool `xml:"autoExpand" json:"autoExpand,omitempty" vim:"5.0"` + AutoExpand *bool `xml:"autoExpand" json:"autoExpand,omitempty"` // The key of virtual NIC network resource pool to be associated with a portgroup. // // Setting this property to "-1", would mean that this portgroup // is not associated with any virtual NIC network resource pool. - VmVnicNetworkResourcePoolKey string `xml:"vmVnicNetworkResourcePoolKey,omitempty" json:"vmVnicNetworkResourcePoolKey,omitempty" vim:"6.0"` + VmVnicNetworkResourcePoolKey string `xml:"vmVnicNetworkResourcePoolKey,omitempty" json:"vmVnicNetworkResourcePoolKey,omitempty"` // The UUID of transport zone to be associated with a NSX portgroup. - TransportZoneUuid string `xml:"transportZoneUuid,omitempty" json:"transportZoneUuid,omitempty" vim:"7.0"` + TransportZoneUuid string `xml:"transportZoneUuid,omitempty" json:"transportZoneUuid,omitempty"` // The name of transport zone to be associated with a NSX portgroup. 
- TransportZoneName string `xml:"transportZoneName,omitempty" json:"transportZoneName,omitempty" vim:"7.0"` + TransportZoneName string `xml:"transportZoneName,omitempty" json:"transportZoneName,omitempty"` // The logical switch UUID, which is used by NSX portgroup - LogicalSwitchUuid string `xml:"logicalSwitchUuid,omitempty" json:"logicalSwitchUuid,omitempty" vim:"7.0"` + LogicalSwitchUuid string `xml:"logicalSwitchUuid,omitempty" json:"logicalSwitchUuid,omitempty"` // The segment ID of logical switch - SegmentId string `xml:"segmentId,omitempty" json:"segmentId,omitempty" vim:"7.0"` + SegmentId string `xml:"segmentId,omitempty" json:"segmentId,omitempty"` } func init() { t["DVPortgroupConfigSpec"] = reflect.TypeOf((*DVPortgroupConfigSpec)(nil)).Elem() - minAPIVersionForType["DVPortgroupConfigSpec"] = "4.0" } // Two distributed virtual portgroup was created. @@ -19137,7 +19195,6 @@ type DVPortgroupCreatedEvent struct { func init() { t["DVPortgroupCreatedEvent"] = reflect.TypeOf((*DVPortgroupCreatedEvent)(nil)).Elem() - minAPIVersionForType["DVPortgroupCreatedEvent"] = "4.0" } // Two distributed virtual portgroup was destroyed. @@ -19147,7 +19204,6 @@ type DVPortgroupDestroyedEvent struct { func init() { t["DVPortgroupDestroyedEvent"] = reflect.TypeOf((*DVPortgroupDestroyedEvent)(nil)).Elem() - minAPIVersionForType["DVPortgroupDestroyedEvent"] = "4.0" } // DVPortgroup related events. @@ -19157,7 +19213,6 @@ type DVPortgroupEvent struct { func init() { t["DVPortgroupEvent"] = reflect.TypeOf((*DVPortgroupEvent)(nil)).Elem() - minAPIVersionForType["DVPortgroupEvent"] = "4.0" } // The DistributedVirtualPortgroup policies. @@ -19194,18 +19249,17 @@ type DVPortgroupPolicy struct { // individual port to override the setting in // `DVPortgroupConfigInfo.defaultPortConfig` // of a portgroup. - NetworkResourcePoolOverrideAllowed *bool `xml:"networkResourcePoolOverrideAllowed" json:"networkResourcePoolOverrideAllowed,omitempty" vim:"5.0"` + NetworkResourcePoolOverrideAllowed *bool `xml:"networkResourcePoolOverrideAllowed" json:"networkResourcePoolOverrideAllowed,omitempty"` // Allow the setting of // `DVPortSetting.filterPolicy`, // for an individual port to override the setting in // `DVPortgroupConfigInfo.defaultPortConfig` of // a portgroup. - TrafficFilterOverrideAllowed *bool `xml:"trafficFilterOverrideAllowed" json:"trafficFilterOverrideAllowed,omitempty" vim:"5.5"` + TrafficFilterOverrideAllowed *bool `xml:"trafficFilterOverrideAllowed" json:"trafficFilterOverrideAllowed,omitempty"` } func init() { t["DVPortgroupPolicy"] = reflect.TypeOf((*DVPortgroupPolicy)(nil)).Elem() - minAPIVersionForType["DVPortgroupPolicy"] = "4.0" } // Two distributed virtual portgroup was reconfigured. @@ -19215,12 +19269,11 @@ type DVPortgroupReconfiguredEvent struct { // The reconfiguration spec. ConfigSpec DVPortgroupConfigSpec `xml:"configSpec" json:"configSpec"` // The configuration values changed during the reconfiguration. - ConfigChanges *ChangesInfoEventArgument `xml:"configChanges,omitempty" json:"configChanges,omitempty" vim:"6.5"` + ConfigChanges *ChangesInfoEventArgument `xml:"configChanges,omitempty" json:"configChanges,omitempty"` } func init() { t["DVPortgroupReconfiguredEvent"] = reflect.TypeOf((*DVPortgroupReconfiguredEvent)(nil)).Elem() - minAPIVersionForType["DVPortgroupReconfiguredEvent"] = "4.0" } // Two distributed virtual portgroup was renamed. 
@@ -19235,7 +19288,6 @@ type DVPortgroupRenamedEvent struct { func init() { t["DVPortgroupRenamedEvent"] = reflect.TypeOf((*DVPortgroupRenamedEvent)(nil)).Elem() - minAPIVersionForType["DVPortgroupRenamedEvent"] = "4.0" } // The parameters of `DistributedVirtualPortgroup.DVPortgroupRollback_Task`. @@ -19271,7 +19323,6 @@ type DVPortgroupSelection struct { func init() { t["DVPortgroupSelection"] = reflect.TypeOf((*DVPortgroupSelection)(nil)).Elem() - minAPIVersionForType["DVPortgroupSelection"] = "5.0" } // The `DVSBackupRestoreCapability` data object @@ -19290,7 +19341,6 @@ type DVSBackupRestoreCapability struct { func init() { t["DVSBackupRestoreCapability"] = reflect.TypeOf((*DVSBackupRestoreCapability)(nil)).Elem() - minAPIVersionForType["DVSBackupRestoreCapability"] = "5.1" } // The `DVSCapability` data object @@ -19323,12 +19373,11 @@ type DVSCapability struct { // `DVSFeatureCapability*.*DVSFeatureCapability.vmDirectPathGen2Supported` // during switch creation or when you call the // `DistributedVirtualSwitch.UpdateDvsCapability` method. - FeaturesSupported BaseDVSFeatureCapability `xml:"featuresSupported,omitempty,typeattr" json:"featuresSupported,omitempty" vim:"4.1"` + FeaturesSupported BaseDVSFeatureCapability `xml:"featuresSupported,omitempty,typeattr" json:"featuresSupported,omitempty"` } func init() { t["DVSCapability"] = reflect.TypeOf((*DVSCapability)(nil)).Elem() - minAPIVersionForType["DVSCapability"] = "4.0" } // Configuration of a `DistributedVirtualSwitch`. @@ -19393,39 +19442,38 @@ type DVSConfigInfo struct { // // The // utility of this address is defined by other switch features. - SwitchIpAddress string `xml:"switchIpAddress,omitempty" json:"switchIpAddress,omitempty" vim:"5.0"` + SwitchIpAddress string `xml:"switchIpAddress,omitempty" json:"switchIpAddress,omitempty"` // Create time of the switch. CreateTime time.Time `xml:"createTime" json:"createTime"` // Boolean to indicate if network I/O control is enabled on the // switch. - NetworkResourceManagementEnabled *bool `xml:"networkResourceManagementEnabled" json:"networkResourceManagementEnabled,omitempty" vim:"4.1"` + NetworkResourceManagementEnabled *bool `xml:"networkResourceManagementEnabled" json:"networkResourceManagementEnabled,omitempty"` // Default host proxy switch maximum port number - DefaultProxySwitchMaxNumPorts int32 `xml:"defaultProxySwitchMaxNumPorts,omitempty" json:"defaultProxySwitchMaxNumPorts,omitempty" vim:"5.1"` + DefaultProxySwitchMaxNumPorts int32 `xml:"defaultProxySwitchMaxNumPorts,omitempty" json:"defaultProxySwitchMaxNumPorts,omitempty"` // VDS health check configuration. - HealthCheckConfig []BaseDVSHealthCheckConfig `xml:"healthCheckConfig,omitempty,typeattr" json:"healthCheckConfig,omitempty" vim:"5.1"` + HealthCheckConfig []BaseDVSHealthCheckConfig `xml:"healthCheckConfig,omitempty,typeattr" json:"healthCheckConfig,omitempty"` // Host infrastructure traffic class resource configuration. - InfrastructureTrafficResourceConfig []DvsHostInfrastructureTrafficResource `xml:"infrastructureTrafficResourceConfig,omitempty" json:"infrastructureTrafficResourceConfig,omitempty" vim:"6.0"` + InfrastructureTrafficResourceConfig []DvsHostInfrastructureTrafficResource `xml:"infrastructureTrafficResourceConfig,omitempty" json:"infrastructureTrafficResourceConfig,omitempty"` // Dynamic Host infrastructure traffic class resource configuration. 
- NetResourcePoolTrafficResourceConfig []DvsHostInfrastructureTrafficResource `xml:"netResourcePoolTrafficResourceConfig,omitempty" json:"netResourcePoolTrafficResourceConfig,omitempty" vim:"6.7"` + NetResourcePoolTrafficResourceConfig []DvsHostInfrastructureTrafficResource `xml:"netResourcePoolTrafficResourceConfig,omitempty" json:"netResourcePoolTrafficResourceConfig,omitempty"` // Network resource control version of the switch. // // Possible value can be of // `DistributedVirtualSwitchNetworkResourceControlVersion_enum`. - NetworkResourceControlVersion string `xml:"networkResourceControlVersion,omitempty" json:"networkResourceControlVersion,omitempty" vim:"6.0"` + NetworkResourceControlVersion string `xml:"networkResourceControlVersion,omitempty" json:"networkResourceControlVersion,omitempty"` // The Virtual NIC network resource pool information for the switch. - VmVnicNetworkResourcePool []DVSVmVnicNetworkResourcePool `xml:"vmVnicNetworkResourcePool,omitempty" json:"vmVnicNetworkResourcePool,omitempty" vim:"6.0"` + VmVnicNetworkResourcePool []DVSVmVnicNetworkResourcePool `xml:"vmVnicNetworkResourcePool,omitempty" json:"vmVnicNetworkResourcePool,omitempty"` // The percentage of physical nic link speed // `PhysicalNicLinkInfo.speedMb` // available for infrastructure traffic reservation. // // If this value is 75, then for a 1Gbps physical nic, only // 750Mbps is allowed for all infrastructure traffic reservations. - PnicCapacityRatioForReservation int32 `xml:"pnicCapacityRatioForReservation,omitempty" json:"pnicCapacityRatioForReservation,omitempty" vim:"6.0"` + PnicCapacityRatioForReservation int32 `xml:"pnicCapacityRatioForReservation,omitempty" json:"pnicCapacityRatioForReservation,omitempty"` } func init() { t["DVSConfigInfo"] = reflect.TypeOf((*DVSConfigInfo)(nil)).Elem() - minAPIVersionForType["DVSConfigInfo"] = "4.0" } // The `DVSConfigSpec` @@ -19502,28 +19550,27 @@ type DVSConfigSpec struct { // IPv6 address is not supported for this property. // The utility of this address is defined by other switch features. // switchIpAddress would be ignored when IPFIX collector uses IPv6. - SwitchIpAddress string `xml:"switchIpAddress,omitempty" json:"switchIpAddress,omitempty" vim:"5.0"` + SwitchIpAddress string `xml:"switchIpAddress,omitempty" json:"switchIpAddress,omitempty"` // The default host proxy switch maximum port number - DefaultProxySwitchMaxNumPorts int32 `xml:"defaultProxySwitchMaxNumPorts,omitempty" json:"defaultProxySwitchMaxNumPorts,omitempty" vim:"5.1"` + DefaultProxySwitchMaxNumPorts int32 `xml:"defaultProxySwitchMaxNumPorts,omitempty" json:"defaultProxySwitchMaxNumPorts,omitempty"` // The host infrastructure traffic resource allocation specification. // // Only the traffic class resource allocations identified in the list // will be updated. The other traffic class resource allocations that are not // specified will not change. - InfrastructureTrafficResourceConfig []DvsHostInfrastructureTrafficResource `xml:"infrastructureTrafficResourceConfig,omitempty" json:"infrastructureTrafficResourceConfig,omitempty" vim:"6.0"` + InfrastructureTrafficResourceConfig []DvsHostInfrastructureTrafficResource `xml:"infrastructureTrafficResourceConfig,omitempty" json:"infrastructureTrafficResourceConfig,omitempty"` // The dynamic host infrastructure traffic resource allocation // specification. 
- NetResourcePoolTrafficResourceConfig []DvsHostInfrastructureTrafficResource `xml:"netResourcePoolTrafficResourceConfig,omitempty" json:"netResourcePoolTrafficResourceConfig,omitempty" vim:"6.7"` + NetResourcePoolTrafficResourceConfig []DvsHostInfrastructureTrafficResource `xml:"netResourcePoolTrafficResourceConfig,omitempty" json:"netResourcePoolTrafficResourceConfig,omitempty"` // Indicates the Network Resource Control APIs that are supported on the switch. // // Possible value can be of // `DistributedVirtualSwitchNetworkResourceControlVersion_enum`. - NetworkResourceControlVersion string `xml:"networkResourceControlVersion,omitempty" json:"networkResourceControlVersion,omitempty" vim:"6.0"` + NetworkResourceControlVersion string `xml:"networkResourceControlVersion,omitempty" json:"networkResourceControlVersion,omitempty"` } func init() { t["DVSConfigSpec"] = reflect.TypeOf((*DVSConfigSpec)(nil)).Elem() - minAPIVersionForType["DVSConfigSpec"] = "4.0" } // Contact information of a human operator. @@ -19538,7 +19585,6 @@ type DVSContactInfo struct { func init() { t["DVSContactInfo"] = reflect.TypeOf((*DVSContactInfo)(nil)).Elem() - minAPIVersionForType["DVSContactInfo"] = "4.0" } // Specification to create a `DistributedVirtualSwitch`. @@ -19559,7 +19605,6 @@ type DVSCreateSpec struct { func init() { t["DVSCreateSpec"] = reflect.TypeOf((*DVSCreateSpec)(nil)).Elem() - minAPIVersionForType["DVSCreateSpec"] = "4.0" } // This data object type describes the network adapter failover @@ -19569,12 +19614,12 @@ type DVSFailureCriteria struct { // To use link speed as the criteria, _checkSpeed_ must be one of // the following values: - // - `*exact*`: Use exact speed to detect link failure. - // `*speed*` is the configured exact speed in megabits per second. - // - `*minimum*`: Use minimum speed to detect failure. - // `*speed*` is the configured minimum speed in megabits per second. - // - **empty string**: Do not use link speed to detect failure. - // `*speed*` is unused in this case. + // - `*exact*`: Use exact speed to detect link failure. + // `*speed*` is the configured exact speed in megabits per second. + // - `*minimum*`: Use minimum speed to detect failure. + // `*speed*` is the configured minimum speed in megabits per second. + // - **empty string**: Do not use link speed to detect failure. + // `*speed*` is unused in this case. CheckSpeed *StringPolicy `xml:"checkSpeed,omitempty" json:"checkSpeed,omitempty"` // See also `DVSFailureCriteria.checkSpeed`. Speed *IntPolicy `xml:"speed,omitempty" json:"speed,omitempty"` @@ -19615,7 +19660,6 @@ type DVSFailureCriteria struct { func init() { t["DVSFailureCriteria"] = reflect.TypeOf((*DVSFailureCriteria)(nil)).Elem() - minAPIVersionForType["DVSFailureCriteria"] = "4.0" } // The `DVSFeatureCapability` data object @@ -19671,14 +19715,14 @@ type DVSFeatureCapability struct { NetworkResourcePoolHighShareValue int32 `xml:"networkResourcePoolHighShareValue,omitempty" json:"networkResourcePoolHighShareValue,omitempty"` // Network resource management capabilities supported by a // distributed virtual switch. 
- NetworkResourceManagementCapability *DVSNetworkResourceManagementCapability `xml:"networkResourceManagementCapability,omitempty" json:"networkResourceManagementCapability,omitempty" vim:"5.0"` + NetworkResourceManagementCapability *DVSNetworkResourceManagementCapability `xml:"networkResourceManagementCapability,omitempty" json:"networkResourceManagementCapability,omitempty"` // Health check capabilities supported by a `VmwareDistributedVirtualSwitch`. - HealthCheckCapability BaseDVSHealthCheckCapability `xml:"healthCheckCapability,omitempty,typeattr" json:"healthCheckCapability,omitempty" vim:"5.1"` + HealthCheckCapability BaseDVSHealthCheckCapability `xml:"healthCheckCapability,omitempty,typeattr" json:"healthCheckCapability,omitempty"` // Host rollback capability. // // If rollbackCapability.`DVSRollbackCapability.rollbackSupported` // is true, network operations that disconnect the the host are rolled back. - RollbackCapability *DVSRollbackCapability `xml:"rollbackCapability,omitempty" json:"rollbackCapability,omitempty" vim:"5.1"` + RollbackCapability *DVSRollbackCapability `xml:"rollbackCapability,omitempty" json:"rollbackCapability,omitempty"` // Backup, restore, and rollback capabilities. // // Backup and restore @@ -19693,18 +19737,160 @@ type DVSFeatureCapability struct { // `DistributedVirtualSwitch*.*DistributedVirtualSwitch.DVSRollback_Task` // and `DistributedVirtualPortgroup*.*DistributedVirtualPortgroup.DVPortgroupRollback_Task` // methods. - BackupRestoreCapability *DVSBackupRestoreCapability `xml:"backupRestoreCapability,omitempty" json:"backupRestoreCapability,omitempty" vim:"5.1"` + BackupRestoreCapability *DVSBackupRestoreCapability `xml:"backupRestoreCapability,omitempty" json:"backupRestoreCapability,omitempty"` // Indicates whether Network Filter feature is // supported in vSphere Distributed Switch. - NetworkFilterSupported *bool `xml:"networkFilterSupported" json:"networkFilterSupported,omitempty" vim:"5.5"` + NetworkFilterSupported *bool `xml:"networkFilterSupported" json:"networkFilterSupported,omitempty"` // Indicates whether MAC learning feature is // supported in vSphere Distributed Switch. - MacLearningSupported *bool `xml:"macLearningSupported" json:"macLearningSupported,omitempty" vim:"6.7"` + MacLearningSupported *bool `xml:"macLearningSupported" json:"macLearningSupported,omitempty"` } func init() { t["DVSFeatureCapability"] = reflect.TypeOf((*DVSFeatureCapability)(nil)).Elem() - minAPIVersionForType["DVSFeatureCapability"] = "4.1" +} + +// Base class for connectee filters. +// +// This class serves as a base for different types of connectee filters. +// It has three sub-classes. +type DVSFilterSpecConnecteeSpec struct { + DynamicData +} + +func init() { + t["DVSFilterSpecConnecteeSpec"] = reflect.TypeOf((*DVSFilterSpecConnecteeSpec)(nil)).Elem() + minAPIVersionForType["DVSFilterSpecConnecteeSpec"] = "8.0.3.0" +} + +// Sub-class for connectee filters. +// +// This is for the connectee type to be pnic. +// Two filters will apply, which are pnicName and hostName. +// This connectee whole-name will be made up from two names: pnicName and hostName. +type DVSFilterSpecPnicConnecteeSpec struct { + DVSFilterSpecConnecteeSpec + + // The pnic name to be filtered in the connectee column. + // + // If set, port's connectee type being a pnic whose whole-name including this string are qualified. 
+ PnicNameSpec string `xml:"pnicNameSpec,omitempty" json:"pnicNameSpec,omitempty"` +} + +func init() { + t["DVSFilterSpecPnicConnecteeSpec"] = reflect.TypeOf((*DVSFilterSpecPnicConnecteeSpec)(nil)).Elem() + minAPIVersionForType["DVSFilterSpecPnicConnecteeSpec"] = "8.0.3.0" +} + +// Sub-class for Vlan filters. +// +// This is for the Vlan type to be private Vlan. +type DVSFilterSpecPvlanSpec struct { + DVSFilterSpecVlanSpec + + // The private VLAN ID for ports. + // + // Possible values: + // A value of 0 specifies that you do not want the port associated + // with a VLAN. + // A value from 1 to 4094 specifies a VLAN ID for the port. + // If set, port private vlans matching are qualified. + PvlanId int32 `xml:"pvlanId,omitempty" json:"pvlanId,omitempty"` +} + +func init() { + t["DVSFilterSpecPvlanSpec"] = reflect.TypeOf((*DVSFilterSpecPvlanSpec)(nil)).Elem() + minAPIVersionForType["DVSFilterSpecPvlanSpec"] = "8.0.3.0" +} + +// Sub-class for Vlan filters. +// +// This is for the Vlan type to be trunking. +type DVSFilterSpecTrunkVlanSpec struct { + DVSFilterSpecVlanSpec + + // The VlanId range for the trunk port. + // + // The valid VlanId range is + // from 0 to 4094. Overlapping ranges are allowed. + // If set, port trunk ranges matching are qualified. + Range *NumericRange `xml:"range,omitempty" json:"range,omitempty"` +} + +func init() { + t["DVSFilterSpecTrunkVlanSpec"] = reflect.TypeOf((*DVSFilterSpecTrunkVlanSpec)(nil)).Elem() + minAPIVersionForType["DVSFilterSpecTrunkVlanSpec"] = "8.0.3.0" +} + +// Sub-class for Vlan filters. +// +// This is for the Vlan type to be Vlan. +type DVSFilterSpecVlanIdSpec struct { + DVSFilterSpecVlanSpec + + // The VLAN ID for ports. + // + // Possible values: + // A value of 0 specifies that you do not want the port associated + // with a VLAN. + // A value from 1 to 4094 specifies a VLAN ID for the port. + // If set,port vlans matching are qualified. + VlanId int32 `xml:"vlanId,omitempty" json:"vlanId,omitempty"` +} + +func init() { + t["DVSFilterSpecVlanIdSpec"] = reflect.TypeOf((*DVSFilterSpecVlanIdSpec)(nil)).Elem() + minAPIVersionForType["DVSFilterSpecVlanIdSpec"] = "8.0.3.0" +} + +// Base class for VlanSpec filters. +// +// This class serves as a base for different types of VlanSpec filters. +// It has three sub-classes. +type DVSFilterSpecVlanSpec struct { + DynamicData +} + +func init() { + t["DVSFilterSpecVlanSpec"] = reflect.TypeOf((*DVSFilterSpecVlanSpec)(nil)).Elem() + minAPIVersionForType["DVSFilterSpecVlanSpec"] = "8.0.3.0" +} + +// Sub-class for connectee filters. +// +// This is for the connectee type to be vm. +// Only one filter will apply, whici is vmName. +type DVSFilterSpecVmConnecteeSpec struct { + DVSFilterSpecConnecteeSpec + + // The vm name to be filtered in the connectee column. + // + // If set, port's connectee type being a vm whose name including this string are qualified. + VmNameSpec string `xml:"vmNameSpec,omitempty" json:"vmNameSpec,omitempty"` +} + +func init() { + t["DVSFilterSpecVmConnecteeSpec"] = reflect.TypeOf((*DVSFilterSpecVmConnecteeSpec)(nil)).Elem() + minAPIVersionForType["DVSFilterSpecVmConnecteeSpec"] = "8.0.3.0" +} + +// Sub-class for connectee filters. +// +// This is for the connectee type to be vmknic. +// Two filters will apply, which are vmknicName and hostName. +// This connectee whole-name will be made up from two names: vmknicName and hostName. +type DVSFilterSpecVmknicConnecteeSpec struct { + DVSFilterSpecConnecteeSpec + + // The vmknic name to be filtered in the connectee column. 
+ // + // If set, port's connectee type being a vmknic whose whole-name including this string are qualified. + VmknicNameSpec string `xml:"vmknicNameSpec,omitempty" json:"vmknicNameSpec,omitempty"` +} + +func init() { + t["DVSFilterSpecVmknicConnecteeSpec"] = reflect.TypeOf((*DVSFilterSpecVmknicConnecteeSpec)(nil)).Elem() + minAPIVersionForType["DVSFilterSpecVmknicConnecteeSpec"] = "8.0.3.0" } // Health check capabilities of health check supported by the @@ -19715,7 +19901,6 @@ type DVSHealthCheckCapability struct { func init() { t["DVSHealthCheckCapability"] = reflect.TypeOf((*DVSHealthCheckCapability)(nil)).Elem() - minAPIVersionForType["DVSHealthCheckCapability"] = "5.1" } // The `DVSHealthCheckConfig` data object @@ -19731,7 +19916,6 @@ type DVSHealthCheckConfig struct { func init() { t["DVSHealthCheckConfig"] = reflect.TypeOf((*DVSHealthCheckConfig)(nil)).Elem() - minAPIVersionForType["DVSHealthCheckConfig"] = "5.1" } // This data object type describes the information about the host local port. @@ -19753,7 +19937,6 @@ type DVSHostLocalPortInfo struct { func init() { t["DVSHostLocalPortInfo"] = reflect.TypeOf((*DVSHostLocalPortInfo)(nil)).Elem() - minAPIVersionForType["DVSHostLocalPortInfo"] = "5.1" } // This data object type describes MAC learning policy of a port. @@ -19775,7 +19958,6 @@ type DVSMacLearningPolicy struct { func init() { t["DVSMacLearningPolicy"] = reflect.TypeOf((*DVSMacLearningPolicy)(nil)).Elem() - minAPIVersionForType["DVSMacLearningPolicy"] = "6.7" } // This data object type describes MAC management policy of a port. @@ -19798,7 +19980,6 @@ type DVSMacManagementPolicy struct { func init() { t["DVSMacManagementPolicy"] = reflect.TypeOf((*DVSMacManagementPolicy)(nil)).Elem() - minAPIVersionForType["DVSMacManagementPolicy"] = "6.7" } // Configuration specification for a DistributedVirtualSwitch or @@ -19814,7 +19995,6 @@ type DVSManagerDvsConfigTarget struct { func init() { t["DVSManagerDvsConfigTarget"] = reflect.TypeOf((*DVSManagerDvsConfigTarget)(nil)).Elem() - minAPIVersionForType["DVSManagerDvsConfigTarget"] = "4.0" } // The parameters of `DistributedVirtualSwitchManager.DVSManagerExportEntity_Task`. @@ -19893,6 +20073,7 @@ type DVSManagerLookupDvPortGroupResponse struct { type DVSManagerPhysicalNicsList struct { DynamicData + // Refers instance of `HostSystem`. Host ManagedObjectReference `xml:"host" json:"host"` PhysicalNics []PhysicalNic `xml:"physicalNics,omitempty" json:"physicalNics,omitempty"` } @@ -19928,7 +20109,6 @@ type DVSNameArrayUplinkPortPolicy struct { func init() { t["DVSNameArrayUplinkPortPolicy"] = reflect.TypeOf((*DVSNameArrayUplinkPortPolicy)(nil)).Elem() - minAPIVersionForType["DVSNameArrayUplinkPortPolicy"] = "4.0" } // Dataobject representing the feature capabilities of network resource management @@ -19968,23 +20148,22 @@ type DVSNetworkResourceManagementCapability struct { // Flag to indicate whether Network Resource Control version 3 is supported. // // The API supported by Network Resouce Control version 3 include: - // 1. VM virtual NIC network resource specification - // `VirtualEthernetCardResourceAllocation` - // 2. VM virtual NIC network resource pool specification - // `DVSVmVnicNetworkResourcePool` - // 3. Host infrastructure traffic network resource specification - // `DvsHostInfrastructureTrafficResource` + // 1. VM virtual NIC network resource specification + // `VirtualEthernetCardResourceAllocation` + // 2. VM virtual NIC network resource pool specification + // `DVSVmVnicNetworkResourcePool` + // 3. 
Host infrastructure traffic network resource specification + // `DvsHostInfrastructureTrafficResource` // // Network Resource Control version 3 is supported for Switch Version 6.0 or later. - NetworkResourceControlVersion3Supported *bool `xml:"networkResourceControlVersion3Supported" json:"networkResourceControlVersion3Supported,omitempty" vim:"6.0"` + NetworkResourceControlVersion3Supported *bool `xml:"networkResourceControlVersion3Supported" json:"networkResourceControlVersion3Supported,omitempty"` // Indicates whether user defined infrastructure traffic pool // supported in vSphere Distributed Switch. - UserDefinedInfraTrafficPoolSupported *bool `xml:"userDefinedInfraTrafficPoolSupported" json:"userDefinedInfraTrafficPoolSupported,omitempty" vim:"6.7"` + UserDefinedInfraTrafficPoolSupported *bool `xml:"userDefinedInfraTrafficPoolSupported" json:"userDefinedInfraTrafficPoolSupported,omitempty"` } func init() { t["DVSNetworkResourceManagementCapability"] = reflect.TypeOf((*DVSNetworkResourceManagementCapability)(nil)).Elem() - minAPIVersionForType["DVSNetworkResourceManagementCapability"] = "5.0" } // Deprecated as of vSphere API 6.0 @@ -20013,7 +20192,6 @@ type DVSNetworkResourcePool struct { func init() { t["DVSNetworkResourcePool"] = reflect.TypeOf((*DVSNetworkResourcePool)(nil)).Elem() - minAPIVersionForType["DVSNetworkResourcePool"] = "4.1" } // Resource allocation information for a network resource pool. @@ -20046,12 +20224,11 @@ type DVSNetworkResourcePoolAllocationInfo struct { // // The tag is a priority value // in the range 0..7 for Quality of Service operations on network traffic. - PriorityTag int32 `xml:"priorityTag,omitempty" json:"priorityTag,omitempty" vim:"5.0"` + PriorityTag int32 `xml:"priorityTag,omitempty" json:"priorityTag,omitempty"` } func init() { t["DVSNetworkResourcePoolAllocationInfo"] = reflect.TypeOf((*DVSNetworkResourcePoolAllocationInfo)(nil)).Elem() - minAPIVersionForType["DVSNetworkResourcePoolAllocationInfo"] = "4.1" } // The `DVSNetworkResourcePoolConfigSpec` data object @@ -20090,14 +20267,13 @@ type DVSNetworkResourcePoolConfigSpec struct { // The property is required for // `DistributedVirtualSwitch*.*DistributedVirtualSwitch.AddNetworkResourcePool` // operations. - Name string `xml:"name,omitempty" json:"name,omitempty" vim:"5.0"` + Name string `xml:"name,omitempty" json:"name,omitempty"` // User-defined description for the resource pool. - Description string `xml:"description,omitempty" json:"description,omitempty" vim:"5.0"` + Description string `xml:"description,omitempty" json:"description,omitempty"` } func init() { t["DVSNetworkResourcePoolConfigSpec"] = reflect.TypeOf((*DVSNetworkResourcePoolConfigSpec)(nil)).Elem() - minAPIVersionForType["DVSNetworkResourcePoolConfigSpec"] = "4.1" } // The switch usage policy types @@ -20121,7 +20297,6 @@ type DVSPolicy struct { func init() { t["DVSPolicy"] = reflect.TypeOf((*DVSPolicy)(nil)).Elem() - minAPIVersionForType["DVSPolicy"] = "4.0" } // The `DVSRollbackCapability` data object @@ -20135,7 +20310,6 @@ type DVSRollbackCapability struct { func init() { t["DVSRollbackCapability"] = reflect.TypeOf((*DVSRollbackCapability)(nil)).Elem() - minAPIVersionForType["DVSRollbackCapability"] = "5.1" } // The parameters of `DistributedVirtualSwitch.DVSRollback_Task`. @@ -20169,12 +20343,11 @@ type DVSRuntimeInfo struct { // Runtime information of the hosts that joined the switch. 
HostMemberRuntime []HostMemberRuntimeInfo `xml:"hostMemberRuntime,omitempty" json:"hostMemberRuntime,omitempty"` // The bandwidth reservation information for the switch. - ResourceRuntimeInfo *DvsResourceRuntimeInfo `xml:"resourceRuntimeInfo,omitempty" json:"resourceRuntimeInfo,omitempty" vim:"6.0"` + ResourceRuntimeInfo *DvsResourceRuntimeInfo `xml:"resourceRuntimeInfo,omitempty" json:"resourceRuntimeInfo,omitempty"` } func init() { t["DVSRuntimeInfo"] = reflect.TypeOf((*DVSRuntimeInfo)(nil)).Elem() - minAPIVersionForType["DVSRuntimeInfo"] = "5.1" } // This data object type describes security policy governing ports. @@ -20195,7 +20368,6 @@ type DVSSecurityPolicy struct { func init() { t["DVSSecurityPolicy"] = reflect.TypeOf((*DVSSecurityPolicy)(nil)).Elem() - minAPIVersionForType["DVSSecurityPolicy"] = "4.0" } // Class to specify selection criteria of vSphere Distributed Switch. @@ -20208,7 +20380,6 @@ type DVSSelection struct { func init() { t["DVSSelection"] = reflect.TypeOf((*DVSSelection)(nil)).Elem() - minAPIVersionForType["DVSSelection"] = "5.0" } // Summary of the distributed switch configuration. @@ -20256,12 +20427,11 @@ type DVSSummary struct { // // The value of this property // is not affected by the privileges granted to the current user. - NumHosts int32 `xml:"numHosts,omitempty" json:"numHosts,omitempty" vim:"5.5"` + NumHosts int32 `xml:"numHosts,omitempty" json:"numHosts,omitempty"` } func init() { t["DVSSummary"] = reflect.TypeOf((*DVSSummary)(nil)).Elem() - minAPIVersionForType["DVSSummary"] = "4.0" } // This data object type describes traffic shaping policy. @@ -20284,7 +20454,6 @@ type DVSTrafficShapingPolicy struct { func init() { t["DVSTrafficShapingPolicy"] = reflect.TypeOf((*DVSTrafficShapingPolicy)(nil)).Elem() - minAPIVersionForType["DVSTrafficShapingPolicy"] = "4.0" } // The base class for uplink port policy. @@ -20294,7 +20463,6 @@ type DVSUplinkPortPolicy struct { func init() { t["DVSUplinkPortPolicy"] = reflect.TypeOf((*DVSUplinkPortPolicy)(nil)).Elem() - minAPIVersionForType["DVSUplinkPortPolicy"] = "4.0" } // This data object type describes vendor specific configuration. @@ -20307,7 +20475,6 @@ type DVSVendorSpecificConfig struct { func init() { t["DVSVendorSpecificConfig"] = reflect.TypeOf((*DVSVendorSpecificConfig)(nil)).Elem() - minAPIVersionForType["DVSVendorSpecificConfig"] = "4.0" } // DataObject describing the resource configuration and management of @@ -20329,7 +20496,6 @@ type DVSVmVnicNetworkResourcePool struct { func init() { t["DVSVmVnicNetworkResourcePool"] = reflect.TypeOf((*DVSVmVnicNetworkResourcePool)(nil)).Elem() - minAPIVersionForType["DVSVmVnicNetworkResourcePool"] = "6.0" } // The `DailyTaskScheduler` data object sets the time for daily @@ -20420,7 +20586,6 @@ type DasClusterIsolatedEvent struct { func init() { t["DasClusterIsolatedEvent"] = reflect.TypeOf((*DasClusterIsolatedEvent)(nil)).Elem() - minAPIVersionForType["DasClusterIsolatedEvent"] = "4.0" } // This fault indicates that some error has occurred during the @@ -20433,11 +20598,11 @@ type DasConfigFault struct { // The reason why the HA configuration failed, if known. // // Values should come from `DasConfigFaultDasConfigFaultReason_enum`. - Reason string `xml:"reason,omitempty" json:"reason,omitempty" vim:"4.0"` + Reason string `xml:"reason,omitempty" json:"reason,omitempty"` // The output (stdout/stderr) from executing the configuration. 
- Output string `xml:"output,omitempty" json:"output,omitempty" vim:"4.0"` + Output string `xml:"output,omitempty" json:"output,omitempty"` // The list of events containing details why the configuration failed, if known. - Event []BaseEvent `xml:"event,omitempty,typeattr" json:"event,omitempty" vim:"4.0"` + Event []BaseEvent `xml:"event,omitempty,typeattr" json:"event,omitempty"` } func init() { @@ -20472,13 +20637,14 @@ func init() { type DasHeartbeatDatastoreInfo struct { DynamicData - Datastore ManagedObjectReference `xml:"datastore" json:"datastore"` - Hosts []ManagedObjectReference `xml:"hosts" json:"hosts"` + // Refers instance of `Datastore`. + Datastore ManagedObjectReference `xml:"datastore" json:"datastore"` + // Refers instances of `HostSystem`. + Hosts []ManagedObjectReference `xml:"hosts" json:"hosts"` } func init() { t["DasHeartbeatDatastoreInfo"] = reflect.TypeOf((*DasHeartbeatDatastoreInfo)(nil)).Elem() - minAPIVersionForType["DasHeartbeatDatastoreInfo"] = "5.0" } // This event records when a host failure has been detected by HA. @@ -20542,7 +20708,6 @@ type DatabaseSizeEstimate struct { func init() { t["DatabaseSizeEstimate"] = reflect.TypeOf((*DatabaseSizeEstimate)(nil)).Elem() - minAPIVersionForType["DatabaseSizeEstimate"] = "4.0" } // DatabaseSizeParam contains information about a sample inventory. @@ -20568,7 +20733,6 @@ type DatabaseSizeParam struct { func init() { t["DatabaseSizeParam"] = reflect.TypeOf((*DatabaseSizeParam)(nil)).Elem() - minAPIVersionForType["DatabaseSizeParam"] = "4.0" } // BasicConnectInfo consists of essential information about the host. @@ -20603,7 +20767,6 @@ type DatacenterBasicConnectInfo struct { func init() { t["DatacenterBasicConnectInfo"] = reflect.TypeOf((*DatacenterBasicConnectInfo)(nil)).Elem() - minAPIVersionForType["DatacenterBasicConnectInfo"] = "6.7.1" } // Configuration of the datacenter. @@ -20630,7 +20793,6 @@ type DatacenterConfigInfo struct { func init() { t["DatacenterConfigInfo"] = reflect.TypeOf((*DatacenterConfigInfo)(nil)).Elem() - minAPIVersionForType["DatacenterConfigInfo"] = "5.1" } // Changes to apply to the datacenter configuration. @@ -20657,7 +20819,6 @@ type DatacenterConfigSpec struct { func init() { t["DatacenterConfigSpec"] = reflect.TypeOf((*DatacenterConfigSpec)(nil)).Elem() - minAPIVersionForType["DatacenterConfigSpec"] = "5.1" } type DatacenterCreatedEvent struct { @@ -20678,7 +20839,6 @@ type DatacenterEvent struct { func init() { t["DatacenterEvent"] = reflect.TypeOf((*DatacenterEvent)(nil)).Elem() - minAPIVersionForType["DatacenterEvent"] = "2.5" } // The event argument is a Datacenter object. @@ -20767,37 +20927,39 @@ type DatastoreCapability struct { // They do not support configuring this on a per file basis, so for NAS systems // this value is also false. PerFileThinProvisioningSupported bool `xml:"perFileThinProvisioningSupported" json:"perFileThinProvisioningSupported"` + // Deprecated as of vSphere8.0 U3, and there is no replacement for it. + // // Indicates whether the datastore supports Storage I/O Resource Management. - StorageIORMSupported *bool `xml:"storageIORMSupported" json:"storageIORMSupported,omitempty" vim:"4.1"` + StorageIORMSupported *bool `xml:"storageIORMSupported" json:"storageIORMSupported,omitempty"` // Indicates whether the datastore supports native snapshot feature which is // based on Copy-On-Write. 
- NativeSnapshotSupported *bool `xml:"nativeSnapshotSupported" json:"nativeSnapshotSupported,omitempty" vim:"5.1"` + NativeSnapshotSupported *bool `xml:"nativeSnapshotSupported" json:"nativeSnapshotSupported,omitempty"` // Indicates whether the datastore supports traditional top-level // directory creation. // // See also `DatastoreNamespaceManager`. - TopLevelDirectoryCreateSupported *bool `xml:"topLevelDirectoryCreateSupported" json:"topLevelDirectoryCreateSupported,omitempty" vim:"5.5"` + TopLevelDirectoryCreateSupported *bool `xml:"topLevelDirectoryCreateSupported" json:"topLevelDirectoryCreateSupported,omitempty"` // Indicates whether the datastore supports the Flex-SE(SeSparse) feature. - SeSparseSupported *bool `xml:"seSparseSupported" json:"seSparseSupported,omitempty" vim:"5.5"` + SeSparseSupported *bool `xml:"seSparseSupported" json:"seSparseSupported,omitempty"` // Indicates whether the datastore supports the vmfsSparse feature. // // True for VMFS3/VMFS5/NFS/NFS41, False for VMFS6. // If value is undefined, then it should be read as supported. - VmfsSparseSupported *bool `xml:"vmfsSparseSupported" json:"vmfsSparseSupported,omitempty" vim:"6.5"` + VmfsSparseSupported *bool `xml:"vmfsSparseSupported" json:"vmfsSparseSupported,omitempty"` // Indicates whether the datastore supports the vsanSparse feature. - VsanSparseSupported *bool `xml:"vsanSparseSupported" json:"vsanSparseSupported,omitempty" vim:"6.5"` + VsanSparseSupported *bool `xml:"vsanSparseSupported" json:"vsanSparseSupported,omitempty"` // Deprecated as of vSphere API 8.0, and there is no replacement for it. // // Indicates whether the datastore supports the upit feature. - UpitSupported *bool `xml:"upitSupported" json:"upitSupported,omitempty" vim:"6.5"` + UpitSupported *bool `xml:"upitSupported" json:"upitSupported,omitempty"` // On certain datastores (e.g. // // 2016 PMEM datastore) VMDK expand is not supported. // This field tells user if VMDK on this datastore can be expanded or not. // If value is undefined, then it should be read as supported. - VmdkExpandSupported *bool `xml:"vmdkExpandSupported" json:"vmdkExpandSupported,omitempty" vim:"6.7"` + VmdkExpandSupported *bool `xml:"vmdkExpandSupported" json:"vmdkExpandSupported,omitempty"` // Indicates whether the datastore supports clustered VMDK feature. - ClusteredVmdkSupported *bool `xml:"clusteredVmdkSupported" json:"clusteredVmdkSupported,omitempty" vim:"7.0"` + ClusteredVmdkSupported *bool `xml:"clusteredVmdkSupported" json:"clusteredVmdkSupported,omitempty"` } func init() { @@ -20819,7 +20981,6 @@ type DatastoreCapacityIncreasedEvent struct { func init() { t["DatastoreCapacityIncreasedEvent"] = reflect.TypeOf((*DatastoreCapacityIncreasedEvent)(nil)).Elem() - minAPIVersionForType["DatastoreCapacityIncreasedEvent"] = "4.0" } // This event records when a datastore is removed from VirtualCenter. @@ -20930,7 +21091,6 @@ type DatastoreFileCopiedEvent struct { func init() { t["DatastoreFileCopiedEvent"] = reflect.TypeOf((*DatastoreFileCopiedEvent)(nil)).Elem() - minAPIVersionForType["DatastoreFileCopiedEvent"] = "4.0" } // This event records deletion of a file or directory. 
@@ -20940,7 +21100,6 @@ type DatastoreFileDeletedEvent struct { func init() { t["DatastoreFileDeletedEvent"] = reflect.TypeOf((*DatastoreFileDeletedEvent)(nil)).Elem() - minAPIVersionForType["DatastoreFileDeletedEvent"] = "4.0" } // Base class for events related to datastore file and directory @@ -20955,14 +21114,13 @@ type DatastoreFileEvent struct { // Datastore path of the target file or directory. TargetFile string `xml:"targetFile" json:"targetFile"` // Identifier of the initiator of the file operation. - SourceOfOperation string `xml:"sourceOfOperation,omitempty" json:"sourceOfOperation,omitempty" vim:"6.5"` + SourceOfOperation string `xml:"sourceOfOperation,omitempty" json:"sourceOfOperation,omitempty"` // Indicator whether the datastore file operation succeeded. - Succeeded *bool `xml:"succeeded" json:"succeeded,omitempty" vim:"6.5"` + Succeeded *bool `xml:"succeeded" json:"succeeded,omitempty"` } func init() { t["DatastoreFileEvent"] = reflect.TypeOf((*DatastoreFileEvent)(nil)).Elem() - minAPIVersionForType["DatastoreFileEvent"] = "4.0" } // This event records move of a file or directory. @@ -20977,7 +21135,6 @@ type DatastoreFileMovedEvent struct { func init() { t["DatastoreFileMovedEvent"] = reflect.TypeOf((*DatastoreFileMovedEvent)(nil)).Elem() - minAPIVersionForType["DatastoreFileMovedEvent"] = "4.0" } // Host-specific datastore information. @@ -21004,7 +21161,6 @@ type DatastoreIORMReconfiguredEvent struct { func init() { t["DatastoreIORMReconfiguredEvent"] = reflect.TypeOf((*DatastoreIORMReconfiguredEvent)(nil)).Elem() - minAPIVersionForType["DatastoreIORMReconfiguredEvent"] = "4.1" } // Detailed information about a datastore. @@ -21028,21 +21184,21 @@ type DatastoreInfo struct { // The maximum size of a file that can reside on this file system volume. MaxFileSize int64 `xml:"maxFileSize" json:"maxFileSize"` // The maximum capacity of a virtual disk which can be created on this volume. - MaxVirtualDiskCapacity int64 `xml:"maxVirtualDiskCapacity,omitempty" json:"maxVirtualDiskCapacity,omitempty" vim:"5.5"` + MaxVirtualDiskCapacity int64 `xml:"maxVirtualDiskCapacity,omitempty" json:"maxVirtualDiskCapacity,omitempty"` // The maximum size of a snapshot or a swap file that can reside on this file system volume. - MaxMemoryFileSize int64 `xml:"maxMemoryFileSize,omitempty" json:"maxMemoryFileSize,omitempty" vim:"6.0"` + MaxMemoryFileSize int64 `xml:"maxMemoryFileSize,omitempty" json:"maxMemoryFileSize,omitempty"` // Time when the free-space and capacity values in `DatastoreInfo` and // `DatastoreSummary` were updated. - Timestamp *time.Time `xml:"timestamp" json:"timestamp,omitempty" vim:"4.0"` + Timestamp *time.Time `xml:"timestamp" json:"timestamp,omitempty"` // The unique container ID of the datastore, if applicable. - ContainerId string `xml:"containerId,omitempty" json:"containerId,omitempty" vim:"5.5"` + ContainerId string `xml:"containerId,omitempty" json:"containerId,omitempty"` // vSAN datastore container that this datastore is alias of. // // If this // field is unset then this datastore is not alias of any other vSAN // datastore. // See `DatastoreInfo.containerId`. 
- AliasOf string `xml:"aliasOf,omitempty" json:"aliasOf,omitempty" vim:"6.7.1"` + AliasOf string `xml:"aliasOf,omitempty" json:"aliasOf,omitempty"` } func init() { @@ -21066,7 +21222,6 @@ type DatastoreMountPathDatastorePair struct { func init() { t["DatastoreMountPathDatastorePair"] = reflect.TypeOf((*DatastoreMountPathDatastorePair)(nil)).Elem() - minAPIVersionForType["DatastoreMountPathDatastorePair"] = "4.1" } type DatastoreNamespaceManagerDirectoryInfo struct { @@ -21080,6 +21235,7 @@ type DatastoreNamespaceManagerDirectoryInfo struct { func init() { t["DatastoreNamespaceManagerDirectoryInfo"] = reflect.TypeOf((*DatastoreNamespaceManagerDirectoryInfo)(nil)).Elem() + minAPIVersionForType["DatastoreNamespaceManagerDirectoryInfo"] = "8.0.1.0" } // This exception is thrown if a datastore is not @@ -21211,7 +21367,7 @@ type DatastoreSummary struct { // value. // It can be explicitly refreshed with the `Datastore.RefreshDatastoreStorageInfo` operation. // This property is valid only if `DatastoreSummary.accessible` is true. - Uncommitted int64 `xml:"uncommitted,omitempty" json:"uncommitted,omitempty" vim:"4.0"` + Uncommitted int64 `xml:"uncommitted,omitempty" json:"uncommitted,omitempty"` // The connectivity status of this datastore. // // If this is set to false, meaning the @@ -21240,7 +21396,7 @@ type DatastoreSummary struct { // // The set of // possible values is described in `DatastoreSummaryMaintenanceModeState_enum`. - MaintenanceMode string `xml:"maintenanceMode,omitempty" json:"maintenanceMode,omitempty" vim:"5.0"` + MaintenanceMode string `xml:"maintenanceMode,omitempty" json:"maintenanceMode,omitempty"` } func init() { @@ -21263,7 +21419,6 @@ type DatastoreVVolContainerFailoverPair struct { func init() { t["DatastoreVVolContainerFailoverPair"] = reflect.TypeOf((*DatastoreVVolContainerFailoverPair)(nil)).Elem() - minAPIVersionForType["DatastoreVVolContainerFailoverPair"] = "6.5" } // The `DateTimeProfile` data object represents host date and time configuration. @@ -21277,7 +21432,6 @@ type DateTimeProfile struct { func init() { t["DateTimeProfile"] = reflect.TypeOf((*DateTimeProfile)(nil)).Elem() - minAPIVersionForType["DateTimeProfile"] = "4.0" } type DecodeLicense DecodeLicenseRequestType @@ -21719,6 +21873,7 @@ type DeleteVStorageObjectExRequestType struct { func init() { t["DeleteVStorageObjectExRequestType"] = reflect.TypeOf((*DeleteVStorageObjectExRequestType)(nil)).Elem() + minAPIVersionForType["DeleteVStorageObjectExRequestType"] = "7.0.2.0" } type DeleteVStorageObjectEx_Task DeleteVStorageObjectExRequestType @@ -21865,7 +22020,6 @@ type DeltaDiskFormatNotSupported struct { func init() { t["DeltaDiskFormatNotSupported"] = reflect.TypeOf((*DeltaDiskFormatNotSupported)(nil)).Elem() - minAPIVersionForType["DeltaDiskFormatNotSupported"] = "5.0" } type DeltaDiskFormatNotSupportedFault DeltaDiskFormatNotSupported @@ -21942,11 +22096,15 @@ type DesiredSoftwareSpec struct { // These components would override the components present in // `DesiredSoftwareSpec.vendorAddOnSpec` and `DesiredSoftwareSpec.baseImageSpec`. Components []DesiredSoftwareSpecComponentSpec `xml:"components,omitempty" json:"components,omitempty" vim:"7.0.2.0"` + // Components which should not be part of the desired software + // spec. + // + // These components are not applied on the host. 
+ RemovedComponents []string `xml:"removedComponents,omitempty" json:"removedComponents,omitempty" vim:"8.0.3.0"` } func init() { t["DesiredSoftwareSpec"] = reflect.TypeOf((*DesiredSoftwareSpec)(nil)).Elem() - minAPIVersionForType["DesiredSoftwareSpec"] = "7.0" } // Describes base-image spec for the ESX host. @@ -21959,7 +22117,6 @@ type DesiredSoftwareSpecBaseImageSpec struct { func init() { t["DesiredSoftwareSpecBaseImageSpec"] = reflect.TypeOf((*DesiredSoftwareSpecBaseImageSpec)(nil)).Elem() - minAPIVersionForType["DesiredSoftwareSpecBaseImageSpec"] = "7.0" } // Component information for the ESX host. @@ -21992,7 +22149,6 @@ type DesiredSoftwareSpecVendorAddOnSpec struct { func init() { t["DesiredSoftwareSpecVendorAddOnSpec"] = reflect.TypeOf((*DesiredSoftwareSpecVendorAddOnSpec)(nil)).Elem() - minAPIVersionForType["DesiredSoftwareSpecVendorAddOnSpec"] = "7.0" } // For one of the networks that the virtual machine is using, the corresponding @@ -22033,7 +22189,6 @@ type DestinationVsanDisabled struct { func init() { t["DestinationVsanDisabled"] = reflect.TypeOf((*DestinationVsanDisabled)(nil)).Elem() - minAPIVersionForType["DestinationVsanDisabled"] = "5.5" } type DestinationVsanDisabledFault DestinationVsanDisabled @@ -22344,7 +22499,6 @@ type DeviceBackedVirtualDiskSpec struct { func init() { t["DeviceBackedVirtualDiskSpec"] = reflect.TypeOf((*DeviceBackedVirtualDiskSpec)(nil)).Elem() - minAPIVersionForType["DeviceBackedVirtualDiskSpec"] = "2.5" } // The device is backed by a backing type which is not supported @@ -22364,7 +22518,6 @@ type DeviceBackingNotSupported struct { func init() { t["DeviceBackingNotSupported"] = reflect.TypeOf((*DeviceBackingNotSupported)(nil)).Elem() - minAPIVersionForType["DeviceBackingNotSupported"] = "2.5" } type DeviceBackingNotSupportedFault BaseDeviceBackingNotSupported @@ -22390,7 +22543,6 @@ type DeviceControllerNotSupported struct { func init() { t["DeviceControllerNotSupported"] = reflect.TypeOf((*DeviceControllerNotSupported)(nil)).Elem() - minAPIVersionForType["DeviceControllerNotSupported"] = "2.5" } type DeviceControllerNotSupportedFault DeviceControllerNotSupported @@ -22409,7 +22561,6 @@ type DeviceGroupId struct { func init() { t["DeviceGroupId"] = reflect.TypeOf((*DeviceGroupId)(nil)).Elem() - minAPIVersionForType["DeviceGroupId"] = "6.5" } // A DeviceHotPlugNotSupported exception is thrown if the specified device @@ -22421,7 +22572,6 @@ type DeviceHotPlugNotSupported struct { func init() { t["DeviceHotPlugNotSupported"] = reflect.TypeOf((*DeviceHotPlugNotSupported)(nil)).Elem() - minAPIVersionForType["DeviceHotPlugNotSupported"] = "2.5 U2" } type DeviceHotPlugNotSupportedFault DeviceHotPlugNotSupported @@ -22470,7 +22620,7 @@ type DeviceNotSupported struct { // if this doesn't make sense in the context. For example, // in the `DisallowedMigrationDeviceAttached` context // we already know the problem. 
- Reason string `xml:"reason,omitempty" json:"reason,omitempty" vim:"2.5"` + Reason string `xml:"reason,omitempty" json:"reason,omitempty"` } func init() { @@ -22496,7 +22646,6 @@ type DeviceUnsupportedForVmPlatform struct { func init() { t["DeviceUnsupportedForVmPlatform"] = reflect.TypeOf((*DeviceUnsupportedForVmPlatform)(nil)).Elem() - minAPIVersionForType["DeviceUnsupportedForVmPlatform"] = "2.5 U2" } type DeviceUnsupportedForVmPlatformFault DeviceUnsupportedForVmPlatform @@ -22519,7 +22668,6 @@ type DeviceUnsupportedForVmVersion struct { func init() { t["DeviceUnsupportedForVmVersion"] = reflect.TypeOf((*DeviceUnsupportedForVmVersion)(nil)).Elem() - minAPIVersionForType["DeviceUnsupportedForVmVersion"] = "2.5 U2" } type DeviceUnsupportedForVmVersionFault DeviceUnsupportedForVmVersion @@ -22548,6 +22696,7 @@ type DiagnosticManagerAuditRecordResult struct { func init() { t["DiagnosticManagerAuditRecordResult"] = reflect.TypeOf((*DiagnosticManagerAuditRecordResult)(nil)).Elem() + minAPIVersionForType["DiagnosticManagerAuditRecordResult"] = "7.0.3.0" } // Describes a location of a diagnostic bundle and the server to which @@ -22601,7 +22750,7 @@ type DiagnosticManagerLogDescriptor struct { // // Typical // mime-types include: - // - text/plain - for a plain log file + // - text/plain - for a plain log file MimeType string `xml:"mimeType" json:"mimeType"` // Localized description of log file. Info BaseDescription `xml:"info,typeattr" json:"info"` @@ -22643,7 +22792,6 @@ type DigestNotSupported struct { func init() { t["DigestNotSupported"] = reflect.TypeOf((*DigestNotSupported)(nil)).Elem() - minAPIVersionForType["DigestNotSupported"] = "6.0" } type DigestNotSupportedFault DigestNotSupported @@ -22660,7 +22808,6 @@ type DirectoryNotEmpty struct { func init() { t["DirectoryNotEmpty"] = reflect.TypeOf((*DirectoryNotEmpty)(nil)).Elem() - minAPIVersionForType["DirectoryNotEmpty"] = "5.0" } type DirectoryNotEmptyFault DirectoryNotEmpty @@ -22679,7 +22826,6 @@ type DisableAdminNotSupported struct { func init() { t["DisableAdminNotSupported"] = reflect.TypeOf((*DisableAdminNotSupported)(nil)).Elem() - minAPIVersionForType["DisableAdminNotSupported"] = "2.5" } type DisableAdminNotSupportedFault DisableAdminNotSupported @@ -22903,7 +23049,6 @@ type DisallowedChangeByService struct { func init() { t["DisallowedChangeByService"] = reflect.TypeOf((*DisallowedChangeByService)(nil)).Elem() - minAPIVersionForType["DisallowedChangeByService"] = "5.0" } type DisallowedChangeByServiceFault DisallowedChangeByService @@ -22974,7 +23119,6 @@ type DisallowedOperationOnFailoverHost struct { func init() { t["DisallowedOperationOnFailoverHost"] = reflect.TypeOf((*DisallowedOperationOnFailoverHost)(nil)).Elem() - minAPIVersionForType["DisallowedOperationOnFailoverHost"] = "4.0" } type DisallowedOperationOnFailoverHostFault DisallowedOperationOnFailoverHost @@ -23017,6 +23161,7 @@ type DisconnectNvmeControllerExRequestType struct { func init() { t["DisconnectNvmeControllerExRequestType"] = reflect.TypeOf((*DisconnectNvmeControllerExRequestType)(nil)).Elem() + minAPIVersionForType["DisconnectNvmeControllerExRequestType"] = "7.0.3.0" } type DisconnectNvmeControllerEx_Task DisconnectNvmeControllerExRequestType @@ -23053,7 +23198,6 @@ type DisconnectedHostsBlockingEVC struct { func init() { t["DisconnectedHostsBlockingEVC"] = reflect.TypeOf((*DisconnectedHostsBlockingEVC)(nil)).Elem() - minAPIVersionForType["DisconnectedHostsBlockingEVC"] = "2.5u2" } type DisconnectedHostsBlockingEVCFault DisconnectedHostsBlockingEVC @@ 
-23115,7 +23259,6 @@ type DiskChangeExtent struct { func init() { t["DiskChangeExtent"] = reflect.TypeOf((*DiskChangeExtent)(nil)).Elem() - minAPIVersionForType["DiskChangeExtent"] = "4.0" } // Data structure to describe areas in a disk associated with this VM that have @@ -23141,7 +23284,6 @@ type DiskChangeInfo struct { func init() { t["DiskChangeInfo"] = reflect.TypeOf((*DiskChangeInfo)(nil)).Elem() - minAPIVersionForType["DiskChangeInfo"] = "4.0" } // This data object type contains the crypto information of all disks along @@ -23157,7 +23299,6 @@ type DiskCryptoSpec struct { func init() { t["DiskCryptoSpec"] = reflect.TypeOf((*DiskCryptoSpec)(nil)).Elem() - minAPIVersionForType["DiskCryptoSpec"] = "7.0" } // Fault used for disks which have existing, non-VSAN partitions. @@ -23169,7 +23310,6 @@ type DiskHasPartitions struct { func init() { t["DiskHasPartitions"] = reflect.TypeOf((*DiskHasPartitions)(nil)).Elem() - minAPIVersionForType["DiskHasPartitions"] = "5.5" } type DiskHasPartitionsFault DiskHasPartitions @@ -23188,7 +23328,6 @@ type DiskIsLastRemainingNonSSD struct { func init() { t["DiskIsLastRemainingNonSSD"] = reflect.TypeOf((*DiskIsLastRemainingNonSSD)(nil)).Elem() - minAPIVersionForType["DiskIsLastRemainingNonSSD"] = "5.5" } type DiskIsLastRemainingNonSSDFault DiskIsLastRemainingNonSSD @@ -23207,7 +23346,6 @@ type DiskIsNonLocal struct { func init() { t["DiskIsNonLocal"] = reflect.TypeOf((*DiskIsNonLocal)(nil)).Elem() - minAPIVersionForType["DiskIsNonLocal"] = "5.5" } type DiskIsNonLocalFault DiskIsNonLocal @@ -23226,7 +23364,6 @@ type DiskIsUSB struct { func init() { t["DiskIsUSB"] = reflect.TypeOf((*DiskIsUSB)(nil)).Elem() - minAPIVersionForType["DiskIsUSB"] = "5.5" } type DiskIsUSBFault DiskIsUSB @@ -23244,7 +23381,6 @@ type DiskMoveTypeNotSupported struct { func init() { t["DiskMoveTypeNotSupported"] = reflect.TypeOf((*DiskMoveTypeNotSupported)(nil)).Elem() - minAPIVersionForType["DiskMoveTypeNotSupported"] = "4.0" } type DiskMoveTypeNotSupportedFault DiskMoveTypeNotSupported @@ -23284,7 +23420,6 @@ type DiskTooSmall struct { func init() { t["DiskTooSmall"] = reflect.TypeOf((*DiskTooSmall)(nil)).Elem() - minAPIVersionForType["DiskTooSmall"] = "5.5" } type DiskTooSmallFault DiskTooSmall @@ -23390,17 +23525,16 @@ type DistributedVirtualPort struct { // to resurrect the management network connection on a VMkernel virtual NIC. // You cannot use vCenter Server to reconfigure this port and you cannot // reassign the port. - HostLocalPort *bool `xml:"hostLocalPort" json:"hostLocalPort,omitempty" vim:"5.1"` + HostLocalPort *bool `xml:"hostLocalPort" json:"hostLocalPort,omitempty"` // Populate the Id assigned to vmknic or vnic by external management plane // to port, if the port is connected to the nics. - ExternalId string `xml:"externalId,omitempty" json:"externalId,omitempty" vim:"7.0"` + ExternalId string `xml:"externalId,omitempty" json:"externalId,omitempty"` // Populate the segmentPortId assigned to LSP. 
- SegmentPortId string `xml:"segmentPortId,omitempty" json:"segmentPortId,omitempty" vim:"7.0"` + SegmentPortId string `xml:"segmentPortId,omitempty" json:"segmentPortId,omitempty"` } func init() { t["DistributedVirtualPort"] = reflect.TypeOf((*DistributedVirtualPort)(nil)).Elem() - minAPIVersionForType["DistributedVirtualPort"] = "4.0" } // This class describes a DistributedVirtualPortgroup that a device backing @@ -23428,23 +23562,22 @@ type DistributedVirtualPortgroupInfo struct { Portgroup ManagedObjectReference `xml:"portgroup" json:"portgroup"` // Indicates whether network bandwidth reservation is supported on // the portgroup - NetworkReservationSupported *bool `xml:"networkReservationSupported" json:"networkReservationSupported,omitempty" vim:"6.0"` + NetworkReservationSupported *bool `xml:"networkReservationSupported" json:"networkReservationSupported,omitempty"` // Backing type of portgroup. // // See // `DistributedVirtualPortgroup*.*DistributedVirtualPortgroupBackingType_enum` // for possible values. // The default value is "standard". - BackingType string `xml:"backingType,omitempty" json:"backingType,omitempty" vim:"7.0"` + BackingType string `xml:"backingType,omitempty" json:"backingType,omitempty"` // The logical switch UUID, which is used by NSX portgroup - LogicalSwitchUuid string `xml:"logicalSwitchUuid,omitempty" json:"logicalSwitchUuid,omitempty" vim:"7.0"` + LogicalSwitchUuid string `xml:"logicalSwitchUuid,omitempty" json:"logicalSwitchUuid,omitempty"` // The segment ID of logical switch, which is used by NSX portroup - SegmentId string `xml:"segmentId,omitempty" json:"segmentId,omitempty" vim:"7.0"` + SegmentId string `xml:"segmentId,omitempty" json:"segmentId,omitempty"` } func init() { t["DistributedVirtualPortgroupInfo"] = reflect.TypeOf((*DistributedVirtualPortgroupInfo)(nil)).Elem() - minAPIVersionForType["DistributedVirtualPortgroupInfo"] = "4.0" } // The `DistributedVirtualPortgroupNsxPortgroupOperationResult` @@ -23468,7 +23601,6 @@ type DistributedVirtualPortgroupNsxPortgroupOperationResult struct { func init() { t["DistributedVirtualPortgroupNsxPortgroupOperationResult"] = reflect.TypeOf((*DistributedVirtualPortgroupNsxPortgroupOperationResult)(nil)).Elem() - minAPIVersionForType["DistributedVirtualPortgroupNsxPortgroupOperationResult"] = "7.0" } // The `DistributedVirtualPortgroupProblem` @@ -23484,7 +23616,6 @@ type DistributedVirtualPortgroupProblem struct { func init() { t["DistributedVirtualPortgroupProblem"] = reflect.TypeOf((*DistributedVirtualPortgroupProblem)(nil)).Elem() - minAPIVersionForType["DistributedVirtualPortgroupProblem"] = "7.0" } // The `DistributedVirtualSwitchHostMember` data object represents an ESXi host that @@ -23499,7 +23630,7 @@ type DistributedVirtualSwitchHostMember struct { DynamicData // Host member runtime state. - RuntimeState *DistributedVirtualSwitchHostMemberRuntimeState `xml:"runtimeState,omitempty" json:"runtimeState,omitempty" vim:"5.0"` + RuntimeState *DistributedVirtualSwitchHostMemberRuntimeState `xml:"runtimeState,omitempty" json:"runtimeState,omitempty"` // Host member configuration. Config DistributedVirtualSwitchHostMemberConfigInfo `xml:"config" json:"config"` // Vendor, product and version information for the proxy switch @@ -23522,12 +23653,11 @@ type DistributedVirtualSwitchHostMember struct { // `HostMemberRuntimeInfo*.*HostMemberRuntimeInfo.statusDetail` instead. // // Additional information regarding the host's current status. 
- StatusDetail string `xml:"statusDetail,omitempty" json:"statusDetail,omitempty" vim:"4.1"` + StatusDetail string `xml:"statusDetail,omitempty" json:"statusDetail,omitempty"` } func init() { t["DistributedVirtualSwitchHostMember"] = reflect.TypeOf((*DistributedVirtualSwitchHostMember)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchHostMember"] = "4.0" } // Base class. @@ -23537,7 +23667,6 @@ type DistributedVirtualSwitchHostMemberBacking struct { func init() { t["DistributedVirtualSwitchHostMemberBacking"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberBacking)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchHostMemberBacking"] = "4.0" } // The `DistributedVirtualSwitchHostMemberConfigInfo` data object @@ -23566,21 +23695,21 @@ type DistributedVirtualSwitchHostMemberConfigInfo struct { Backing BaseDistributedVirtualSwitchHostMemberBacking `xml:"backing,typeattr" json:"backing"` // Indicate whether the proxy switch is used by NSX on this particular // host member of the VDS. - NsxSwitch *bool `xml:"nsxSwitch" json:"nsxSwitch,omitempty" vim:"7.0"` + NsxSwitch *bool `xml:"nsxSwitch" json:"nsxSwitch,omitempty"` // Indicate if ENS is enabled for this particular host member of // the VDS. // // It is read only. - EnsEnabled *bool `xml:"ensEnabled" json:"ensEnabled,omitempty" vim:"7.0"` + EnsEnabled *bool `xml:"ensEnabled" json:"ensEnabled,omitempty"` // Indicate if ENS interrupt mode is enabled for this particular host // member of the VDS. // // It is read only. - EnsInterruptEnabled *bool `xml:"ensInterruptEnabled" json:"ensInterruptEnabled,omitempty" vim:"7.0"` + EnsInterruptEnabled *bool `xml:"ensInterruptEnabled" json:"ensInterruptEnabled,omitempty"` // Indicate which transport zones this host joins by this VDS. - TransportZones []DistributedVirtualSwitchHostMemberTransportZoneInfo `xml:"transportZones,omitempty" json:"transportZones,omitempty" vim:"7.0"` + TransportZones []DistributedVirtualSwitchHostMemberTransportZoneInfo `xml:"transportZones,omitempty" json:"transportZones,omitempty"` // Indicate which uplink ports are used by NSX-T. - NsxtUsedUplinkNames []string `xml:"nsxtUsedUplinkNames,omitempty" json:"nsxtUsedUplinkNames,omitempty" vim:"7.0"` + NsxtUsedUplinkNames []string `xml:"nsxtUsedUplinkNames,omitempty" json:"nsxtUsedUplinkNames,omitempty"` // Indicate if network offloading is enabled for this particular host // member of the VDS. // @@ -23590,7 +23719,6 @@ type DistributedVirtualSwitchHostMemberConfigInfo struct { func init() { t["DistributedVirtualSwitchHostMemberConfigInfo"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberConfigInfo)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchHostMemberConfigInfo"] = "4.0" } // Specification to create or reconfigure ESXi host membership @@ -23624,7 +23752,23 @@ type DistributedVirtualSwitchHostMemberConfigSpec struct { func init() { t["DistributedVirtualSwitchHostMemberConfigSpec"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberConfigSpec)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchHostMemberConfigSpec"] = "4.0" +} + +// The runtime state of uplink on the host. +type DistributedVirtualSwitchHostMemberHostUplinkState struct { + DynamicData + + // Name of the uplink. + UplinkName string `xml:"uplinkName" json:"uplinkName"` + // The runtime state of the uplink. + // + // See `DistributedVirtualSwitchHostMemberHostUplinkStateState_enum` for supported values. 
+ State string `xml:"state" json:"state"` +} + +func init() { + t["DistributedVirtualSwitchHostMemberHostUplinkState"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberHostUplinkState)(nil)).Elem() + minAPIVersionForType["DistributedVirtualSwitchHostMemberHostUplinkState"] = "8.0.3.0" } // The `DistributedVirtualSwitchHostMemberPnicBacking` data object @@ -23646,7 +23790,6 @@ type DistributedVirtualSwitchHostMemberPnicBacking struct { func init() { t["DistributedVirtualSwitchHostMemberPnicBacking"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberPnicBacking)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchHostMemberPnicBacking"] = "4.0" } // Specification for an individual physical NIC. @@ -23676,7 +23819,6 @@ type DistributedVirtualSwitchHostMemberPnicSpec struct { func init() { t["DistributedVirtualSwitchHostMemberPnicSpec"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberPnicSpec)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchHostMemberPnicSpec"] = "4.0" } // Runtime state of a host member. @@ -23690,7 +23832,6 @@ type DistributedVirtualSwitchHostMemberRuntimeState struct { func init() { t["DistributedVirtualSwitchHostMemberRuntimeState"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberRuntimeState)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchHostMemberRuntimeState"] = "5.0" } // Transport zone information. @@ -23707,7 +23848,6 @@ type DistributedVirtualSwitchHostMemberTransportZoneInfo struct { func init() { t["DistributedVirtualSwitchHostMemberTransportZoneInfo"] = reflect.TypeOf((*DistributedVirtualSwitchHostMemberTransportZoneInfo)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchHostMemberTransportZoneInfo"] = "7.0" } // This data object type is a subset of `AboutInfo`. @@ -23727,7 +23867,6 @@ type DistributedVirtualSwitchHostProductSpec struct { func init() { t["DistributedVirtualSwitchHostProductSpec"] = reflect.TypeOf((*DistributedVirtualSwitchHostProductSpec)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchHostProductSpec"] = "4.0" } // This class describes a DistributedVirtualSwitch that a device backing @@ -23745,12 +23884,11 @@ type DistributedVirtualSwitchInfo struct { DistributedVirtualSwitch ManagedObjectReference `xml:"distributedVirtualSwitch" json:"distributedVirtualSwitch"` // Indicates whether network bandwidth reservation is supported on // the switch - NetworkReservationSupported *bool `xml:"networkReservationSupported" json:"networkReservationSupported,omitempty" vim:"6.0"` + NetworkReservationSupported *bool `xml:"networkReservationSupported" json:"networkReservationSupported,omitempty"` } func init() { t["DistributedVirtualSwitchInfo"] = reflect.TypeOf((*DistributedVirtualSwitchInfo)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchInfo"] = "4.0" } // This class defines a data structure to hold opaque binary data @@ -23769,7 +23907,6 @@ type DistributedVirtualSwitchKeyedOpaqueBlob struct { func init() { t["DistributedVirtualSwitchKeyedOpaqueBlob"] = reflect.TypeOf((*DistributedVirtualSwitchKeyedOpaqueBlob)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchKeyedOpaqueBlob"] = "4.0" } // This is the return type for the checkCompatibility method. 
@@ -23799,7 +23936,6 @@ type DistributedVirtualSwitchManagerCompatibilityResult struct { func init() { t["DistributedVirtualSwitchManagerCompatibilityResult"] = reflect.TypeOf((*DistributedVirtualSwitchManagerCompatibilityResult)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchManagerCompatibilityResult"] = "4.1" } // This class is used to specify ProductSpec for the DVS. @@ -23820,7 +23956,6 @@ type DistributedVirtualSwitchManagerDvsProductSpec struct { func init() { t["DistributedVirtualSwitchManagerDvsProductSpec"] = reflect.TypeOf((*DistributedVirtualSwitchManagerDvsProductSpec)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchManagerDvsProductSpec"] = "4.1" } // Check host compatibility against all hosts specified in the array. @@ -23835,7 +23970,6 @@ type DistributedVirtualSwitchManagerHostArrayFilter struct { func init() { t["DistributedVirtualSwitchManagerHostArrayFilter"] = reflect.TypeOf((*DistributedVirtualSwitchManagerHostArrayFilter)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchManagerHostArrayFilter"] = "4.1" } // Check host compatibility for all hosts in the container. @@ -23864,7 +23998,6 @@ type DistributedVirtualSwitchManagerHostContainer struct { func init() { t["DistributedVirtualSwitchManagerHostContainer"] = reflect.TypeOf((*DistributedVirtualSwitchManagerHostContainer)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchManagerHostContainer"] = "4.1" } // Check host compatibility against all hosts in this @@ -23878,7 +24011,6 @@ type DistributedVirtualSwitchManagerHostContainerFilter struct { func init() { t["DistributedVirtualSwitchManagerHostContainerFilter"] = reflect.TypeOf((*DistributedVirtualSwitchManagerHostContainerFilter)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchManagerHostContainerFilter"] = "4.1" } // Base class for filters to check host compatibility. @@ -23894,7 +24026,6 @@ type DistributedVirtualSwitchManagerHostDvsFilterSpec struct { func init() { t["DistributedVirtualSwitchManagerHostDvsFilterSpec"] = reflect.TypeOf((*DistributedVirtualSwitchManagerHostDvsFilterSpec)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchManagerHostDvsFilterSpec"] = "4.1" } // Check host compatibility against all hosts in the DVS (or not in the DVS if @@ -23902,12 +24033,12 @@ func init() { type DistributedVirtualSwitchManagerHostDvsMembershipFilter struct { DistributedVirtualSwitchManagerHostDvsFilterSpec + // Refers instance of `DistributedVirtualSwitch`. 
DistributedVirtualSwitch ManagedObjectReference `xml:"distributedVirtualSwitch" json:"distributedVirtualSwitch"` } func init() { t["DistributedVirtualSwitchManagerHostDvsMembershipFilter"] = reflect.TypeOf((*DistributedVirtualSwitchManagerHostDvsMembershipFilter)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchManagerHostDvsMembershipFilter"] = "4.1" } // The `DistributedVirtualSwitchManagerImportResult` @@ -23935,7 +24066,6 @@ type DistributedVirtualSwitchManagerImportResult struct { func init() { t["DistributedVirtualSwitchManagerImportResult"] = reflect.TypeOf((*DistributedVirtualSwitchManagerImportResult)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchManagerImportResult"] = "5.1" } // Describe the network offload specification of a @@ -23980,7 +24110,6 @@ type DistributedVirtualSwitchPortConnectee struct { func init() { t["DistributedVirtualSwitchPortConnectee"] = reflect.TypeOf((*DistributedVirtualSwitchPortConnectee)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchPortConnectee"] = "4.0" } // The `DistributedVirtualSwitchPortConnection` data object represents a connection @@ -24027,7 +24156,6 @@ type DistributedVirtualSwitchPortConnection struct { func init() { t["DistributedVirtualSwitchPortConnection"] = reflect.TypeOf((*DistributedVirtualSwitchPortConnection)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchPortConnection"] = "4.0" } // The criteria specification for selecting ports. @@ -24048,7 +24176,7 @@ type DistributedVirtualSwitchPortCriteria struct { // If set to false, only // non-NSX ports are qualified. // NSX ports are ports of NSX port group. - NsxPort *bool `xml:"nsxPort" json:"nsxPort,omitempty" vim:"7.0"` + NsxPort *bool `xml:"nsxPort" json:"nsxPort,omitempty"` // Deprecated as of vSphere API 5.5. // // If set, only the ports of which the scope covers the entity are @@ -24074,12 +24202,11 @@ type DistributedVirtualSwitchPortCriteria struct { // If set, only the ports that are present in one of the host are qualified. // // Refers instances of `HostSystem`. - Host []ManagedObjectReference `xml:"host,omitempty" json:"host,omitempty" vim:"6.5"` + Host []ManagedObjectReference `xml:"host,omitempty" json:"host,omitempty"` } func init() { t["DistributedVirtualSwitchPortCriteria"] = reflect.TypeOf((*DistributedVirtualSwitchPortCriteria)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchPortCriteria"] = "4.0" } // Statistic data of a DistributedVirtualPort. @@ -24120,15 +24247,14 @@ type DistributedVirtualSwitchPortStatistics struct { PacketsOutException int64 `xml:"packetsOutException" json:"packetsOutException"` // The number of bytes received at a pnic on the behalf of a port's // connectee (inter-host rx). - BytesInFromPnic int64 `xml:"bytesInFromPnic,omitempty" json:"bytesInFromPnic,omitempty" vim:"6.5"` + BytesInFromPnic int64 `xml:"bytesInFromPnic,omitempty" json:"bytesInFromPnic,omitempty"` // The number of bytes transmitted at a pnic on the behalf of a port's // connectee (inter-host tx). - BytesOutToPnic int64 `xml:"bytesOutToPnic,omitempty" json:"bytesOutToPnic,omitempty" vim:"6.5"` + BytesOutToPnic int64 `xml:"bytesOutToPnic,omitempty" json:"bytesOutToPnic,omitempty"` } func init() { t["DistributedVirtualSwitchPortStatistics"] = reflect.TypeOf((*DistributedVirtualSwitchPortStatistics)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchPortStatistics"] = "4.0" } // This data object type is a subset of `AboutInfo`. 
@@ -24164,7 +24290,6 @@ type DistributedVirtualSwitchProductSpec struct { func init() { t["DistributedVirtualSwitchProductSpec"] = reflect.TypeOf((*DistributedVirtualSwitchProductSpec)(nil)).Elem() - minAPIVersionForType["DistributedVirtualSwitchProductSpec"] = "4.0" } type DoesCustomizationSpecExist DoesCustomizationSpecExistRequestType @@ -24198,7 +24323,6 @@ type DomainNotFound struct { func init() { t["DomainNotFound"] = reflect.TypeOf((*DomainNotFound)(nil)).Elem() - minAPIVersionForType["DomainNotFound"] = "4.1" } type DomainNotFoundFault DomainNotFound @@ -24289,6 +24413,7 @@ type DropConnectionsRequestType struct { func init() { t["DropConnectionsRequestType"] = reflect.TypeOf((*DropConnectionsRequestType)(nil)).Elem() + minAPIVersionForType["DropConnectionsRequestType"] = "7.0.1.0" } type DropConnectionsResponse struct { @@ -24312,7 +24437,6 @@ type DrsDisabledOnVm struct { func init() { t["DrsDisabledOnVm"] = reflect.TypeOf((*DrsDisabledOnVm)(nil)).Elem() - minAPIVersionForType["DrsDisabledOnVm"] = "4.0" } type DrsDisabledOnVmFault DrsDisabledOnVm @@ -24344,7 +24468,6 @@ type DrsEnteredStandbyModeEvent struct { func init() { t["DrsEnteredStandbyModeEvent"] = reflect.TypeOf((*DrsEnteredStandbyModeEvent)(nil)).Elem() - minAPIVersionForType["DrsEnteredStandbyModeEvent"] = "2.5" } // This event records that a host has begun the process of @@ -24355,7 +24478,6 @@ type DrsEnteringStandbyModeEvent struct { func init() { t["DrsEnteringStandbyModeEvent"] = reflect.TypeOf((*DrsEnteringStandbyModeEvent)(nil)).Elem() - minAPIVersionForType["DrsEnteringStandbyModeEvent"] = "4.0" } // This event records that Distributed Power Management tried to bring a host out @@ -24366,7 +24488,6 @@ type DrsExitStandbyModeFailedEvent struct { func init() { t["DrsExitStandbyModeFailedEvent"] = reflect.TypeOf((*DrsExitStandbyModeFailedEvent)(nil)).Elem() - minAPIVersionForType["DrsExitStandbyModeFailedEvent"] = "4.0" } // This event records that Distributed Power Management brings this host @@ -24377,7 +24498,6 @@ type DrsExitedStandbyModeEvent struct { func init() { t["DrsExitedStandbyModeEvent"] = reflect.TypeOf((*DrsExitedStandbyModeEvent)(nil)).Elem() - minAPIVersionForType["DrsExitedStandbyModeEvent"] = "2.5" } // This event records that a host has begun the process of @@ -24388,7 +24508,6 @@ type DrsExitingStandbyModeEvent struct { func init() { t["DrsExitingStandbyModeEvent"] = reflect.TypeOf((*DrsExitingStandbyModeEvent)(nil)).Elem() - minAPIVersionForType["DrsExitingStandbyModeEvent"] = "4.0" } // This event records DRS invocation failure. @@ -24398,7 +24517,6 @@ type DrsInvocationFailedEvent struct { func init() { t["DrsInvocationFailedEvent"] = reflect.TypeOf((*DrsInvocationFailedEvent)(nil)).Elem() - minAPIVersionForType["DrsInvocationFailedEvent"] = "4.0" } // This event records that DRS has recovered from failure. @@ -24410,7 +24528,6 @@ type DrsRecoveredFromFailureEvent struct { func init() { t["DrsRecoveredFromFailureEvent"] = reflect.TypeOf((*DrsRecoveredFromFailureEvent)(nil)).Elem() - minAPIVersionForType["DrsRecoveredFromFailureEvent"] = "4.0" } // This event records when resource configuration @@ -24443,7 +24560,6 @@ type DrsRuleComplianceEvent struct { func init() { t["DrsRuleComplianceEvent"] = reflect.TypeOf((*DrsRuleComplianceEvent)(nil)).Elem() - minAPIVersionForType["DrsRuleComplianceEvent"] = "4.1" } // This event records when a virtual machine violates a DRS VM-Host rule. 
@@ -24453,7 +24569,6 @@ type DrsRuleViolationEvent struct { func init() { t["DrsRuleViolationEvent"] = reflect.TypeOf((*DrsRuleViolationEvent)(nil)).Elem() - minAPIVersionForType["DrsRuleViolationEvent"] = "4.1" } // This event records when a virtual machine violates a soft VM-Host rule. @@ -24463,7 +24578,6 @@ type DrsSoftRuleViolationEvent struct { func init() { t["DrsSoftRuleViolationEvent"] = reflect.TypeOf((*DrsSoftRuleViolationEvent)(nil)).Elem() - minAPIVersionForType["DrsSoftRuleViolationEvent"] = "6.0" } // This event records a virtual machine migration that was recommended by DRS. @@ -24482,7 +24596,6 @@ type DrsVmPoweredOnEvent struct { func init() { t["DrsVmPoweredOnEvent"] = reflect.TypeOf((*DrsVmPoweredOnEvent)(nil)).Elem() - minAPIVersionForType["DrsVmPoweredOnEvent"] = "2.5" } // This fault is thrown when DRS tries to migrate a virtual machine to a host, @@ -24498,7 +24611,6 @@ type DrsVmotionIncompatibleFault struct { func init() { t["DrsVmotionIncompatibleFault"] = reflect.TypeOf((*DrsVmotionIncompatibleFault)(nil)).Elem() - minAPIVersionForType["DrsVmotionIncompatibleFault"] = "4.0" } type DrsVmotionIncompatibleFaultFault DrsVmotionIncompatibleFault @@ -24537,7 +24649,6 @@ type DuplicateDisks struct { func init() { t["DuplicateDisks"] = reflect.TypeOf((*DuplicateDisks)(nil)).Elem() - minAPIVersionForType["DuplicateDisks"] = "5.5" } type DuplicateDisksFault DuplicateDisks @@ -24560,7 +24671,6 @@ type DuplicateIpDetectedEvent struct { func init() { t["DuplicateIpDetectedEvent"] = reflect.TypeOf((*DuplicateIpDetectedEvent)(nil)).Elem() - minAPIVersionForType["DuplicateIpDetectedEvent"] = "2.5" } // A DuplicateName exception is thrown because a name already exists @@ -24597,7 +24707,6 @@ type DuplicateVsanNetworkInterface struct { func init() { t["DuplicateVsanNetworkInterface"] = reflect.TypeOf((*DuplicateVsanNetworkInterface)(nil)).Elem() - minAPIVersionForType["DuplicateVsanNetworkInterface"] = "5.5" } type DuplicateVsanNetworkInterfaceFault DuplicateVsanNetworkInterface @@ -24619,7 +24728,6 @@ type DvpgImportEvent struct { func init() { t["DvpgImportEvent"] = reflect.TypeOf((*DvpgImportEvent)(nil)).Elem() - minAPIVersionForType["DvpgImportEvent"] = "5.1" } // This event is generated when a restore operation is @@ -24630,7 +24738,6 @@ type DvpgRestoreEvent struct { func init() { t["DvpgRestoreEvent"] = reflect.TypeOf((*DvpgRestoreEvent)(nil)).Elem() - minAPIVersionForType["DvpgRestoreEvent"] = "5.1" } // This class defines network rule action to accept packets. 
@@ -24640,7 +24747,6 @@ type DvsAcceptNetworkRuleAction struct { func init() { t["DvsAcceptNetworkRuleAction"] = reflect.TypeOf((*DvsAcceptNetworkRuleAction)(nil)).Elem() - minAPIVersionForType["DvsAcceptNetworkRuleAction"] = "5.5" } // Thrown if a vSphere Distributed Switch apply operation failed to set or remove @@ -24654,7 +24760,6 @@ type DvsApplyOperationFault struct { func init() { t["DvsApplyOperationFault"] = reflect.TypeOf((*DvsApplyOperationFault)(nil)).Elem() - minAPIVersionForType["DvsApplyOperationFault"] = "5.1" } type DvsApplyOperationFaultFault DvsApplyOperationFault @@ -24680,7 +24785,6 @@ type DvsApplyOperationFaultFaultOnObject struct { func init() { t["DvsApplyOperationFaultFaultOnObject"] = reflect.TypeOf((*DvsApplyOperationFaultFaultOnObject)(nil)).Elem() - minAPIVersionForType["DvsApplyOperationFaultFaultOnObject"] = "5.1" } // This class defines network rule action to copy the packet to an @@ -24692,7 +24796,6 @@ type DvsCopyNetworkRuleAction struct { func init() { t["DvsCopyNetworkRuleAction"] = reflect.TypeOf((*DvsCopyNetworkRuleAction)(nil)).Elem() - minAPIVersionForType["DvsCopyNetworkRuleAction"] = "5.5" } // A distributed virtual switch was created. @@ -24705,7 +24808,6 @@ type DvsCreatedEvent struct { func init() { t["DvsCreatedEvent"] = reflect.TypeOf((*DvsCreatedEvent)(nil)).Elem() - minAPIVersionForType["DvsCreatedEvent"] = "4.0" } // A distributed virtual switch was destroyed. @@ -24715,7 +24817,6 @@ type DvsDestroyedEvent struct { func init() { t["DvsDestroyedEvent"] = reflect.TypeOf((*DvsDestroyedEvent)(nil)).Elem() - minAPIVersionForType["DvsDestroyedEvent"] = "4.0" } // This class defines network rule action to drop packets. @@ -24725,7 +24826,6 @@ type DvsDropNetworkRuleAction struct { func init() { t["DvsDropNetworkRuleAction"] = reflect.TypeOf((*DvsDropNetworkRuleAction)(nil)).Elem() - minAPIVersionForType["DvsDropNetworkRuleAction"] = "5.5" } // These are dvs-related events. @@ -24735,7 +24835,6 @@ type DvsEvent struct { func init() { t["DvsEvent"] = reflect.TypeOf((*DvsEvent)(nil)).Elem() - minAPIVersionForType["DvsEvent"] = "4.0" } // The event argument is a Host object. @@ -24750,7 +24849,6 @@ type DvsEventArgument struct { func init() { t["DvsEventArgument"] = reflect.TypeOf((*DvsEventArgument)(nil)).Elem() - minAPIVersionForType["DvsEventArgument"] = "4.0" } // Base class for faults that can be thrown while invoking a distributed virtual switch @@ -24761,7 +24859,6 @@ type DvsFault struct { func init() { t["DvsFault"] = reflect.TypeOf((*DvsFault)(nil)).Elem() - minAPIVersionForType["DvsFault"] = "4.0" } type DvsFaultFault BaseDvsFault @@ -24820,7 +24917,6 @@ type DvsFilterConfig struct { func init() { t["DvsFilterConfig"] = reflect.TypeOf((*DvsFilterConfig)(nil)).Elem() - minAPIVersionForType["DvsFilterConfig"] = "5.5" } // The specification to reconfigure Network Filter. @@ -24855,7 +24951,6 @@ type DvsFilterConfigSpec struct { func init() { t["DvsFilterConfigSpec"] = reflect.TypeOf((*DvsFilterConfigSpec)(nil)).Elem() - minAPIVersionForType["DvsFilterConfigSpec"] = "5.5" } // This class defines Network Filter parameter. @@ -24868,7 +24963,6 @@ type DvsFilterParameter struct { func init() { t["DvsFilterParameter"] = reflect.TypeOf((*DvsFilterParameter)(nil)).Elem() - minAPIVersionForType["DvsFilterParameter"] = "5.5" } // This class defines Network Filter Policy. 
@@ -24904,7 +24998,6 @@ type DvsFilterPolicy struct { func init() { t["DvsFilterPolicy"] = reflect.TypeOf((*DvsFilterPolicy)(nil)).Elem() - minAPIVersionForType["DvsFilterPolicy"] = "5.5" } // This class defines network rule action to GRE Encapsulate a packet. @@ -24919,7 +25012,6 @@ type DvsGreEncapNetworkRuleAction struct { func init() { t["DvsGreEncapNetworkRuleAction"] = reflect.TypeOf((*DvsGreEncapNetworkRuleAction)(nil)).Elem() - minAPIVersionForType["DvsGreEncapNetworkRuleAction"] = "5.5" } // Health check status of an switch is changed. @@ -24934,7 +25026,6 @@ type DvsHealthStatusChangeEvent struct { func init() { t["DvsHealthStatusChangeEvent"] = reflect.TypeOf((*DvsHealthStatusChangeEvent)(nil)).Elem() - minAPIVersionForType["DvsHealthStatusChangeEvent"] = "5.1" } // The DVS configuration on the host was synchronized with that of @@ -24949,7 +25040,6 @@ type DvsHostBackInSyncEvent struct { func init() { t["DvsHostBackInSyncEvent"] = reflect.TypeOf((*DvsHostBackInSyncEvent)(nil)).Elem() - minAPIVersionForType["DvsHostBackInSyncEvent"] = "4.0" } // This class defines the resource allocation for a host infrastructure @@ -24972,7 +25062,6 @@ type DvsHostInfrastructureTrafficResource struct { func init() { t["DvsHostInfrastructureTrafficResource"] = reflect.TypeOf((*DvsHostInfrastructureTrafficResource)(nil)).Elem() - minAPIVersionForType["DvsHostInfrastructureTrafficResource"] = "6.0" } // Resource allocation information for a @@ -25008,7 +25097,6 @@ type DvsHostInfrastructureTrafficResourceAllocation struct { func init() { t["DvsHostInfrastructureTrafficResourceAllocation"] = reflect.TypeOf((*DvsHostInfrastructureTrafficResourceAllocation)(nil)).Elem() - minAPIVersionForType["DvsHostInfrastructureTrafficResourceAllocation"] = "6.0" } // A host joined the distributed virtual switch. @@ -25021,7 +25109,6 @@ type DvsHostJoinedEvent struct { func init() { t["DvsHostJoinedEvent"] = reflect.TypeOf((*DvsHostJoinedEvent)(nil)).Elem() - minAPIVersionForType["DvsHostJoinedEvent"] = "4.0" } // A host left the distributed virtual switch. @@ -25034,7 +25121,6 @@ type DvsHostLeftEvent struct { func init() { t["DvsHostLeftEvent"] = reflect.TypeOf((*DvsHostLeftEvent)(nil)).Elem() - minAPIVersionForType["DvsHostLeftEvent"] = "4.0" } // A host has it's status or statusDetail updated. @@ -25055,7 +25141,6 @@ type DvsHostStatusUpdated struct { func init() { t["DvsHostStatusUpdated"] = reflect.TypeOf((*DvsHostStatusUpdated)(nil)).Elem() - minAPIVersionForType["DvsHostStatusUpdated"] = "4.1" } // The `DvsHostVNicProfile` data object describes the IP configuration @@ -25071,7 +25156,6 @@ type DvsHostVNicProfile struct { func init() { t["DvsHostVNicProfile"] = reflect.TypeOf((*DvsHostVNicProfile)(nil)).Elem() - minAPIVersionForType["DvsHostVNicProfile"] = "4.0" } // The DVS configuration on the host diverged from that of @@ -25085,7 +25169,6 @@ type DvsHostWentOutOfSyncEvent struct { func init() { t["DvsHostWentOutOfSyncEvent"] = reflect.TypeOf((*DvsHostWentOutOfSyncEvent)(nil)).Elem() - minAPIVersionForType["DvsHostWentOutOfSyncEvent"] = "4.0" } // This event is generated when a import operation is @@ -25101,7 +25184,6 @@ type DvsImportEvent struct { func init() { t["DvsImportEvent"] = reflect.TypeOf((*DvsImportEvent)(nil)).Elem() - minAPIVersionForType["DvsImportEvent"] = "5.1" } // This class defines the IP Rule Qualifier. 
@@ -25138,7 +25220,6 @@ type DvsIpNetworkRuleQualifier struct { func init() { t["DvsIpNetworkRuleQualifier"] = reflect.TypeOf((*DvsIpNetworkRuleQualifier)(nil)).Elem() - minAPIVersionForType["DvsIpNetworkRuleQualifier"] = "5.5" } // Base class for specifying Ports. @@ -25150,7 +25231,6 @@ type DvsIpPort struct { func init() { t["DvsIpPort"] = reflect.TypeOf((*DvsIpPort)(nil)).Elem() - minAPIVersionForType["DvsIpPort"] = "5.5" } // This class defines a range of Ports. @@ -25165,7 +25245,6 @@ type DvsIpPortRange struct { func init() { t["DvsIpPortRange"] = reflect.TypeOf((*DvsIpPortRange)(nil)).Elem() - minAPIVersionForType["DvsIpPortRange"] = "5.5" } // This class defines network rule action to just log the rule. @@ -25175,7 +25254,6 @@ type DvsLogNetworkRuleAction struct { func init() { t["DvsLogNetworkRuleAction"] = reflect.TypeOf((*DvsLogNetworkRuleAction)(nil)).Elem() - minAPIVersionForType["DvsLogNetworkRuleAction"] = "5.5" } // This class defines the MAC Rule Qualifier. @@ -25205,7 +25283,6 @@ type DvsMacNetworkRuleQualifier struct { func init() { t["DvsMacNetworkRuleQualifier"] = reflect.TypeOf((*DvsMacNetworkRuleQualifier)(nil)).Elem() - minAPIVersionForType["DvsMacNetworkRuleQualifier"] = "5.5" } // This class defines network rule action to MAC Rewrite. @@ -25218,7 +25295,6 @@ type DvsMacRewriteNetworkRuleAction struct { func init() { t["DvsMacRewriteNetworkRuleAction"] = reflect.TypeOf((*DvsMacRewriteNetworkRuleAction)(nil)).Elem() - minAPIVersionForType["DvsMacRewriteNetworkRuleAction"] = "5.5" } // Two distributed virtual switches was merged. @@ -25233,7 +25309,6 @@ type DvsMergedEvent struct { func init() { t["DvsMergedEvent"] = reflect.TypeOf((*DvsMergedEvent)(nil)).Elem() - minAPIVersionForType["DvsMergedEvent"] = "4.0" } // This class is the base class for network rule action. @@ -25243,7 +25318,6 @@ type DvsNetworkRuleAction struct { func init() { t["DvsNetworkRuleAction"] = reflect.TypeOf((*DvsNetworkRuleAction)(nil)).Elem() - minAPIVersionForType["DvsNetworkRuleAction"] = "5.5" } // This class is the base class for identifying network traffic. @@ -25256,7 +25330,6 @@ type DvsNetworkRuleQualifier struct { func init() { t["DvsNetworkRuleQualifier"] = reflect.TypeOf((*DvsNetworkRuleQualifier)(nil)).Elem() - minAPIVersionForType["DvsNetworkRuleQualifier"] = "5.5" } // Thrown if @@ -25274,7 +25347,6 @@ type DvsNotAuthorized struct { func init() { t["DvsNotAuthorized"] = reflect.TypeOf((*DvsNotAuthorized)(nil)).Elem() - minAPIVersionForType["DvsNotAuthorized"] = "4.0" } type DvsNotAuthorizedFault DvsNotAuthorized @@ -25293,7 +25365,6 @@ type DvsOperationBulkFault struct { func init() { t["DvsOperationBulkFault"] = reflect.TypeOf((*DvsOperationBulkFault)(nil)).Elem() - minAPIVersionForType["DvsOperationBulkFault"] = "4.0" } type DvsOperationBulkFaultFault DvsOperationBulkFault @@ -25316,7 +25387,6 @@ type DvsOperationBulkFaultFaultOnHost struct { func init() { t["DvsOperationBulkFaultFaultOnHost"] = reflect.TypeOf((*DvsOperationBulkFaultFaultOnHost)(nil)).Elem() - minAPIVersionForType["DvsOperationBulkFaultFaultOnHost"] = "4.0" } // The host on which the DVS configuration is different from that @@ -25333,7 +25403,6 @@ type DvsOutOfSyncHostArgument struct { func init() { t["DvsOutOfSyncHostArgument"] = reflect.TypeOf((*DvsOutOfSyncHostArgument)(nil)).Elem() - minAPIVersionForType["DvsOutOfSyncHostArgument"] = "4.0" } // A port is blocked in the distributed virtual switch. @@ -25343,18 +25412,17 @@ type DvsPortBlockedEvent struct { // The port key. 
PortKey string `xml:"portKey" json:"portKey"` // Reason for port's current status - StatusDetail string `xml:"statusDetail,omitempty" json:"statusDetail,omitempty" vim:"4.1"` + StatusDetail string `xml:"statusDetail,omitempty" json:"statusDetail,omitempty"` // The port runtime information. - RuntimeInfo *DVPortStatus `xml:"runtimeInfo,omitempty" json:"runtimeInfo,omitempty" vim:"5.1"` + RuntimeInfo *DVPortStatus `xml:"runtimeInfo,omitempty" json:"runtimeInfo,omitempty"` // Previous state of the DvsPort. // // See `DvsEventPortBlockState_enum` - PrevBlockState string `xml:"prevBlockState,omitempty" json:"prevBlockState,omitempty" vim:"6.5"` + PrevBlockState string `xml:"prevBlockState,omitempty" json:"prevBlockState,omitempty"` } func init() { t["DvsPortBlockedEvent"] = reflect.TypeOf((*DvsPortBlockedEvent)(nil)).Elem() - minAPIVersionForType["DvsPortBlockedEvent"] = "4.0" } // A port is connected in the distributed virtual switch. @@ -25369,7 +25437,6 @@ type DvsPortConnectedEvent struct { func init() { t["DvsPortConnectedEvent"] = reflect.TypeOf((*DvsPortConnectedEvent)(nil)).Elem() - minAPIVersionForType["DvsPortConnectedEvent"] = "4.0" } // New ports are created in the distributed virtual switch. @@ -25382,7 +25449,6 @@ type DvsPortCreatedEvent struct { func init() { t["DvsPortCreatedEvent"] = reflect.TypeOf((*DvsPortCreatedEvent)(nil)).Elem() - minAPIVersionForType["DvsPortCreatedEvent"] = "4.0" } // Existing ports are deleted in the distributed virtual switch. @@ -25395,7 +25461,6 @@ type DvsPortDeletedEvent struct { func init() { t["DvsPortDeletedEvent"] = reflect.TypeOf((*DvsPortDeletedEvent)(nil)).Elem() - minAPIVersionForType["DvsPortDeletedEvent"] = "4.0" } // A port is disconnected in the distributed virtual switch. @@ -25410,7 +25475,6 @@ type DvsPortDisconnectedEvent struct { func init() { t["DvsPortDisconnectedEvent"] = reflect.TypeOf((*DvsPortDisconnectedEvent)(nil)).Elem() - minAPIVersionForType["DvsPortDisconnectedEvent"] = "4.0" } // A port has entered passthrough mode on the distributed virtual switch. @@ -25420,12 +25484,11 @@ type DvsPortEnteredPassthruEvent struct { // The port key. PortKey string `xml:"portKey" json:"portKey"` // The port runtime information. - RuntimeInfo *DVPortStatus `xml:"runtimeInfo,omitempty" json:"runtimeInfo,omitempty" vim:"5.1"` + RuntimeInfo *DVPortStatus `xml:"runtimeInfo,omitempty" json:"runtimeInfo,omitempty"` } func init() { t["DvsPortEnteredPassthruEvent"] = reflect.TypeOf((*DvsPortEnteredPassthruEvent)(nil)).Elem() - minAPIVersionForType["DvsPortEnteredPassthruEvent"] = "4.1" } // A port has exited passthrough mode on the distributed virtual switch. @@ -25435,12 +25498,11 @@ type DvsPortExitedPassthruEvent struct { // The port key. PortKey string `xml:"portKey" json:"portKey"` // The port runtime information. - RuntimeInfo *DVPortStatus `xml:"runtimeInfo,omitempty" json:"runtimeInfo,omitempty" vim:"5.1"` + RuntimeInfo *DVPortStatus `xml:"runtimeInfo,omitempty" json:"runtimeInfo,omitempty"` } func init() { t["DvsPortExitedPassthruEvent"] = reflect.TypeOf((*DvsPortExitedPassthruEvent)(nil)).Elem() - minAPIVersionForType["DvsPortExitedPassthruEvent"] = "4.1" } // A port was moved into the distributed virtual portgroup. @@ -25457,7 +25519,6 @@ type DvsPortJoinPortgroupEvent struct { func init() { t["DvsPortJoinPortgroupEvent"] = reflect.TypeOf((*DvsPortJoinPortgroupEvent)(nil)).Elem() - minAPIVersionForType["DvsPortJoinPortgroupEvent"] = "4.0" } // A port was moved out of the distributed virtual portgroup. 
@@ -25474,7 +25535,6 @@ type DvsPortLeavePortgroupEvent struct { func init() { t["DvsPortLeavePortgroupEvent"] = reflect.TypeOf((*DvsPortLeavePortgroupEvent)(nil)).Elem() - minAPIVersionForType["DvsPortLeavePortgroupEvent"] = "4.0" } // A port of which link status is changed to down in the distributed @@ -25485,12 +25545,11 @@ type DvsPortLinkDownEvent struct { // The port key. PortKey string `xml:"portKey" json:"portKey"` // The port runtime information. - RuntimeInfo *DVPortStatus `xml:"runtimeInfo,omitempty" json:"runtimeInfo,omitempty" vim:"5.1"` + RuntimeInfo *DVPortStatus `xml:"runtimeInfo,omitempty" json:"runtimeInfo,omitempty"` } func init() { t["DvsPortLinkDownEvent"] = reflect.TypeOf((*DvsPortLinkDownEvent)(nil)).Elem() - minAPIVersionForType["DvsPortLinkDownEvent"] = "4.0" } // A port of which link status is changed to up in the distributed @@ -25501,12 +25560,11 @@ type DvsPortLinkUpEvent struct { // The port key. PortKey string `xml:"portKey" json:"portKey"` // The port runtime information. - RuntimeInfo *DVPortStatus `xml:"runtimeInfo,omitempty" json:"runtimeInfo,omitempty" vim:"5.1"` + RuntimeInfo *DVPortStatus `xml:"runtimeInfo,omitempty" json:"runtimeInfo,omitempty"` } func init() { t["DvsPortLinkUpEvent"] = reflect.TypeOf((*DvsPortLinkUpEvent)(nil)).Elem() - minAPIVersionForType["DvsPortLinkUpEvent"] = "4.0" } // Existing ports are reconfigured in the distributed virtual switch. @@ -25516,12 +25574,11 @@ type DvsPortReconfiguredEvent struct { // The key of the ports that are reconfigured. PortKey []string `xml:"portKey" json:"portKey"` // The configuration values changed during the reconfiguration. - ConfigChanges []ChangesInfoEventArgument `xml:"configChanges,omitempty" json:"configChanges,omitempty" vim:"6.5"` + ConfigChanges []ChangesInfoEventArgument `xml:"configChanges,omitempty" json:"configChanges,omitempty"` } func init() { t["DvsPortReconfiguredEvent"] = reflect.TypeOf((*DvsPortReconfiguredEvent)(nil)).Elem() - minAPIVersionForType["DvsPortReconfiguredEvent"] = "4.0" } // A port of which runtime information is changed in the vNetwork Distributed @@ -25537,7 +25594,6 @@ type DvsPortRuntimeChangeEvent struct { func init() { t["DvsPortRuntimeChangeEvent"] = reflect.TypeOf((*DvsPortRuntimeChangeEvent)(nil)).Elem() - minAPIVersionForType["DvsPortRuntimeChangeEvent"] = "5.1" } // A port is unblocked in the distributed virtual switch. @@ -25547,16 +25603,15 @@ type DvsPortUnblockedEvent struct { // The port key. PortKey string `xml:"portKey" json:"portKey"` // The port runtime information. - RuntimeInfo *DVPortStatus `xml:"runtimeInfo,omitempty" json:"runtimeInfo,omitempty" vim:"5.1"` + RuntimeInfo *DVPortStatus `xml:"runtimeInfo,omitempty" json:"runtimeInfo,omitempty"` // Previous state of the DvsPort. 
// // See `DvsEventPortBlockState_enum` - PrevBlockState string `xml:"prevBlockState,omitempty" json:"prevBlockState,omitempty" vim:"6.5"` + PrevBlockState string `xml:"prevBlockState,omitempty" json:"prevBlockState,omitempty"` } func init() { t["DvsPortUnblockedEvent"] = reflect.TypeOf((*DvsPortUnblockedEvent)(nil)).Elem() - minAPIVersionForType["DvsPortUnblockedEvent"] = "4.0" } // A port of which vendor specific state is changed in the vNetwork Distributed @@ -25570,7 +25625,6 @@ type DvsPortVendorSpecificStateChangeEvent struct { func init() { t["DvsPortVendorSpecificStateChangeEvent"] = reflect.TypeOf((*DvsPortVendorSpecificStateChangeEvent)(nil)).Elem() - minAPIVersionForType["DvsPortVendorSpecificStateChangeEvent"] = "5.1" } // The `DvsProfile` data object represents the distributed virtual switch @@ -25595,7 +25649,6 @@ type DvsProfile struct { func init() { t["DvsProfile"] = reflect.TypeOf((*DvsProfile)(nil)).Elem() - minAPIVersionForType["DvsProfile"] = "4.0" } // This class defines network rule action to punt. @@ -25608,7 +25661,6 @@ type DvsPuntNetworkRuleAction struct { func init() { t["DvsPuntNetworkRuleAction"] = reflect.TypeOf((*DvsPuntNetworkRuleAction)(nil)).Elem() - minAPIVersionForType["DvsPuntNetworkRuleAction"] = "5.5" } // This class defines network rule action to ratelimit packets. @@ -25621,7 +25673,6 @@ type DvsRateLimitNetworkRuleAction struct { func init() { t["DvsRateLimitNetworkRuleAction"] = reflect.TypeOf((*DvsRateLimitNetworkRuleAction)(nil)).Elem() - minAPIVersionForType["DvsRateLimitNetworkRuleAction"] = "5.5" } // The parameters of `DistributedVirtualSwitch.DvsReconfigureVmVnicNetworkResourcePool_Task`. @@ -25652,12 +25703,11 @@ type DvsReconfiguredEvent struct { // The reconfiguration spec. ConfigSpec BaseDVSConfigSpec `xml:"configSpec,typeattr" json:"configSpec"` // The configuration values changed during the reconfiguration. - ConfigChanges *ChangesInfoEventArgument `xml:"configChanges,omitempty" json:"configChanges,omitempty" vim:"6.5"` + ConfigChanges *ChangesInfoEventArgument `xml:"configChanges,omitempty" json:"configChanges,omitempty"` } func init() { t["DvsReconfiguredEvent"] = reflect.TypeOf((*DvsReconfiguredEvent)(nil)).Elem() - minAPIVersionForType["DvsReconfiguredEvent"] = "4.0" } // A distributed virtual switch was renamed. @@ -25672,7 +25722,6 @@ type DvsRenamedEvent struct { func init() { t["DvsRenamedEvent"] = reflect.TypeOf((*DvsRenamedEvent)(nil)).Elem() - minAPIVersionForType["DvsRenamedEvent"] = "4.0" } // This class defines the bandwidth reservation information for the @@ -25706,7 +25755,6 @@ type DvsResourceRuntimeInfo struct { func init() { t["DvsResourceRuntimeInfo"] = reflect.TypeOf((*DvsResourceRuntimeInfo)(nil)).Elem() - minAPIVersionForType["DvsResourceRuntimeInfo"] = "6.0" } // This event is generated when a restore operation is @@ -25717,7 +25765,6 @@ type DvsRestoreEvent struct { func init() { t["DvsRestoreEvent"] = reflect.TypeOf((*DvsRestoreEvent)(nil)).Elem() - minAPIVersionForType["DvsRestoreEvent"] = "5.1" } // Deprecated as of vSphere API 5.5. 
@@ -25739,7 +25786,6 @@ type DvsScopeViolated struct { func init() { t["DvsScopeViolated"] = reflect.TypeOf((*DvsScopeViolated)(nil)).Elem() - minAPIVersionForType["DvsScopeViolated"] = "4.0" } type DvsScopeViolatedFault DvsScopeViolated @@ -25761,7 +25807,6 @@ type DvsServiceConsoleVNicProfile struct { func init() { t["DvsServiceConsoleVNicProfile"] = reflect.TypeOf((*DvsServiceConsoleVNicProfile)(nil)).Elem() - minAPIVersionForType["DvsServiceConsoleVNicProfile"] = "4.0" } // This class defines a Single Port @@ -25774,7 +25819,6 @@ type DvsSingleIpPort struct { func init() { t["DvsSingleIpPort"] = reflect.TypeOf((*DvsSingleIpPort)(nil)).Elem() - minAPIVersionForType["DvsSingleIpPort"] = "5.5" } // This class defines the System Traffic Qualifier. @@ -25793,7 +25837,6 @@ type DvsSystemTrafficNetworkRuleQualifier struct { func init() { t["DvsSystemTrafficNetworkRuleQualifier"] = reflect.TypeOf((*DvsSystemTrafficNetworkRuleQualifier)(nil)).Elem() - minAPIVersionForType["DvsSystemTrafficNetworkRuleQualifier"] = "5.5" } // This class defines Traffic Filter configuration. @@ -25834,7 +25877,6 @@ type DvsTrafficFilterConfig struct { func init() { t["DvsTrafficFilterConfig"] = reflect.TypeOf((*DvsTrafficFilterConfig)(nil)).Elem() - minAPIVersionForType["DvsTrafficFilterConfig"] = "5.5" } // The specification to reconfigure Traffic Filter. @@ -25869,7 +25911,6 @@ type DvsTrafficFilterConfigSpec struct { func init() { t["DvsTrafficFilterConfigSpec"] = reflect.TypeOf((*DvsTrafficFilterConfigSpec)(nil)).Elem() - minAPIVersionForType["DvsTrafficFilterConfigSpec"] = "5.5" } // This class defines a single rule that will be applied to network traffic. @@ -25907,7 +25948,6 @@ type DvsTrafficRule struct { func init() { t["DvsTrafficRule"] = reflect.TypeOf((*DvsTrafficRule)(nil)).Elem() - minAPIVersionForType["DvsTrafficRule"] = "5.5" } // This class defines a ruleset(set of rules) that will be @@ -25930,7 +25970,6 @@ type DvsTrafficRuleset struct { func init() { t["DvsTrafficRuleset"] = reflect.TypeOf((*DvsTrafficRuleset)(nil)).Elem() - minAPIVersionForType["DvsTrafficRuleset"] = "5.5" } // This class defines network rule action to tag packets(qos,dscp) or @@ -25959,7 +25998,6 @@ type DvsUpdateTagNetworkRuleAction struct { func init() { t["DvsUpdateTagNetworkRuleAction"] = reflect.TypeOf((*DvsUpdateTagNetworkRuleAction)(nil)).Elem() - minAPIVersionForType["DvsUpdateTagNetworkRuleAction"] = "5.5" } // An upgrade for the distributed virtual switch is available. @@ -25972,7 +26010,6 @@ type DvsUpgradeAvailableEvent struct { func init() { t["DvsUpgradeAvailableEvent"] = reflect.TypeOf((*DvsUpgradeAvailableEvent)(nil)).Elem() - minAPIVersionForType["DvsUpgradeAvailableEvent"] = "4.0" } // An upgrade for the distributed virtual switch is in progress. @@ -25985,7 +26022,6 @@ type DvsUpgradeInProgressEvent struct { func init() { t["DvsUpgradeInProgressEvent"] = reflect.TypeOf((*DvsUpgradeInProgressEvent)(nil)).Elem() - minAPIVersionForType["DvsUpgradeInProgressEvent"] = "4.0" } // An upgrade for the distributed virtual switch is rejected. @@ -25998,7 +26034,6 @@ type DvsUpgradeRejectedEvent struct { func init() { t["DvsUpgradeRejectedEvent"] = reflect.TypeOf((*DvsUpgradeRejectedEvent)(nil)).Elem() - minAPIVersionForType["DvsUpgradeRejectedEvent"] = "4.0" } // The distributed virtual switch was upgraded. 
@@ -26011,7 +26046,6 @@ type DvsUpgradedEvent struct { func init() { t["DvsUpgradedEvent"] = reflect.TypeOf((*DvsUpgradedEvent)(nil)).Elem() - minAPIVersionForType["DvsUpgradedEvent"] = "4.0" } // The `DvsVNicProfile` data object is the base object @@ -26031,7 +26065,6 @@ type DvsVNicProfile struct { func init() { t["DvsVNicProfile"] = reflect.TypeOf((*DvsVNicProfile)(nil)).Elem() - minAPIVersionForType["DvsVNicProfile"] = "4.0" } // This class defines the runtime information for the @@ -26078,7 +26111,6 @@ type DvsVmVnicNetworkResourcePoolRuntimeInfo struct { func init() { t["DvsVmVnicNetworkResourcePoolRuntimeInfo"] = reflect.TypeOf((*DvsVmVnicNetworkResourcePoolRuntimeInfo)(nil)).Elem() - minAPIVersionForType["DvsVmVnicNetworkResourcePoolRuntimeInfo"] = "6.0" } // Resource allocation information for a virtual NIC network resource pool. @@ -26093,7 +26125,6 @@ type DvsVmVnicResourceAllocation struct { func init() { t["DvsVmVnicResourceAllocation"] = reflect.TypeOf((*DvsVmVnicResourceAllocation)(nil)).Elem() - minAPIVersionForType["DvsVmVnicResourceAllocation"] = "6.0" } // The configuration specification data object to update the resource configuration @@ -26136,7 +26167,6 @@ type DvsVmVnicResourcePoolConfigSpec struct { func init() { t["DvsVmVnicResourcePoolConfigSpec"] = reflect.TypeOf((*DvsVmVnicResourcePoolConfigSpec)(nil)).Elem() - minAPIVersionForType["DvsVmVnicResourcePoolConfigSpec"] = "6.0" } // This class defines the allocated resource information on a virtual NIC @@ -26157,7 +26187,6 @@ type DvsVnicAllocatedResource struct { func init() { t["DvsVnicAllocatedResource"] = reflect.TypeOf((*DvsVnicAllocatedResource)(nil)).Elem() - minAPIVersionForType["DvsVnicAllocatedResource"] = "6.0" } // DynamicArray is a data object type that represents an array of dynamically-typed @@ -26203,12 +26232,11 @@ type EVCAdmissionFailed struct { // (e.g. // // FeatureRequirementsNotMet faults). 
- Faults []LocalizedMethodFault `xml:"faults,omitempty" json:"faults,omitempty" vim:"5.1"` + Faults []LocalizedMethodFault `xml:"faults,omitempty" json:"faults,omitempty"` } func init() { t["EVCAdmissionFailed"] = reflect.TypeOf((*EVCAdmissionFailed)(nil)).Elem() - minAPIVersionForType["EVCAdmissionFailed"] = "4.0" } // The host's CPU hardware is a family/model that should support the @@ -26224,7 +26252,6 @@ type EVCAdmissionFailedCPUFeaturesForMode struct { func init() { t["EVCAdmissionFailedCPUFeaturesForMode"] = reflect.TypeOf((*EVCAdmissionFailedCPUFeaturesForMode)(nil)).Elem() - minAPIVersionForType["EVCAdmissionFailedCPUFeaturesForMode"] = "4.0" } type EVCAdmissionFailedCPUFeaturesForModeFault EVCAdmissionFailedCPUFeaturesForMode @@ -26241,7 +26268,6 @@ type EVCAdmissionFailedCPUModel struct { func init() { t["EVCAdmissionFailedCPUModel"] = reflect.TypeOf((*EVCAdmissionFailedCPUModel)(nil)).Elem() - minAPIVersionForType["EVCAdmissionFailedCPUModel"] = "4.0" } type EVCAdmissionFailedCPUModelFault EVCAdmissionFailedCPUModel @@ -26262,7 +26288,6 @@ type EVCAdmissionFailedCPUModelForMode struct { func init() { t["EVCAdmissionFailedCPUModelForMode"] = reflect.TypeOf((*EVCAdmissionFailedCPUModelForMode)(nil)).Elem() - minAPIVersionForType["EVCAdmissionFailedCPUModelForMode"] = "4.0" } type EVCAdmissionFailedCPUModelForModeFault EVCAdmissionFailedCPUModelForMode @@ -26284,7 +26309,6 @@ type EVCAdmissionFailedCPUVendor struct { func init() { t["EVCAdmissionFailedCPUVendor"] = reflect.TypeOf((*EVCAdmissionFailedCPUVendor)(nil)).Elem() - minAPIVersionForType["EVCAdmissionFailedCPUVendor"] = "4.0" } type EVCAdmissionFailedCPUVendorFault EVCAdmissionFailedCPUVendor @@ -26301,7 +26325,6 @@ type EVCAdmissionFailedCPUVendorUnknown struct { func init() { t["EVCAdmissionFailedCPUVendorUnknown"] = reflect.TypeOf((*EVCAdmissionFailedCPUVendorUnknown)(nil)).Elem() - minAPIVersionForType["EVCAdmissionFailedCPUVendorUnknown"] = "4.0" } type EVCAdmissionFailedCPUVendorUnknownFault EVCAdmissionFailedCPUVendorUnknown @@ -26324,7 +26347,6 @@ type EVCAdmissionFailedHostDisconnected struct { func init() { t["EVCAdmissionFailedHostDisconnected"] = reflect.TypeOf((*EVCAdmissionFailedHostDisconnected)(nil)).Elem() - minAPIVersionForType["EVCAdmissionFailedHostDisconnected"] = "4.0" } type EVCAdmissionFailedHostDisconnectedFault EVCAdmissionFailedHostDisconnected @@ -26340,7 +26362,6 @@ type EVCAdmissionFailedHostSoftware struct { func init() { t["EVCAdmissionFailedHostSoftware"] = reflect.TypeOf((*EVCAdmissionFailedHostSoftware)(nil)).Elem() - minAPIVersionForType["EVCAdmissionFailedHostSoftware"] = "4.0" } type EVCAdmissionFailedHostSoftwareFault EVCAdmissionFailedHostSoftware @@ -26357,7 +26378,6 @@ type EVCAdmissionFailedHostSoftwareForMode struct { func init() { t["EVCAdmissionFailedHostSoftwareForMode"] = reflect.TypeOf((*EVCAdmissionFailedHostSoftwareForMode)(nil)).Elem() - minAPIVersionForType["EVCAdmissionFailedHostSoftwareForMode"] = "4.0" } type EVCAdmissionFailedHostSoftwareForModeFault EVCAdmissionFailedHostSoftwareForMode @@ -26386,7 +26406,6 @@ type EVCAdmissionFailedVmActive struct { func init() { t["EVCAdmissionFailedVmActive"] = reflect.TypeOf((*EVCAdmissionFailedVmActive)(nil)).Elem() - minAPIVersionForType["EVCAdmissionFailedVmActive"] = "4.0" } type EVCAdmissionFailedVmActiveFault EVCAdmissionFailedVmActive @@ -26401,12 +26420,11 @@ type EVCConfigFault struct { // The faults that caused this EVC test to fail, // such as `FeatureRequirementsNotMet` faults. 
- Faults []LocalizedMethodFault `xml:"faults,omitempty" json:"faults,omitempty" vim:"5.1"` + Faults []LocalizedMethodFault `xml:"faults,omitempty" json:"faults,omitempty"` } func init() { t["EVCConfigFault"] = reflect.TypeOf((*EVCConfigFault)(nil)).Elem() - minAPIVersionForType["EVCConfigFault"] = "2.5u2" } type EVCConfigFaultFault BaseEVCConfigFault @@ -26466,20 +26484,20 @@ type EVCMode struct { // those CPU features are guaranteed, either because the host // hardware naturally matches those features or because CPU feature override // is used to mask out differences and enforce a match. - GuaranteedCPUFeatures []HostCpuIdInfo `xml:"guaranteedCPUFeatures,omitempty" json:"guaranteedCPUFeatures,omitempty" vim:"4.1"` + GuaranteedCPUFeatures []HostCpuIdInfo `xml:"guaranteedCPUFeatures,omitempty" json:"guaranteedCPUFeatures,omitempty"` // Describes the feature capability baseline associated with the EVC mode. // // On the cluster where a particular EVC mode is configured, // these features capabilities are guaranteed, either because the host // hardware naturally matches those features or because feature masks // are used to mask out differences and enforce a match. - FeatureCapability []HostFeatureCapability `xml:"featureCapability,omitempty" json:"featureCapability,omitempty" vim:"5.1"` + FeatureCapability []HostFeatureCapability `xml:"featureCapability,omitempty" json:"featureCapability,omitempty"` // The masks (modifications to a host's feature capabilities) that limit a // host's capabilities to that of the EVC mode baseline. - FeatureMask []HostFeatureMask `xml:"featureMask,omitempty" json:"featureMask,omitempty" vim:"5.1"` + FeatureMask []HostFeatureMask `xml:"featureMask,omitempty" json:"featureMask,omitempty"` // The conditions that must be true of a host's feature capabilities in order // for the host to meet the minimum requirements of the EVC mode baseline. - FeatureRequirement []VirtualMachineFeatureRequirement `xml:"featureRequirement,omitempty" json:"featureRequirement,omitempty" vim:"5.1"` + FeatureRequirement []VirtualMachineFeatureRequirement `xml:"featureRequirement,omitempty" json:"featureRequirement,omitempty"` // CPU hardware vendor required for this mode. Vendor string `xml:"vendor" json:"vendor"` // Identifiers for feature groups that are at least partially present in @@ -26488,7 +26506,7 @@ type EVCMode struct { // Use this property to compare track values from two modes. // Do not use this property to determine the presence or absence of // specific features. - Track []string `xml:"track,omitempty" json:"track,omitempty" vim:"4.1"` + Track []string `xml:"track,omitempty" json:"track,omitempty"` // Index for ordering the set of modes that apply to a given CPU vendor. // // Use this property to compare vendor tier values from two modes. 
@@ -26499,7 +26517,6 @@ type EVCMode struct { func init() { t["EVCMode"] = reflect.TypeOf((*EVCMode)(nil)).Elem() - minAPIVersionForType["EVCMode"] = "4.0" } // An attempt to enable Enhanced VMotion Compatibility on a cluster, or change @@ -26517,7 +26534,6 @@ type EVCModeIllegalByVendor struct { func init() { t["EVCModeIllegalByVendor"] = reflect.TypeOf((*EVCModeIllegalByVendor)(nil)).Elem() - minAPIVersionForType["EVCModeIllegalByVendor"] = "2.5u2" } type EVCModeIllegalByVendorFault EVCModeIllegalByVendor @@ -26545,7 +26561,6 @@ type EVCModeUnsupportedByHosts struct { func init() { t["EVCModeUnsupportedByHosts"] = reflect.TypeOf((*EVCModeUnsupportedByHosts)(nil)).Elem() - minAPIVersionForType["EVCModeUnsupportedByHosts"] = "4.0" } type EVCModeUnsupportedByHostsFault EVCModeUnsupportedByHosts @@ -26571,7 +26586,6 @@ type EVCUnsupportedByHostHardware struct { func init() { t["EVCUnsupportedByHostHardware"] = reflect.TypeOf((*EVCUnsupportedByHostHardware)(nil)).Elem() - minAPIVersionForType["EVCUnsupportedByHostHardware"] = "4.1" } type EVCUnsupportedByHostHardwareFault EVCUnsupportedByHostHardware @@ -26597,7 +26611,6 @@ type EVCUnsupportedByHostSoftware struct { func init() { t["EVCUnsupportedByHostSoftware"] = reflect.TypeOf((*EVCUnsupportedByHostSoftware)(nil)).Elem() - minAPIVersionForType["EVCUnsupportedByHostSoftware"] = "4.1" } type EVCUnsupportedByHostSoftwareFault EVCUnsupportedByHostSoftware @@ -26652,7 +26665,6 @@ type EightHostLimitViolated struct { func init() { t["EightHostLimitViolated"] = reflect.TypeOf((*EightHostLimitViolated)(nil)).Elem() - minAPIVersionForType["EightHostLimitViolated"] = "4.0" } type EightHostLimitViolatedFault EightHostLimitViolated @@ -26688,6 +26700,7 @@ type EmitSyslogMarkRequestType struct { func init() { t["EmitSyslogMarkRequestType"] = reflect.TypeOf((*EmitSyslogMarkRequestType)(nil)).Elem() + minAPIVersionForType["EmitSyslogMarkRequestType"] = "8.0.0.2" } type EmitSyslogMarkResponse struct { @@ -26956,7 +26969,6 @@ type EncryptionKeyRequired struct { func init() { t["EncryptionKeyRequired"] = reflect.TypeOf((*EncryptionKeyRequired)(nil)).Elem() - minAPIVersionForType["EncryptionKeyRequired"] = "6.7" } type EncryptionKeyRequiredFault EncryptionKeyRequired @@ -26999,11 +27011,11 @@ type EnterMaintenanceModeRequestType struct { // reasons: (a) no compatible host found for reregistration, (b) DRS // is disabled for the virtual machine. If set to false, powered-off // virtual machines do not need to be moved. - EvacuatePoweredOffVms *bool `xml:"evacuatePoweredOffVms" json:"evacuatePoweredOffVms,omitempty" vim:"2.5"` + EvacuatePoweredOffVms *bool `xml:"evacuatePoweredOffVms" json:"evacuatePoweredOffVms,omitempty"` // Any additional actions to be taken by the host upon // entering maintenance mode. If omitted, default actions will // be taken as documented in the `HostMaintenanceSpec`. 
- MaintenanceSpec *HostMaintenanceSpec `xml:"maintenanceSpec,omitempty" json:"maintenanceSpec,omitempty" vim:"5.5"` + MaintenanceSpec *HostMaintenanceSpec `xml:"maintenanceSpec,omitempty" json:"maintenanceSpec,omitempty"` } func init() { @@ -27044,7 +27056,6 @@ type EnteredStandbyModeEvent struct { func init() { t["EnteredStandbyModeEvent"] = reflect.TypeOf((*EnteredStandbyModeEvent)(nil)).Elem() - minAPIVersionForType["EnteredStandbyModeEvent"] = "2.5" } // This event records that a host has begun the process of entering @@ -27081,7 +27092,6 @@ type EnteringStandbyModeEvent struct { func init() { t["EnteringStandbyModeEvent"] = reflect.TypeOf((*EnteringStandbyModeEvent)(nil)).Elem() - minAPIVersionForType["EnteringStandbyModeEvent"] = "2.5" } // `EntityBackup` is an abstract data object that contains @@ -27098,7 +27108,6 @@ type EntityBackup struct { func init() { t["EntityBackup"] = reflect.TypeOf((*EntityBackup)(nil)).Elem() - minAPIVersionForType["EntityBackup"] = "5.1" } // The `EntityBackupConfig` data object @@ -27137,14 +27146,14 @@ type EntityBackupConfig struct { ConfigBlob []byte `xml:"configBlob" json:"configBlob"` // Unique identifier of the exported entity or the entity to be restored // through an import operation. - // - If you are importing a virtual distributed switch and the import type is - // `applyToEntitySpecified`, - // set the key to - // `DistributedVirtualSwitch*.*DistributedVirtualSwitch.uuid`. - // - If you are importing a virtual distributed portgroup and the import type is - // `applyToEntitySpecified`, - // set the key to - // `DistributedVirtualPortgroup*.*DistributedVirtualPortgroup.key`. + // - If you are importing a virtual distributed switch and the import type is + // `applyToEntitySpecified`, + // set the key to + // `DistributedVirtualSwitch*.*DistributedVirtualSwitch.uuid`. + // - If you are importing a virtual distributed portgroup and the import type is + // `applyToEntitySpecified`, + // set the key to + // `DistributedVirtualPortgroup*.*DistributedVirtualPortgroup.key`. // // The Server ignores the key value when the import operation creates a new entity. Key string `xml:"key,omitempty" json:"key,omitempty"` @@ -27169,7 +27178,6 @@ type EntityBackupConfig struct { func init() { t["EntityBackupConfig"] = reflect.TypeOf((*EntityBackupConfig)(nil)).Elem() - minAPIVersionForType["EntityBackupConfig"] = "5.1" } // The event argument is a managed entity object. @@ -27201,7 +27209,6 @@ type EntityPrivilege struct { func init() { t["EntityPrivilege"] = reflect.TypeOf((*EntityPrivilege)(nil)).Elem() - minAPIVersionForType["EntityPrivilege"] = "5.5" } // Static strings used for describing an enumerated type. @@ -27216,7 +27223,6 @@ type EnumDescription struct { func init() { t["EnumDescription"] = reflect.TypeOf((*EnumDescription)(nil)).Elem() - minAPIVersionForType["EnumDescription"] = "4.0" } // Represent search criteria and filters on a `VirtualMachineConfigOption` @@ -27239,7 +27245,6 @@ type EnvironmentBrowserConfigOptionQuerySpec struct { func init() { t["EnvironmentBrowserConfigOptionQuerySpec"] = reflect.TypeOf((*EnvironmentBrowserConfigOptionQuerySpec)(nil)).Elem() - minAPIVersionForType["EnvironmentBrowserConfigOptionQuerySpec"] = "6.0" } // This event is a general error event from upgrade. 
@@ -27359,7 +27364,6 @@ type EvaluationLicenseSource struct { func init() { t["EvaluationLicenseSource"] = reflect.TypeOf((*EvaluationLicenseSource)(nil)).Elem() - minAPIVersionForType["EvaluationLicenseSource"] = "2.5" } type EvcManager EvcManagerRequestType @@ -27406,17 +27410,17 @@ type Event struct { // The VirtualMachine object of the event. Vm *VmEventArgument `xml:"vm,omitempty" json:"vm,omitempty"` // The Datastore object of the event. - Ds *DatastoreEventArgument `xml:"ds,omitempty" json:"ds,omitempty" vim:"4.0"` + Ds *DatastoreEventArgument `xml:"ds,omitempty" json:"ds,omitempty"` // The Network object of the event. - Net *NetworkEventArgument `xml:"net,omitempty" json:"net,omitempty" vim:"4.0"` + Net *NetworkEventArgument `xml:"net,omitempty" json:"net,omitempty"` // The DistributedVirtualSwitch object of the event. - Dvs *DvsEventArgument `xml:"dvs,omitempty" json:"dvs,omitempty" vim:"4.0"` + Dvs *DvsEventArgument `xml:"dvs,omitempty" json:"dvs,omitempty"` // A formatted text message describing the event. // // The message may be localized. FullFormattedMessage string `xml:"fullFormattedMessage,omitempty" json:"fullFormattedMessage,omitempty"` // The user entered tag to identify the operations and their side effects - ChangeTag string `xml:"changeTag,omitempty" json:"changeTag,omitempty" vim:"4.0"` + ChangeTag string `xml:"changeTag,omitempty" json:"changeTag,omitempty"` } func init() { @@ -27430,7 +27434,7 @@ type EventAlarmExpression struct { AlarmExpression // The attributes/values to compare. - Comparisons []EventAlarmExpressionComparison `xml:"comparisons,omitempty" json:"comparisons,omitempty" vim:"4.0"` + Comparisons []EventAlarmExpressionComparison `xml:"comparisons,omitempty" json:"comparisons,omitempty"` // Deprecated use eventTypeId instead. // // The type of the event to trigger the alarm on. @@ -27438,11 +27442,11 @@ type EventAlarmExpression struct { // The eventTypeId of the event to match. // // The semantics of how eventTypeId matching is done is as follows: - // - If the event being matched is of type `EventEx` - // or `ExtendedEvent`, then we match this value - // against the eventTypeId (for EventEx) or - // eventId (for ExtendedEvent) member of the Event. - // - Otherwise, we match it against the type of the Event itself. + // - If the event being matched is of type `EventEx` + // or `ExtendedEvent`, then we match this value + // against the eventTypeId (for EventEx) or + // eventId (for ExtendedEvent) member of the Event. + // - Otherwise, we match it against the type of the Event itself. // // Either eventType or eventTypeId _must_ // be set. @@ -27453,15 +27457,15 @@ type EventAlarmExpression struct { // is propagated to child entities in the VirtualCenter inventory depending // on the value of this attribute. If objectType is any of the following, // the alarm is propagated down to all children of that type: - // - A datacenter: `Datacenter`. - // - A cluster of host systems: `ClusterComputeResource`. - // - A single host system: `HostSystem`. - // - A resource pool representing a set of physical resources on a single host: - // `ResourcePool`. - // - A virtual machine: `VirtualMachine`. - // - A datastore: `Datastore`. - // - A network: `Network`. - // - A distributed virtual switch: `DistributedVirtualSwitch`. + // - A datacenter: `Datacenter`. + // - A cluster of host systems: `ClusterComputeResource`. + // - A single host system: `HostSystem`. + // - A resource pool representing a set of physical resources on a single host: + // `ResourcePool`. 
+ // - A virtual machine: `VirtualMachine`. + // - A datastore: `Datastore`. + // - A network: `Network`. + // - A distributed virtual switch: `DistributedVirtualSwitch`. // // If objectType is unspecified or not contained in the above list, // the event alarm is not propagated down to child entities in the @@ -27470,17 +27474,16 @@ type EventAlarmExpression struct { // It is possible to specify an event alarm containing two (or more) different // EventAlarmExpression's which contain different objectTypes. In such a case, // the event is propagated to all child entities with specified type(s). - ObjectType string `xml:"objectType,omitempty" json:"objectType,omitempty" vim:"4.0"` + ObjectType string `xml:"objectType,omitempty" json:"objectType,omitempty"` // The alarm's new state when this condition is evaluated and satisfied. // // If not specified then there is no change to alarm status, and all // actions are fired (rather than those for the transition). - Status ManagedEntityStatus `xml:"status,omitempty" json:"status,omitempty" vim:"4.0"` + Status ManagedEntityStatus `xml:"status,omitempty" json:"status,omitempty"` } func init() { t["EventAlarmExpression"] = reflect.TypeOf((*EventAlarmExpression)(nil)).Elem() - minAPIVersionForType["EventAlarmExpression"] = "2.5" } // Encapsulates Comparison of an event's attribute to a value. @@ -27497,7 +27500,6 @@ type EventAlarmExpressionComparison struct { func init() { t["EventAlarmExpressionComparison"] = reflect.TypeOf((*EventAlarmExpressionComparison)(nil)).Elem() - minAPIVersionForType["EventAlarmExpressionComparison"] = "4.0" } // Describes an available event argument name for an Event type, which @@ -27520,7 +27522,6 @@ type EventArgDesc struct { func init() { t["EventArgDesc"] = reflect.TypeOf((*EventArgDesc)(nil)).Elem() - minAPIVersionForType["EventArgDesc"] = "4.0" } // This is the base type for event argument types. @@ -27546,7 +27547,7 @@ type EventDescription struct { EventInfo []EventDescriptionEventDetail `xml:"eventInfo" json:"eventInfo"` // Localized descriptions of all enumerated types that are used for // member declarations in event classes. - EnumeratedTypes []EnumDescription `xml:"enumeratedTypes,omitempty" json:"enumeratedTypes,omitempty" vim:"4.0"` + EnumeratedTypes []EnumDescription `xml:"enumeratedTypes,omitempty" json:"enumeratedTypes,omitempty"` } func init() { @@ -27578,7 +27579,7 @@ type EventDescriptionEventDetail struct { // // E.g., for `VmPoweredOnEvent`, the eventDescription // in English might say "VM Powered On". - Description string `xml:"description,omitempty" json:"description,omitempty" vim:"4.0"` + Description string `xml:"description,omitempty" json:"description,omitempty"` // A category of events. Category string `xml:"category" json:"category"` // A string that is appropriate in the context of a specific @@ -27631,25 +27632,25 @@ type EventDescriptionEventDetail struct { // For example, the BadUserNameSessionEvent may produce the // following string: // - // - // - // The user could not be logged in because of an unknown or invalid - // user name. - // - // - // The user name was unknown to the system - // Use a user name known to the system user directory - // (On Linux) Check if the user directory is correctly - // configured. 
- // Check the health of the domain controller (if you are using - // Active Directory) - // - // - // The user provided an invalid password - // Supply the correct password - // - // - LongDescription string `xml:"longDescription,omitempty" json:"longDescription,omitempty" vim:"4.1"` + // + // + // The user could not be logged in because of an unknown or invalid + // user name. + // + // + // The user name was unknown to the system + // Use a user name known to the system user directory + // (On Linux) Check if the user directory is correctly + // configured. + // Check the health of the domain controller (if you are using + // Active Directory) + // + // + // The user provided an invalid password + // Supply the correct password + // + // + LongDescription string `xml:"longDescription,omitempty" json:"longDescription,omitempty"` } func init() { @@ -27694,14 +27695,13 @@ type EventEx struct { // the type of the object, if known to the VirtualCenter inventory ObjectType string `xml:"objectType,omitempty" json:"objectType,omitempty"` // The name of the object - ObjectName string `xml:"objectName,omitempty" json:"objectName,omitempty" vim:"4.1"` + ObjectName string `xml:"objectName,omitempty" json:"objectName,omitempty"` // The fault that triggered the event, if any - Fault *LocalizedMethodFault `xml:"fault,omitempty" json:"fault,omitempty" vim:"4.1"` + Fault *LocalizedMethodFault `xml:"fault,omitempty" json:"fault,omitempty"` } func init() { t["EventEx"] = reflect.TypeOf((*EventEx)(nil)).Elem() - minAPIVersionForType["EventEx"] = "4.0" } // Event filter used to query events in the history collector database. @@ -27770,7 +27770,7 @@ type EventFilterSpec struct { // // If not set, or the size of it 0, the tag of an event is // disregarded. A blank string indicates events without tags. - Tag []string `xml:"tag,omitempty" json:"tag,omitempty" vim:"4.0"` + Tag []string `xml:"tag,omitempty" json:"tag,omitempty"` // This property, if set, limits the set of collected events to those // specified types. // @@ -27778,21 +27778,26 @@ type EventFilterSpec struct { // exception may be thrown by `EventManager.CreateCollectorForEvents`. // // The semantics of how eventTypeId matching is done is as follows: - // - If the event being collected is of type `EventEx` - // or `ExtendedEvent`, then we match against the - // eventTypeId (for EventEx) or - // eventId (for ExtendedEvent) member of the Event. - // - Otherwise, we match against the type of the Event itself. + // - If the event being collected is of type `EventEx` + // or `ExtendedEvent`, then we match against the + // eventTypeId (for EventEx) or + // eventId (for ExtendedEvent) member of the Event. + // - Otherwise, we match against the type of the Event itself. // // If neither this property, nor type, is set, events are // collected regardless of their types. - EventTypeId []string `xml:"eventTypeId,omitempty" json:"eventTypeId,omitempty" vim:"4.0"` + EventTypeId []string `xml:"eventTypeId,omitempty" json:"eventTypeId,omitempty"` // This property, if set, specifies the maximum number of returned events. // // If unset, the default maximum number will be used. // Using this property with `EventManager.CreateCollectorForEvents` is more // efficient than a call to `HistoryCollector.SetCollectorPageSize`. - MaxCount int32 `xml:"maxCount,omitempty" json:"maxCount,omitempty" vim:"6.5"` + MaxCount int32 `xml:"maxCount,omitempty" json:"maxCount,omitempty"` + // This property, if set, specifies whether latest page should be populated on Collector creation. 
+ // + // True for delayed population and false for immediate. + // If unset, the latest page is populated immediately. + DelayedInit *bool `xml:"delayedInit" json:"delayedInit,omitempty" vim:"8.0.3.0"` } func init() { @@ -27963,7 +27968,6 @@ type ExitStandbyModeFailedEvent struct { func init() { t["ExitStandbyModeFailedEvent"] = reflect.TypeOf((*ExitStandbyModeFailedEvent)(nil)).Elem() - minAPIVersionForType["ExitStandbyModeFailedEvent"] = "4.0" } // This event records that the host is no longer in @@ -27974,7 +27978,6 @@ type ExitedStandbyModeEvent struct { func init() { t["ExitedStandbyModeEvent"] = reflect.TypeOf((*ExitedStandbyModeEvent)(nil)).Elem() - minAPIVersionForType["ExitedStandbyModeEvent"] = "2.5" } // This event records that a host has begun the process of @@ -27985,7 +27988,6 @@ type ExitingStandbyModeEvent struct { func init() { t["ExitingStandbyModeEvent"] = reflect.TypeOf((*ExitingStandbyModeEvent)(nil)).Elem() - minAPIVersionForType["ExitingStandbyModeEvent"] = "4.0" } type ExpandVmfsDatastore ExpandVmfsDatastoreRequestType @@ -28045,7 +28047,6 @@ type ExpiredAddonLicense struct { func init() { t["ExpiredAddonLicense"] = reflect.TypeOf((*ExpiredAddonLicense)(nil)).Elem() - minAPIVersionForType["ExpiredAddonLicense"] = "2.5" } type ExpiredAddonLicenseFault ExpiredAddonLicense @@ -28062,7 +28063,6 @@ type ExpiredEditionLicense struct { func init() { t["ExpiredEditionLicense"] = reflect.TypeOf((*ExpiredEditionLicense)(nil)).Elem() - minAPIVersionForType["ExpiredEditionLicense"] = "2.5" } type ExpiredEditionLicenseFault ExpiredEditionLicense @@ -28083,7 +28083,6 @@ type ExpiredFeatureLicense struct { func init() { t["ExpiredFeatureLicense"] = reflect.TypeOf((*ExpiredFeatureLicense)(nil)).Elem() - minAPIVersionForType["ExpiredFeatureLicense"] = "2.5" } type ExpiredFeatureLicenseFault BaseExpiredFeatureLicense @@ -28205,7 +28204,6 @@ type ExtExtendedProductInfo struct { func init() { t["ExtExtendedProductInfo"] = reflect.TypeOf((*ExtExtendedProductInfo)(nil)).Elem() - minAPIVersionForType["ExtExtendedProductInfo"] = "5.0" } // This data object contains information about entities managed by this @@ -28239,7 +28237,7 @@ type ExtManagedEntityInfo struct { // This icon will be scaled to 16x16, 32x32, 64x64, and // 128x128 if needed. The icon is shown for all entities of this type // managed by this extension. - IconUrl string `xml:"iconUrl,omitempty" json:"iconUrl,omitempty" vim:"5.1"` + IconUrl string `xml:"iconUrl,omitempty" json:"iconUrl,omitempty"` // Description of this managed entity type. // // This is typically displayed @@ -28250,7 +28248,6 @@ type ExtManagedEntityInfo struct { func init() { t["ExtManagedEntityInfo"] = reflect.TypeOf((*ExtManagedEntityInfo)(nil)).Elem() - minAPIVersionForType["ExtManagedEntityInfo"] = "5.0" } // This data object encapsulates the Solution Manager configuration for @@ -28275,7 +28272,6 @@ type ExtSolutionManagerInfo struct { func init() { t["ExtSolutionManagerInfo"] = reflect.TypeOf((*ExtSolutionManagerInfo)(nil)).Elem() - minAPIVersionForType["ExtSolutionManagerInfo"] = "5.0" } // Deprecated as of vSphere API 5.1. @@ -28297,7 +28293,6 @@ type ExtSolutionManagerInfoTabInfo struct { func init() { t["ExtSolutionManagerInfoTabInfo"] = reflect.TypeOf((*ExtSolutionManagerInfoTabInfo)(nil)).Elem() - minAPIVersionForType["ExtSolutionManagerInfoTabInfo"] = "5.0" } // The parameters of `VcenterVStorageObjectManager.ExtendDisk_Task`. 
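Editor's note: the `EventFilterSpec` hunk above drops the old per-field `vim` version tags and introduces the `DelayedInit` flag, which its tag marks as vSphere 8.0.3.0+. Below is a minimal, caller-side sketch of populating the updated spec; it uses only the fields visible in this diff, and the collector plumbing (client login, `CreateCollectorForEvents`) is deliberately omitted.

```go
package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	// Cap the number of returned events and defer populating the collector's
	// latest page until it is first read (DelayedInit is only honoured by
	// endpoints new enough for the vim:"8.0.3.0" tag shown above).
	filter := types.EventFilterSpec{
		EventTypeId: []string{"VmPoweredOnEvent"},
		MaxCount:    100,
		DelayedInit: types.NewBool(true),
	}
	fmt.Printf("%+v\n", filter)
}
```

Leaving `DelayedInit` nil keeps the documented pre-8.0.3.0 behaviour of populating the latest page immediately.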
@@ -28404,7 +28399,7 @@ type ExtendVirtualDiskRequestType struct { NewCapacityKb int64 `xml:"newCapacityKb" json:"newCapacityKb"` // If true, the extended part of the disk will be // explicitly filled with zeroes. - EagerZero *bool `xml:"eagerZero" json:"eagerZero,omitempty" vim:"4.0"` + EagerZero *bool `xml:"eagerZero" json:"eagerZero,omitempty"` } func init() { @@ -28513,7 +28508,6 @@ type ExtendedEvent struct { func init() { t["ExtendedEvent"] = reflect.TypeOf((*ExtendedEvent)(nil)).Elem() - minAPIVersionForType["ExtendedEvent"] = "2.5" } // key/value pair @@ -28526,7 +28520,6 @@ type ExtendedEventPair struct { func init() { t["ExtendedEventPair"] = reflect.TypeOf((*ExtendedEventPair)(nil)).Elem() - minAPIVersionForType["ExtendedEventPair"] = "2.5" } // This fault is the container for faults logged by extensions. @@ -28541,7 +28534,6 @@ type ExtendedFault struct { func init() { t["ExtendedFault"] = reflect.TypeOf((*ExtendedFault)(nil)).Elem() - minAPIVersionForType["ExtendedFault"] = "2.5" } type ExtendedFaultFault ExtendedFault @@ -28567,16 +28559,16 @@ type Extension struct { // Extension names can only contain characters belonging to the // lower ASCII character set (UTF-7) with the exception of the // following characters: - // 1. All whitespace characters ("space" - ascii character 0x20 is allowed) - // 2. Control characters - // 3. Comma (ascii 0x2c), Forward slash (ascii 0x2f), Backward slash (ascii 0x5c), - // Hash/Pound (ascii 0x23), Plus (ascii 0x2b), Greater (ascii 0x3e), Lesser (ascii 0x3c), - // Equals (ascii 0x3d), Semi-colon (ascii 0x3b) and Double quote (ascii 0x22). + // 1. All whitespace characters ("space" - ascii character 0x20 is allowed) + // 2. Control characters + // 3. Comma (ascii 0x2c), Forward slash (ascii 0x2f), Backward slash (ascii 0x5c), + // Hash/Pound (ascii 0x23), Plus (ascii 0x2b), Greater (ascii 0x3e), Lesser (ascii 0x3c), + // Equals (ascii 0x3d), Semi-colon (ascii 0x3b) and Double quote (ascii 0x22). Key string `xml:"key" json:"key"` // Company information. - Company string `xml:"company,omitempty" json:"company,omitempty" vim:"4.0"` + Company string `xml:"company,omitempty" json:"company,omitempty"` // Type of extension (example may include CP-DVS, NUOVA-DVS, etc.). - Type string `xml:"type,omitempty" json:"type,omitempty" vim:"4.0"` + Type string `xml:"type,omitempty" json:"type,omitempty"` // Extension version number as a dot-separated string. // // For example, "1.0.0" @@ -28600,31 +28592,30 @@ type Extension struct { // Last extension heartbeat time. LastHeartbeatTime time.Time `xml:"lastHeartbeatTime" json:"lastHeartbeatTime"` // Health specification provided by this extension. - HealthInfo *ExtensionHealthInfo `xml:"healthInfo,omitempty" json:"healthInfo,omitempty" vim:"4.0"` + HealthInfo *ExtensionHealthInfo `xml:"healthInfo,omitempty" json:"healthInfo,omitempty"` // OVF consumer specification provided by this extension. - OvfConsumerInfo *ExtensionOvfConsumerInfo `xml:"ovfConsumerInfo,omitempty" json:"ovfConsumerInfo,omitempty" vim:"5.0"` + OvfConsumerInfo *ExtensionOvfConsumerInfo `xml:"ovfConsumerInfo,omitempty" json:"ovfConsumerInfo,omitempty"` // Extended product information, such as URLs to vendor, product, etc. - ExtendedProductInfo *ExtExtendedProductInfo `xml:"extendedProductInfo,omitempty" json:"extendedProductInfo,omitempty" vim:"5.0"` + ExtendedProductInfo *ExtExtendedProductInfo `xml:"extendedProductInfo,omitempty" json:"extendedProductInfo,omitempty"` // Information about entities managed by this extension. 
// // An extension can // register virtual machines as managed by itself, by setting the // `managedBy` property of the virtual // machine. - ManagedEntityInfo []ExtManagedEntityInfo `xml:"managedEntityInfo,omitempty" json:"managedEntityInfo,omitempty" vim:"5.0"` + ManagedEntityInfo []ExtManagedEntityInfo `xml:"managedEntityInfo,omitempty" json:"managedEntityInfo,omitempty"` // Opt-in to the Solution Manager. // // If set to true, this extension will be // shown in the Solution Manager. If not set, or set to false, this extension // is not shown in the Solution Manager. - ShownInSolutionManager *bool `xml:"shownInSolutionManager" json:"shownInSolutionManager,omitempty" vim:"5.0"` + ShownInSolutionManager *bool `xml:"shownInSolutionManager" json:"shownInSolutionManager,omitempty"` // Solution Manager configuration for this extension. - SolutionManagerInfo *ExtSolutionManagerInfo `xml:"solutionManagerInfo,omitempty" json:"solutionManagerInfo,omitempty" vim:"5.0"` + SolutionManagerInfo *ExtSolutionManagerInfo `xml:"solutionManagerInfo,omitempty" json:"solutionManagerInfo,omitempty"` } func init() { t["Extension"] = reflect.TypeOf((*Extension)(nil)).Elem() - minAPIVersionForType["Extension"] = "2.5" } // This data object type describes a client of the extension. @@ -28647,7 +28638,6 @@ type ExtensionClientInfo struct { func init() { t["ExtensionClientInfo"] = reflect.TypeOf((*ExtensionClientInfo)(nil)).Elem() - minAPIVersionForType["ExtensionClientInfo"] = "2.5" } // This data object type describes event types defined by the extension. @@ -28663,43 +28653,44 @@ type ExtensionEventTypeInfo struct { // // The structure of this descriptor is: // - // - // eventID - // Optional description for event eventID - // <-- Optional arguments: --> - // - // <-- Zero or more of: --> - // - // argName - // argtype - // - // - // + // + // eventID + // Optional description for event eventID + // <-- Optional arguments: --> + // + // <-- Zero or more of: --> + // + // argName + // argtype + // + // + // + // // where _argtype_ can be one of the following: - // - This is an example list and should be considered as incomplete. + // - This is an example list and should be considered as incomplete. + // // - // - Primitive types: - // - _string_ - // - _bool_ - // - _int_ - // - _long_ - // - _float_ - // - _moid_ - // - Entity reference types: - // - _vm_ - // - _host_ - // - _resourcepool_ - // - _computeresource_ - // - _datacenter_ - // - _datastore_ - // - _network_ - // - _dvs_ - EventTypeSchema string `xml:"eventTypeSchema,omitempty" json:"eventTypeSchema,omitempty" vim:"4.0"` + // - Primitive types: + // - _string_ + // - _bool_ + // - _int_ + // - _long_ + // - _float_ + // - _moid_ + // - Entity reference types: + // - _vm_ + // - _host_ + // - _resourcepool_ + // - _computeresource_ + // - _datacenter_ + // - _datastore_ + // - _network_ + // - _dvs_ + EventTypeSchema string `xml:"eventTypeSchema,omitempty" json:"eventTypeSchema,omitempty"` } func init() { t["ExtensionEventTypeInfo"] = reflect.TypeOf((*ExtensionEventTypeInfo)(nil)).Elem() - minAPIVersionForType["ExtensionEventTypeInfo"] = "2.5" } // This data object type describes fault types defined by the extension. 
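Editor's note: the reindented `Extension.Key` doc comment in the hunk above enumerates the characters that are rejected in an extension key. The helper below is a caller-side illustration of exactly those rules (lower ASCII, space is the only permitted whitespace, no control characters, none of `, / \ # + > < = ; "`); it is not part of govmomi's API.

```go
package main

import (
	"fmt"
	"strings"
	"unicode"
)

// validExtensionKey applies the rules from the Extension.Key doc comment:
// lower-ASCII only, no whitespace other than a plain space (0x20), no
// control characters, and none of , / \ # + > < = ; ".
func validExtensionKey(key string) bool {
	const forbidden = `,/\#+><=;"`
	for _, r := range key {
		switch {
		case r > unicode.MaxASCII:
			return false
		case unicode.IsControl(r):
			return false
		case unicode.IsSpace(r) && r != ' ':
			return false
		case strings.ContainsRune(forbidden, r):
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(validExtensionKey("com.example.backup")) // true
	fmt.Println(validExtensionKey("bad;key"))            // false
}
```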
@@ -28715,7 +28706,6 @@ type ExtensionFaultTypeInfo struct { func init() { t["ExtensionFaultTypeInfo"] = reflect.TypeOf((*ExtensionFaultTypeInfo)(nil)).Elem() - minAPIVersionForType["ExtensionFaultTypeInfo"] = "2.5" } // This data object encapsulates the health specification for the @@ -28728,7 +28718,6 @@ type ExtensionHealthInfo struct { func init() { t["ExtensionHealthInfo"] = reflect.TypeOf((*ExtensionHealthInfo)(nil)).Elem() - minAPIVersionForType["ExtensionHealthInfo"] = "4.0" } // This data object type contains usage information about an @@ -28745,7 +28734,6 @@ type ExtensionManagerIpAllocationUsage struct { func init() { t["ExtensionManagerIpAllocationUsage"] = reflect.TypeOf((*ExtensionManagerIpAllocationUsage)(nil)).Elem() - minAPIVersionForType["ExtensionManagerIpAllocationUsage"] = "5.1" } // This data object contains configuration for extensions that also extend the OVF @@ -28780,7 +28768,6 @@ type ExtensionOvfConsumerInfo struct { func init() { t["ExtensionOvfConsumerInfo"] = reflect.TypeOf((*ExtensionOvfConsumerInfo)(nil)).Elem() - minAPIVersionForType["ExtensionOvfConsumerInfo"] = "5.0" } // This data object type describes privileges defined by the extension. @@ -28808,7 +28795,6 @@ type ExtensionPrivilegeInfo struct { func init() { t["ExtensionPrivilegeInfo"] = reflect.TypeOf((*ExtensionPrivilegeInfo)(nil)).Elem() - minAPIVersionForType["ExtensionPrivilegeInfo"] = "2.5" } // This data object encapsulates the message resources for all locales. @@ -28825,7 +28811,6 @@ type ExtensionResourceInfo struct { func init() { t["ExtensionResourceInfo"] = reflect.TypeOf((*ExtensionResourceInfo)(nil)).Elem() - minAPIVersionForType["ExtensionResourceInfo"] = "2.5" } // This data object type describes a server for the extension. @@ -28843,7 +28828,7 @@ type ExtensionServerInfo struct { // Extension administrator email addresses. AdminEmail []string `xml:"adminEmail" json:"adminEmail"` // Thumbprint of the extension server certificate presented to clients - ServerThumbprint string `xml:"serverThumbprint,omitempty" json:"serverThumbprint,omitempty" vim:"4.1"` + ServerThumbprint string `xml:"serverThumbprint,omitempty" json:"serverThumbprint,omitempty"` // X.509 certificate of the extension server presented to clients in PEM // format according to RFC 7468 ServerCertificate string `xml:"serverCertificate,omitempty" json:"serverCertificate,omitempty" vim:"8.0.2.0"` @@ -28851,7 +28836,6 @@ type ExtensionServerInfo struct { func init() { t["ExtensionServerInfo"] = reflect.TypeOf((*ExtensionServerInfo)(nil)).Elem() - minAPIVersionForType["ExtensionServerInfo"] = "2.5" } // This data object type describes task types defined by the extension. 
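Editor's note: in the `ExtensionServerInfo` hunk above, `ServerThumbprint` loses its `vim:"4.1"` tag while the PEM `ServerCertificate` field keeps its `vim:"8.0.2.0"` gate. Here is a construction sketch under those assumptions; the email, thumbprint and certificate values are placeholders, and fields of the type not shown in this diff are left at their zero values.

```go
package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	// Prefer the full PEM certificate where the endpoint understands it
	// (8.0.2.0+ per the vim tag); keep the thumbprint for older vCenters.
	srv := types.ExtensionServerInfo{
		AdminEmail:        []string{"admin@example.com"}, // placeholder
		ServerThumbprint:  "AA:BB:CC:DD",                 // placeholder
		ServerCertificate: "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----",
	}
	fmt.Printf("%+v\n", srv)
}
```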
@@ -28867,7 +28851,6 @@ type ExtensionTaskTypeInfo struct { func init() { t["ExtensionTaskTypeInfo"] = reflect.TypeOf((*ExtensionTaskTypeInfo)(nil)).Elem() - minAPIVersionForType["ExtensionTaskTypeInfo"] = "2.5" } type ExtractOvfEnvironment ExtractOvfEnvironmentRequestType @@ -28906,7 +28889,6 @@ type FailToEnableSPBM struct { func init() { t["FailToEnableSPBM"] = reflect.TypeOf((*FailToEnableSPBM)(nil)).Elem() - minAPIVersionForType["FailToEnableSPBM"] = "5.0" } type FailToEnableSPBMFault FailToEnableSPBM @@ -28934,7 +28916,6 @@ type FailToLockFaultToleranceVMs struct { func init() { t["FailToLockFaultToleranceVMs"] = reflect.TypeOf((*FailToLockFaultToleranceVMs)(nil)).Elem() - minAPIVersionForType["FailToLockFaultToleranceVMs"] = "4.1" } type FailToLockFaultToleranceVMsFault FailToLockFaultToleranceVMs @@ -28991,7 +28972,6 @@ type FaultDomainId struct { func init() { t["FaultDomainId"] = reflect.TypeOf((*FaultDomainId)(nil)).Elem() - minAPIVersionForType["FaultDomainId"] = "6.5" } // More than one VM in the same fault tolerance group are placed on the same host @@ -29008,7 +28988,6 @@ type FaultToleranceAntiAffinityViolated struct { func init() { t["FaultToleranceAntiAffinityViolated"] = reflect.TypeOf((*FaultToleranceAntiAffinityViolated)(nil)).Elem() - minAPIVersionForType["FaultToleranceAntiAffinityViolated"] = "4.0" } type FaultToleranceAntiAffinityViolatedFault FaultToleranceAntiAffinityViolated @@ -29032,7 +29011,6 @@ type FaultToleranceCannotEditMem struct { func init() { t["FaultToleranceCannotEditMem"] = reflect.TypeOf((*FaultToleranceCannotEditMem)(nil)).Elem() - minAPIVersionForType["FaultToleranceCannotEditMem"] = "4.1" } type FaultToleranceCannotEditMemFault FaultToleranceCannotEditMem @@ -29062,12 +29040,11 @@ type FaultToleranceConfigInfo struct { ConfigPaths []string `xml:"configPaths" json:"configPaths"` // Indicates whether a secondary VM is orphaned (no longer associated with // the primary VM). - Orphaned *bool `xml:"orphaned" json:"orphaned,omitempty" vim:"6.0"` + Orphaned *bool `xml:"orphaned" json:"orphaned,omitempty"` } func init() { t["FaultToleranceConfigInfo"] = reflect.TypeOf((*FaultToleranceConfigInfo)(nil)).Elem() - minAPIVersionForType["FaultToleranceConfigInfo"] = "4.0" } // FaultToleranceConfigSpec contains information about the metadata file @@ -29079,11 +29056,26 @@ type FaultToleranceConfigSpec struct { MetaDataPath *FaultToleranceMetaSpec `xml:"metaDataPath,omitempty" json:"metaDataPath,omitempty"` // Placement information for secondary SecondaryVmSpec *FaultToleranceVMConfigSpec `xml:"secondaryVmSpec,omitempty" json:"secondaryVmSpec,omitempty"` + // Indicates whether FT Metro Cluster is enabled/disabled. + // + // \- If TRUE, FT Metro Cluster is enabled for the VM. An implicit + // Anti-HostGroup will be generated from HostGroup defined for FT + // primary, then affine the primary with one HostGroup and affine the + // secondary with another HostGroup. + // \- If FALSE or unset, FT Metro Cluster is disabled for the VM. Both FT + // primary and secondary will be put in the same HostGroup. + MetroFtEnabled *bool `xml:"metroFtEnabled" json:"metroFtEnabled,omitempty" vim:"8.0.3.0"` + // Indicate the Host Group (`ClusterHostGroup`) for FT + // Metro Cluster enabled Virtual Machine. + // + // Based on the selected Host Group, FT can divide the hosts in the cluster + // into two groups and ensure to place FT primary and FT secondary in + // different groups. 
+ MetroFtHostGroup string `xml:"metroFtHostGroup,omitempty" json:"metroFtHostGroup,omitempty" vim:"8.0.3.0"` } func init() { t["FaultToleranceConfigSpec"] = reflect.TypeOf((*FaultToleranceConfigSpec)(nil)).Elem() - minAPIVersionForType["FaultToleranceConfigSpec"] = "6.0" } // Convenience subclass for calling out some named features among the @@ -29101,7 +29093,6 @@ type FaultToleranceCpuIncompatible struct { func init() { t["FaultToleranceCpuIncompatible"] = reflect.TypeOf((*FaultToleranceCpuIncompatible)(nil)).Elem() - minAPIVersionForType["FaultToleranceCpuIncompatible"] = "4.0" } type FaultToleranceCpuIncompatibleFault FaultToleranceCpuIncompatible @@ -29125,7 +29116,6 @@ type FaultToleranceDiskSpec struct { func init() { t["FaultToleranceDiskSpec"] = reflect.TypeOf((*FaultToleranceDiskSpec)(nil)).Elem() - minAPIVersionForType["FaultToleranceDiskSpec"] = "6.0" } // This data object encapsulates the Datastore for the shared metadata file @@ -29141,7 +29131,6 @@ type FaultToleranceMetaSpec struct { func init() { t["FaultToleranceMetaSpec"] = reflect.TypeOf((*FaultToleranceMetaSpec)(nil)).Elem() - minAPIVersionForType["FaultToleranceMetaSpec"] = "6.0" } // Fault Tolerance VM requires thick disks @@ -29154,7 +29143,6 @@ type FaultToleranceNeedsThickDisk struct { func init() { t["FaultToleranceNeedsThickDisk"] = reflect.TypeOf((*FaultToleranceNeedsThickDisk)(nil)).Elem() - minAPIVersionForType["FaultToleranceNeedsThickDisk"] = "4.1" } type FaultToleranceNeedsThickDiskFault FaultToleranceNeedsThickDisk @@ -29176,7 +29164,6 @@ type FaultToleranceNotLicensed struct { func init() { t["FaultToleranceNotLicensed"] = reflect.TypeOf((*FaultToleranceNotLicensed)(nil)).Elem() - minAPIVersionForType["FaultToleranceNotLicensed"] = "4.0" } type FaultToleranceNotLicensedFault FaultToleranceNotLicensed @@ -29196,7 +29183,6 @@ type FaultToleranceNotSameBuild struct { func init() { t["FaultToleranceNotSameBuild"] = reflect.TypeOf((*FaultToleranceNotSameBuild)(nil)).Elem() - minAPIVersionForType["FaultToleranceNotSameBuild"] = "4.0" } type FaultToleranceNotSameBuildFault FaultToleranceNotSameBuild @@ -29210,12 +29196,12 @@ func init() { type FaultTolerancePrimaryConfigInfo struct { FaultToleranceConfigInfo + // Refers instances of `VirtualMachine`. Secondaries []ManagedObjectReference `xml:"secondaries" json:"secondaries"` } func init() { t["FaultTolerancePrimaryConfigInfo"] = reflect.TypeOf((*FaultTolerancePrimaryConfigInfo)(nil)).Elem() - minAPIVersionForType["FaultTolerancePrimaryConfigInfo"] = "4.0" } // This fault is used to report that VirtualCenter did not attempt to power on @@ -29236,7 +29222,6 @@ type FaultTolerancePrimaryPowerOnNotAttempted struct { func init() { t["FaultTolerancePrimaryPowerOnNotAttempted"] = reflect.TypeOf((*FaultTolerancePrimaryPowerOnNotAttempted)(nil)).Elem() - minAPIVersionForType["FaultTolerancePrimaryPowerOnNotAttempted"] = "4.0" } type FaultTolerancePrimaryPowerOnNotAttemptedFault FaultTolerancePrimaryPowerOnNotAttempted @@ -29250,12 +29235,12 @@ func init() { type FaultToleranceSecondaryConfigInfo struct { FaultToleranceConfigInfo + // Refers instance of `VirtualMachine`. 
PrimaryVM ManagedObjectReference `xml:"primaryVM" json:"primaryVM"` } func init() { t["FaultToleranceSecondaryConfigInfo"] = reflect.TypeOf((*FaultToleranceSecondaryConfigInfo)(nil)).Elem() - minAPIVersionForType["FaultToleranceSecondaryConfigInfo"] = "4.0" } // FaultToleranceSecondaryOpResult is a data object that reports on @@ -29292,7 +29277,6 @@ type FaultToleranceSecondaryOpResult struct { func init() { t["FaultToleranceSecondaryOpResult"] = reflect.TypeOf((*FaultToleranceSecondaryOpResult)(nil)).Elem() - minAPIVersionForType["FaultToleranceSecondaryOpResult"] = "4.0" } // FaultToleranceVMConfigSpec contains information about placement of @@ -29313,7 +29297,6 @@ type FaultToleranceVMConfigSpec struct { func init() { t["FaultToleranceVMConfigSpec"] = reflect.TypeOf((*FaultToleranceVMConfigSpec)(nil)).Elem() - minAPIVersionForType["FaultToleranceVMConfigSpec"] = "6.0" } // A FaultToleranceVmNotDasProtected fault occurs when an Fault Tolerance VM @@ -29332,7 +29315,6 @@ type FaultToleranceVmNotDasProtected struct { func init() { t["FaultToleranceVmNotDasProtected"] = reflect.TypeOf((*FaultToleranceVmNotDasProtected)(nil)).Elem() - minAPIVersionForType["FaultToleranceVmNotDasProtected"] = "5.0" } type FaultToleranceVmNotDasProtectedFault FaultToleranceVmNotDasProtected @@ -29358,7 +29340,6 @@ type FaultsByHost struct { func init() { t["FaultsByHost"] = reflect.TypeOf((*FaultsByHost)(nil)).Elem() - minAPIVersionForType["FaultsByHost"] = "6.7" } // VM specific faults. @@ -29378,7 +29359,6 @@ type FaultsByVM struct { func init() { t["FaultsByVM"] = reflect.TypeOf((*FaultsByVM)(nil)).Elem() - minAPIVersionForType["FaultsByVM"] = "6.7" } // This data object type describes an FCoE configuration as it pertains @@ -29412,7 +29392,6 @@ type FcoeConfig struct { func init() { t["FcoeConfig"] = reflect.TypeOf((*FcoeConfig)(nil)).Elem() - minAPIVersionForType["FcoeConfig"] = "5.0" } // Flags which indicate what parameters are settable for this FcoeConfig. @@ -29426,7 +29405,6 @@ type FcoeConfigFcoeCapabilities struct { func init() { t["FcoeConfigFcoeCapabilities"] = reflect.TypeOf((*FcoeConfigFcoeCapabilities)(nil)).Elem() - minAPIVersionForType["FcoeConfigFcoeCapabilities"] = "5.0" } // An FcoeSpecification contains values relevant to issuing FCoE discovery. @@ -29453,7 +29431,6 @@ type FcoeConfigFcoeSpecification struct { func init() { t["FcoeConfigFcoeSpecification"] = reflect.TypeOf((*FcoeConfigFcoeSpecification)(nil)).Elem() - minAPIVersionForType["FcoeConfigFcoeSpecification"] = "5.0" } // Used to represent inclusive intervals of VLAN IDs. @@ -29469,7 +29446,6 @@ type FcoeConfigVlanRange struct { func init() { t["FcoeConfigVlanRange"] = reflect.TypeOf((*FcoeConfigVlanRange)(nil)).Elem() - minAPIVersionForType["FcoeConfigVlanRange"] = "5.0" } // Deprecated as of vSphere API 8.0. Software FCoE not supported. 
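Editor's note: the functional addition in the hunks above is the pair of FT Metro Cluster fields on `FaultToleranceConfigSpec`, both tagged `vim:"8.0.3.0"`. A minimal sketch of filling them in, assuming only what the diff shows: the host-group name is a hypothetical `ClusterHostGroup` name, and the optional metadata/secondary-placement specs are left unset.

```go
package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	// Enable FT Metro Cluster: the primary is affined with the named host
	// group and the secondary with the implicitly generated anti-group,
	// per the field documentation above (vSphere 8.0.3.0+).
	spec := types.FaultToleranceConfigSpec{
		MetroFtEnabled:   types.NewBool(true),
		MetroFtHostGroup: "site-a-hosts", // hypothetical ClusterHostGroup name
	}
	fmt.Printf("%+v\n", spec)
}
```

The new `FtVmHostRuleViolation` fault added further down in this diff describes the corresponding placement violation when a power-on or VMotion would break the Metro Cluster rule.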
@@ -29481,7 +29457,6 @@ type FcoeFault struct { func init() { t["FcoeFault"] = reflect.TypeOf((*FcoeFault)(nil)).Elem() - minAPIVersionForType["FcoeFault"] = "5.0" } type FcoeFaultFault BaseFcoeFault @@ -29502,7 +29477,6 @@ type FcoeFaultPnicHasNoPortSet struct { func init() { t["FcoeFaultPnicHasNoPortSet"] = reflect.TypeOf((*FcoeFaultPnicHasNoPortSet)(nil)).Elem() - minAPIVersionForType["FcoeFaultPnicHasNoPortSet"] = "5.0" } type FcoeFaultPnicHasNoPortSetFault FcoeFaultPnicHasNoPortSet @@ -29569,7 +29543,6 @@ type FeatureRequirementsNotMet struct { func init() { t["FeatureRequirementsNotMet"] = reflect.TypeOf((*FeatureRequirementsNotMet)(nil)).Elem() - minAPIVersionForType["FeatureRequirementsNotMet"] = "5.1" } type FeatureRequirementsNotMetFault FeatureRequirementsNotMet @@ -29595,6 +29568,7 @@ type FetchAuditRecordsRequestType struct { func init() { t["FetchAuditRecordsRequestType"] = reflect.TypeOf((*FetchAuditRecordsRequestType)(nil)).Elem() + minAPIVersionForType["FetchAuditRecordsRequestType"] = "7.0.3.0" } type FetchAuditRecordsResponse struct { @@ -29717,7 +29691,6 @@ type FileBackedPortNotSupported struct { func init() { t["FileBackedPortNotSupported"] = reflect.TypeOf((*FileBackedPortNotSupported)(nil)).Elem() - minAPIVersionForType["FileBackedPortNotSupported"] = "2.5" } type FileBackedPortNotSupportedFault FileBackedPortNotSupported @@ -29740,14 +29713,13 @@ type FileBackedVirtualDiskSpec struct { // interact with it. // This is an optional parameter and if user doesn't specify profile, // the default behavior will apply. - Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr" json:"profile,omitempty" vim:"5.5"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr" json:"profile,omitempty"` // Encryption options for the new virtual disk. - Crypto BaseCryptoSpec `xml:"crypto,omitempty,typeattr" json:"crypto,omitempty" vim:"6.5"` + Crypto BaseCryptoSpec `xml:"crypto,omitempty,typeattr" json:"crypto,omitempty"` } func init() { t["FileBackedVirtualDiskSpec"] = reflect.TypeOf((*FileBackedVirtualDiskSpec)(nil)).Elem() - minAPIVersionForType["FileBackedVirtualDiskSpec"] = "2.5" } // The common base type for all file-related exceptions. @@ -29781,13 +29753,13 @@ type FileInfo struct { // The path relative to the folder path in the search results. Path string `xml:"path" json:"path"` // User friendly name. - FriendlyName string `xml:"friendlyName,omitempty" json:"friendlyName,omitempty" vim:"6.5"` + FriendlyName string `xml:"friendlyName,omitempty" json:"friendlyName,omitempty"` // The size of the file in bytes. FileSize int64 `xml:"fileSize,omitempty" json:"fileSize,omitempty"` // The last date and time the file was modified. Modification *time.Time `xml:"modification" json:"modification,omitempty"` // The user name of the owner of the file. - Owner string `xml:"owner,omitempty" json:"owner,omitempty" vim:"4.0"` + Owner string `xml:"owner,omitempty" json:"owner,omitempty"` } func init() { @@ -29835,6 +29807,7 @@ type FileLockInfoResult struct { func init() { t["FileLockInfoResult"] = reflect.TypeOf((*FileLockInfoResult)(nil)).Elem() + minAPIVersionForType["FileLockInfoResult"] = "8.0.2.0" } // Thrown if an attempt is made to lock a file that is already in use. 
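Editor's note: several hunks in this region add `minAPIVersionForType` entries for newer request/result types (`FetchAuditRecordsRequestType` at 7.0.3.0 above, `FileLockInfoResult` at 8.0.2.0) while older types lose theirs. That map is internal to the generated package, so a caller wanting to guard a newer call has to keep its own threshold and compare it against `ServiceContent.About.ApiVersion`. The helper below is only an illustrative sketch (it ignores non-numeric suffixes such as the "2.5u2" value seen elsewhere in this file) and is not govmomi API.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// atLeast reports whether dotted version a is >= b, comparing numeric
// components (e.g. "8.0.1.0" >= "7.0.3.0"). Missing components count as 0.
func atLeast(a, b string) bool {
	as, bs := strings.Split(a, "."), strings.Split(b, ".")
	for i := 0; i < len(as) || i < len(bs); i++ {
		var ai, bi int
		if i < len(as) {
			ai, _ = strconv.Atoi(as[i])
		}
		if i < len(bs) {
			bi, _ = strconv.Atoi(bs[i])
		}
		if ai != bi {
			return ai > bi
		}
	}
	return true
}

func main() {
	// In real code apiVersion would come from client.ServiceContent.About.ApiVersion.
	apiVersion := "8.0.1.0"
	fmt.Println(atLeast(apiVersion, "7.0.3.0")) // true: audit-record fetching is available
	fmt.Println(atLeast(apiVersion, "8.0.2.0")) // false: FileLockInfoResult is not
}
```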
@@ -29860,7 +29833,6 @@ type FileNameTooLong struct { func init() { t["FileNameTooLong"] = reflect.TypeOf((*FileNameTooLong)(nil)).Elem() - minAPIVersionForType["FileNameTooLong"] = "5.0" } type FileNameTooLongFault FileNameTooLong @@ -29934,7 +29906,7 @@ type FileQueryFlags struct { // last modified. Modification bool `xml:"modification" json:"modification"` // The flag to indicate whether or not to return the file owner. - FileOwner *bool `xml:"fileOwner" json:"fileOwner,omitempty" vim:"4.0"` + FileOwner *bool `xml:"fileOwner" json:"fileOwner,omitempty"` } func init() { @@ -29956,7 +29928,6 @@ type FileTooLarge struct { func init() { t["FileTooLarge"] = reflect.TypeOf((*FileTooLarge)(nil)).Elem() - minAPIVersionForType["FileTooLarge"] = "2.5" } type FileTooLargeFault FileTooLarge @@ -29985,7 +29956,6 @@ type FileTransferInformation struct { // Multiple GET requests cannot be sent to the URL simultaneously. URL // will become invalid once a successful GET request is sent. // - // // The host part of the URL is returned as '\*' if the hostname to be used // is the name of the server to which the call was made. For example, if // the call is made to esx-svr-1.domain1.com, and the file is available for @@ -29996,7 +29966,6 @@ type FileTransferInformation struct { // The client replaces the asterisk with the server name on which it // invoked the call. // - // // The URL is valid only for 10 minutes from the time it is generated. // Also, the URL becomes invalid whenever the virtual machine is powered // off, suspended or unregistered. @@ -30005,7 +29974,6 @@ type FileTransferInformation struct { func init() { t["FileTransferInformation"] = reflect.TypeOf((*FileTransferInformation)(nil)).Elem() - minAPIVersionForType["FileTransferInformation"] = "5.0" } // This fault is thrown when creating a quiesced snapshot failed @@ -30041,7 +30009,6 @@ type FilterInUse struct { func init() { t["FilterInUse"] = reflect.TypeOf((*FilterInUse)(nil)).Elem() - minAPIVersionForType["FilterInUse"] = "6.0" } type FilterInUseFault FilterInUse @@ -30300,7 +30267,7 @@ type FindByUuidRequestType struct { // for virtual machines whose instance UUID matches the given uuid. // Otherwise, search for virtual machines whose BIOS UUID matches the given // uuid. - InstanceUuid *bool `xml:"instanceUuid" json:"instanceUuid,omitempty" vim:"4.0"` + InstanceUuid *bool `xml:"instanceUuid" json:"instanceUuid,omitempty"` } func init() { @@ -30396,7 +30363,6 @@ type FirewallProfile struct { func init() { t["FirewallProfile"] = reflect.TypeOf((*FirewallProfile)(nil)).Elem() - minAPIVersionForType["FirewallProfile"] = "4.0" } type FirewallProfileRulesetProfile struct { @@ -30622,7 +30588,6 @@ type FtIssuesOnHost struct { func init() { t["FtIssuesOnHost"] = reflect.TypeOf((*FtIssuesOnHost)(nil)).Elem() - minAPIVersionForType["FtIssuesOnHost"] = "4.0" } type FtIssuesOnHostFault FtIssuesOnHost @@ -30631,6 +30596,32 @@ func init() { t["FtIssuesOnHostFault"] = reflect.TypeOf((*FtIssuesOnHostFault)(nil)).Elem() } +// The virtual machine if powered on or VMotioned, would violate an +// FT VM-Host rule. +type FtVmHostRuleViolation struct { + VmConfigFault + + // The vm that can not be powered on or VMotioned without violating FT Metro + // Cluster placement rule. + VmName string `xml:"vmName" json:"vmName"` + // The host that the virtual machine can not be powered on without violating + // FT Metro Cluster placement rule. 
+ HostName string `xml:"hostName" json:"hostName"` + // Indicate the Host Group for FT Metro Cluster enabled Virtual Machine. + HostGroup string `xml:"hostGroup" json:"hostGroup"` +} + +func init() { + t["FtVmHostRuleViolation"] = reflect.TypeOf((*FtVmHostRuleViolation)(nil)).Elem() + minAPIVersionForType["FtVmHostRuleViolation"] = "8.0.3.0" +} + +type FtVmHostRuleViolationFault FtVmHostRuleViolation + +func init() { + t["FtVmHostRuleViolationFault"] = reflect.TypeOf((*FtVmHostRuleViolationFault)(nil)).Elem() +} + // An operation on a powered-on virtual machine requests a simultaneous change // of storage location and execution host, but the host does not have that // capability. @@ -30640,7 +30631,6 @@ type FullStorageVMotionNotSupported struct { func init() { t["FullStorageVMotionNotSupported"] = reflect.TypeOf((*FullStorageVMotionNotSupported)(nil)).Elem() - minAPIVersionForType["FullStorageVMotionNotSupported"] = "2.5" } type FullStorageVMotionNotSupportedFault FullStorageVMotionNotSupported @@ -30671,7 +30661,6 @@ type GatewayConnectFault struct { func init() { t["GatewayConnectFault"] = reflect.TypeOf((*GatewayConnectFault)(nil)).Elem() - minAPIVersionForType["GatewayConnectFault"] = "6.0" } type GatewayConnectFaultFault BaseGatewayConnectFault @@ -30692,7 +30681,6 @@ type GatewayHostNotReachable struct { func init() { t["GatewayHostNotReachable"] = reflect.TypeOf((*GatewayHostNotReachable)(nil)).Elem() - minAPIVersionForType["GatewayHostNotReachable"] = "6.0" } type GatewayHostNotReachableFault GatewayHostNotReachable @@ -30711,7 +30699,6 @@ type GatewayNotFound struct { func init() { t["GatewayNotFound"] = reflect.TypeOf((*GatewayNotFound)(nil)).Elem() - minAPIVersionForType["GatewayNotFound"] = "6.0" } type GatewayNotFoundFault GatewayNotFound @@ -30734,7 +30721,6 @@ type GatewayNotReachable struct { func init() { t["GatewayNotReachable"] = reflect.TypeOf((*GatewayNotReachable)(nil)).Elem() - minAPIVersionForType["GatewayNotReachable"] = "6.0" } type GatewayNotReachableFault GatewayNotReachable @@ -30757,7 +30743,6 @@ type GatewayOperationRefused struct { func init() { t["GatewayOperationRefused"] = reflect.TypeOf((*GatewayOperationRefused)(nil)).Elem() - minAPIVersionForType["GatewayOperationRefused"] = "6.0" } type GatewayOperationRefusedFault GatewayOperationRefused @@ -30789,7 +30774,6 @@ type GatewayToHostAuthFault struct { func init() { t["GatewayToHostAuthFault"] = reflect.TypeOf((*GatewayToHostAuthFault)(nil)).Elem() - minAPIVersionForType["GatewayToHostAuthFault"] = "6.0" } type GatewayToHostAuthFaultFault GatewayToHostAuthFault @@ -30816,7 +30800,6 @@ type GatewayToHostConnectFault struct { func init() { t["GatewayToHostConnectFault"] = reflect.TypeOf((*GatewayToHostConnectFault)(nil)).Elem() - minAPIVersionForType["GatewayToHostConnectFault"] = "6.0" } type GatewayToHostConnectFaultFault BaseGatewayToHostConnectFault @@ -30852,7 +30835,6 @@ type GatewayToHostTrustVerifyFault struct { func init() { t["GatewayToHostTrustVerifyFault"] = reflect.TypeOf((*GatewayToHostTrustVerifyFault)(nil)).Elem() - minAPIVersionForType["GatewayToHostTrustVerifyFault"] = "6.0" } type GatewayToHostTrustVerifyFaultFault GatewayToHostTrustVerifyFault @@ -31184,7 +31166,6 @@ type GenericDrsFault struct { func init() { t["GenericDrsFault"] = reflect.TypeOf((*GenericDrsFault)(nil)).Elem() - minAPIVersionForType["GenericDrsFault"] = "2.5" } type GenericDrsFaultFault GenericDrsFault @@ -31275,6 +31256,7 @@ type GetCryptoKeyStatusRequestType struct { func init() { t["GetCryptoKeyStatusRequestType"] 
= reflect.TypeOf((*GetCryptoKeyStatusRequestType)(nil)).Elem() + minAPIVersionForType["GetCryptoKeyStatusRequestType"] = "8.0.1.0" } type GetCryptoKeyStatusResponse struct { @@ -31454,7 +31436,6 @@ type GhostDvsProxySwitchDetectedEvent struct { func init() { t["GhostDvsProxySwitchDetectedEvent"] = reflect.TypeOf((*GhostDvsProxySwitchDetectedEvent)(nil)).Elem() - minAPIVersionForType["GhostDvsProxySwitchDetectedEvent"] = "4.0" } // This event records when the ghost DVS proxy switches (a.k.a host @@ -31469,7 +31450,6 @@ type GhostDvsProxySwitchRemovedEvent struct { func init() { t["GhostDvsProxySwitchRemovedEvent"] = reflect.TypeOf((*GhostDvsProxySwitchRemovedEvent)(nil)).Elem() - minAPIVersionForType["GhostDvsProxySwitchRemovedEvent"] = "4.0" } // This event records a change to the global message. @@ -31479,7 +31459,7 @@ type GlobalMessageChangedEvent struct { // The new message that was set. Message string `xml:"message" json:"message"` // The previous message that was set. - PrevMessage string `xml:"prevMessage,omitempty" json:"prevMessage,omitempty" vim:"6.5"` + PrevMessage string `xml:"prevMessage,omitempty" json:"prevMessage,omitempty"` } func init() { @@ -31518,7 +31498,6 @@ type GuestAliases struct { func init() { t["GuestAliases"] = reflect.TypeOf((*GuestAliases)(nil)).Elem() - minAPIVersionForType["GuestAliases"] = "6.0" } // Describes a subject associated with an X.509 certificate in the alias @@ -31534,7 +31513,6 @@ type GuestAuthAliasInfo struct { func init() { t["GuestAuthAliasInfo"] = reflect.TypeOf((*GuestAuthAliasInfo)(nil)).Elem() - minAPIVersionForType["GuestAuthAliasInfo"] = "6.0" } // The ANY subject. @@ -31549,7 +31527,6 @@ type GuestAuthAnySubject struct { func init() { t["GuestAuthAnySubject"] = reflect.TypeOf((*GuestAuthAnySubject)(nil)).Elem() - minAPIVersionForType["GuestAuthAnySubject"] = "6.0" } // A named subject. @@ -31565,7 +31542,6 @@ type GuestAuthNamedSubject struct { func init() { t["GuestAuthNamedSubject"] = reflect.TypeOf((*GuestAuthNamedSubject)(nil)).Elem() - minAPIVersionForType["GuestAuthNamedSubject"] = "6.0" } // A Subject. @@ -31575,7 +31551,6 @@ type GuestAuthSubject struct { func init() { t["GuestAuthSubject"] = reflect.TypeOf((*GuestAuthSubject)(nil)).Elem() - minAPIVersionForType["GuestAuthSubject"] = "6.0" } // GuestAuthentication is an abstract base class for authentication @@ -31592,7 +31567,6 @@ type GuestAuthentication struct { func init() { t["GuestAuthentication"] = reflect.TypeOf((*GuestAuthentication)(nil)).Elem() - minAPIVersionForType["GuestAuthentication"] = "5.0" } // Fault is thrown when a call to `GuestAuthManager.AcquireCredentialsInGuest` requires a challenge @@ -31612,7 +31586,6 @@ type GuestAuthenticationChallenge struct { func init() { t["GuestAuthenticationChallenge"] = reflect.TypeOf((*GuestAuthenticationChallenge)(nil)).Elem() - minAPIVersionForType["GuestAuthenticationChallenge"] = "5.0" } type GuestAuthenticationChallengeFault GuestAuthenticationChallenge @@ -31630,7 +31603,6 @@ type GuestComponentsOutOfDate struct { func init() { t["GuestComponentsOutOfDate"] = reflect.TypeOf((*GuestComponentsOutOfDate)(nil)).Elem() - minAPIVersionForType["GuestComponentsOutOfDate"] = "5.0" } type GuestComponentsOutOfDateFault GuestComponentsOutOfDate @@ -31659,9 +31631,9 @@ type GuestDiskInfo struct { // Filesystem type, if known. // // For example NTFS or ext3. 
- FilesystemType string `xml:"filesystemType,omitempty" json:"filesystemType,omitempty" vim:"7.0"` + FilesystemType string `xml:"filesystemType,omitempty" json:"filesystemType,omitempty"` // VirtualDisks backing the guest partition, if known. - Mappings []GuestInfoVirtualDiskMapping `xml:"mappings,omitempty" json:"mappings,omitempty" vim:"7.0"` + Mappings []GuestInfoVirtualDiskMapping `xml:"mappings,omitempty" json:"mappings,omitempty"` } func init() { @@ -31712,7 +31684,6 @@ type GuestFileAttributes struct { func init() { t["GuestFileAttributes"] = reflect.TypeOf((*GuestFileAttributes)(nil)).Elem() - minAPIVersionForType["GuestFileAttributes"] = "5.0" } type GuestFileInfo struct { @@ -31752,26 +31723,26 @@ type GuestInfo struct { // // The set of possible values is described in // `VirtualMachineToolsVersionStatus_enum` for vSphere API 5.0. - ToolsVersionStatus string `xml:"toolsVersionStatus,omitempty" json:"toolsVersionStatus,omitempty" vim:"4.0"` + ToolsVersionStatus string `xml:"toolsVersionStatus,omitempty" json:"toolsVersionStatus,omitempty"` // Current version status of VMware Tools in the guest operating system, // if known. // // The set of possible values is described in // `VirtualMachineToolsVersionStatus_enum` - ToolsVersionStatus2 string `xml:"toolsVersionStatus2,omitempty" json:"toolsVersionStatus2,omitempty" vim:"5.0"` + ToolsVersionStatus2 string `xml:"toolsVersionStatus2,omitempty" json:"toolsVersionStatus2,omitempty"` // Current running status of VMware Tools in the guest operating system, // if known. // // The set of possible values is described in // `VirtualMachineToolsRunningStatus_enum` - ToolsRunningStatus string `xml:"toolsRunningStatus,omitempty" json:"toolsRunningStatus,omitempty" vim:"4.0"` + ToolsRunningStatus string `xml:"toolsRunningStatus,omitempty" json:"toolsRunningStatus,omitempty"` // Current version of VMware Tools, if known. ToolsVersion string `xml:"toolsVersion,omitempty" json:"toolsVersion,omitempty"` // Current installation type of VMware Tools in the guest operating system. // // The set of possible values is described in // `VirtualMachineToolsInstallType_enum` - ToolsInstallType string `xml:"toolsInstallType,omitempty" json:"toolsInstallType,omitempty" vim:"6.5"` + ToolsInstallType string `xml:"toolsInstallType,omitempty" json:"toolsInstallType,omitempty"` // Guest operating system identifier (short name), if known. GuestId string `xml:"guestId,omitempty" json:"guestId,omitempty"` // Guest operating system family, if known. @@ -31863,7 +31834,7 @@ type GuestInfo struct { // Guest information about network adapters, if known. Net []GuestNicInfo `xml:"net,omitempty" json:"net,omitempty"` // Guest information about IP networking stack, if known. - IpStack []GuestStackInfo `xml:"ipStack,omitempty" json:"ipStack,omitempty" vim:"4.1"` + IpStack []GuestStackInfo `xml:"ipStack,omitempty" json:"ipStack,omitempty"` // Guest information about disks. // // You can obtain Linux guest disk information for the following file system @@ -31879,21 +31850,21 @@ type GuestInfo struct { // Operation mode of guest operating system. // // One of: - // - "running" - Guest is running normally. - // - "shuttingdown" - Guest has a pending shutdown command. - // - "resetting" - Guest has a pending reset command. - // - "standby" - Guest has a pending standby command. - // - "notrunning" - Guest is not running. - // - "unknown" - Guest information is not available. + // - "running" - Guest is running normally. + // - "shuttingdown" - Guest has a pending shutdown command. 
+ // - "resetting" - Guest has a pending reset command. + // - "standby" - Guest has a pending standby command. + // - "notrunning" - Guest is not running. + // - "unknown" - Guest information is not available. GuestState string `xml:"guestState" json:"guestState"` // Application heartbeat status. // // Please see `VirtualMachineAppHeartbeatStatusType_enum` - AppHeartbeatStatus string `xml:"appHeartbeatStatus,omitempty" json:"appHeartbeatStatus,omitempty" vim:"4.1"` + AppHeartbeatStatus string `xml:"appHeartbeatStatus,omitempty" json:"appHeartbeatStatus,omitempty"` // Guest operating system's kernel crash state. // // If true, the guest operating system's kernel has crashed. - GuestKernelCrashed *bool `xml:"guestKernelCrashed" json:"guestKernelCrashed,omitempty" vim:"6.0"` + GuestKernelCrashed *bool `xml:"guestKernelCrashed" json:"guestKernelCrashed,omitempty"` // Application state. // // If vSphere HA is enabled and the vm is configured for Application Monitoring @@ -31904,20 +31875,20 @@ type GuestInfo struct { // conditions the value is changed to appStateOk the reset will be cancelled. // // See also `GuestInfoAppStateType_enum`. - AppState string `xml:"appState,omitempty" json:"appState,omitempty" vim:"5.5"` + AppState string `xml:"appState,omitempty" json:"appState,omitempty"` // Guest Operations availability. // // If true, the virtual machine is ready to process guest operations. - GuestOperationsReady *bool `xml:"guestOperationsReady" json:"guestOperationsReady,omitempty" vim:"5.0"` + GuestOperationsReady *bool `xml:"guestOperationsReady" json:"guestOperationsReady,omitempty"` // Interactive Guest Operations availability. // // If true, the virtual machine is ready to process guest operations // as the user interacting with the guest desktop. - InteractiveGuestOperationsReady *bool `xml:"interactiveGuestOperationsReady" json:"interactiveGuestOperationsReady,omitempty" vim:"5.0"` + InteractiveGuestOperationsReady *bool `xml:"interactiveGuestOperationsReady" json:"interactiveGuestOperationsReady,omitempty"` // State change support. // // If true, the virtual machine is ready to process soft power operations. - GuestStateChangeSupported *bool `xml:"guestStateChangeSupported" json:"guestStateChangeSupported,omitempty" vim:"6.0"` + GuestStateChangeSupported *bool `xml:"guestStateChangeSupported" json:"guestStateChangeSupported,omitempty"` // A list of namespaces and their corresponding generation numbers. // // Only namespaces with non-zero @@ -31925,9 +31896,9 @@ type GuestInfo struct { // are guaranteed to be present here. // Use `VirtualMachineNamespaceManager.ListNamespaces` to retrieve list of // namespaces. - GenerationInfo []GuestInfoNamespaceGenerationInfo `xml:"generationInfo,omitempty" json:"generationInfo,omitempty" vim:"5.1"` + GenerationInfo []GuestInfoNamespaceGenerationInfo `xml:"generationInfo,omitempty" json:"generationInfo,omitempty"` // The hardware version string for this virtual machine. - HwVersion string `xml:"hwVersion,omitempty" json:"hwVersion,omitempty" vim:"6.9.1"` + HwVersion string `xml:"hwVersion,omitempty" json:"hwVersion,omitempty"` // Guest OS Customization status info. 
CustomizationInfo *GuestInfoCustomizationInfo `xml:"customizationInfo,omitempty" json:"customizationInfo,omitempty" vim:"7.0.2.0"` } @@ -31956,6 +31927,7 @@ type GuestInfoCustomizationInfo struct { func init() { t["GuestInfoCustomizationInfo"] = reflect.TypeOf((*GuestInfoCustomizationInfo)(nil)).Elem() + minAPIVersionForType["GuestInfoCustomizationInfo"] = "7.0.2.0" } // A data class for the namespace and its corresponding generation number @@ -31979,7 +31951,6 @@ type GuestInfoNamespaceGenerationInfo struct { func init() { t["GuestInfoNamespaceGenerationInfo"] = reflect.TypeOf((*GuestInfoNamespaceGenerationInfo)(nil)).Elem() - minAPIVersionForType["GuestInfoNamespaceGenerationInfo"] = "5.1" } // Describes the virtual disk backing a local guest disk. @@ -31994,7 +31965,6 @@ type GuestInfoVirtualDiskMapping struct { func init() { t["GuestInfoVirtualDiskMapping"] = reflect.TypeOf((*GuestInfoVirtualDiskMapping)(nil)).Elem() - minAPIVersionForType["GuestInfoVirtualDiskMapping"] = "7.0" } type GuestListFileInfo struct { @@ -32032,7 +32002,6 @@ type GuestMappedAliases struct { func init() { t["GuestMappedAliases"] = reflect.TypeOf((*GuestMappedAliases)(nil)).Elem() - minAPIVersionForType["GuestMappedAliases"] = "6.0" } // A GuestMultipleMappings exception is thrown when an @@ -32045,7 +32014,6 @@ type GuestMultipleMappings struct { func init() { t["GuestMultipleMappings"] = reflect.TypeOf((*GuestMultipleMappings)(nil)).Elem() - minAPIVersionForType["GuestMultipleMappings"] = "6.0" } type GuestMultipleMappingsFault GuestMultipleMappings @@ -32076,13 +32044,13 @@ type GuestNicInfo struct { // This property is set only when Guest OS supports it. // See `GuestStackInfo` dnsConfig for system wide // settings. - DnsConfig *NetDnsConfigInfo `xml:"dnsConfig,omitempty" json:"dnsConfig,omitempty" vim:"4.1"` + DnsConfig *NetDnsConfigInfo `xml:"dnsConfig,omitempty" json:"dnsConfig,omitempty"` // IP configuration settings of the adapter // See `GuestStackInfo` ipStackConfig for system wide // settings. - IpConfig *NetIpConfigInfo `xml:"ipConfig,omitempty" json:"ipConfig,omitempty" vim:"4.1"` + IpConfig *NetIpConfigInfo `xml:"ipConfig,omitempty" json:"ipConfig,omitempty"` // NetBIOS configuration of the adapter - NetBIOSConfig BaseNetBIOSConfigInfo `xml:"netBIOSConfig,omitempty,typeattr" json:"netBIOSConfig,omitempty" vim:"4.1"` + NetBIOSConfig BaseNetBIOSConfigInfo `xml:"netBIOSConfig,omitempty,typeattr" json:"netBIOSConfig,omitempty"` } func init() { @@ -32096,7 +32064,6 @@ type GuestOperationsFault struct { func init() { t["GuestOperationsFault"] = reflect.TypeOf((*GuestOperationsFault)(nil)).Elem() - minAPIVersionForType["GuestOperationsFault"] = "5.0" } type GuestOperationsFaultFault BaseGuestOperationsFault @@ -32114,7 +32081,6 @@ type GuestOperationsUnavailable struct { func init() { t["GuestOperationsUnavailable"] = reflect.TypeOf((*GuestOperationsUnavailable)(nil)).Elem() - minAPIVersionForType["GuestOperationsUnavailable"] = "5.0" } type GuestOperationsUnavailableFault GuestOperationsUnavailable @@ -32140,9 +32106,9 @@ type GuestOsDescriptor struct { // Maximum number of processors supported for this guest. SupportedMaxCPUs int32 `xml:"supportedMaxCPUs" json:"supportedMaxCPUs"` // Maximum number of sockets supported for this guest. 
- NumSupportedPhysicalSockets int32 `xml:"numSupportedPhysicalSockets,omitempty" json:"numSupportedPhysicalSockets,omitempty" vim:"5.0"` + NumSupportedPhysicalSockets int32 `xml:"numSupportedPhysicalSockets,omitempty" json:"numSupportedPhysicalSockets,omitempty"` // Maximum number of cores per socket for this guest. - NumSupportedCoresPerSocket int32 `xml:"numSupportedCoresPerSocket,omitempty" json:"numSupportedCoresPerSocket,omitempty" vim:"5.0"` + NumSupportedCoresPerSocket int32 `xml:"numSupportedCoresPerSocket,omitempty" json:"numSupportedCoresPerSocket,omitempty"` // Minimum memory requirements supported for this guest, in MB. SupportedMinMemMB int32 `xml:"supportedMinMemMB" json:"supportedMinMemMB"` // Maximum memory requirements supported for this guest, in MB. @@ -32162,7 +32128,7 @@ type GuestOsDescriptor struct { // Recommended default disk size for this guest, in MB. RecommendedDiskSizeMB int32 `xml:"recommendedDiskSizeMB" json:"recommendedDiskSizeMB"` // Recommended default CD-ROM type for this guest. - RecommendedCdromController string `xml:"recommendedCdromController,omitempty" json:"recommendedCdromController,omitempty" vim:"5.5"` + RecommendedCdromController string `xml:"recommendedCdromController,omitempty" json:"recommendedCdromController,omitempty"` // List of supported ethernet cards for this guest. SupportedEthernetCard []string `xml:"supportedEthernetCard" json:"supportedEthernetCard"` // Recommended default ethernet controller type for this guest. @@ -32175,71 +32141,71 @@ type GuestOsDescriptor struct { // Flag that indicates wether the guest requires an SMC (Apple hardware). // // This is logically equivalent to GuestOS = Mac OS - SmcRequired *bool `xml:"smcRequired" json:"smcRequired,omitempty" vim:"5.0"` + SmcRequired *bool `xml:"smcRequired" json:"smcRequired,omitempty"` // Flag to indicate whether or not this guest can support Wake-on-LAN. SupportsWakeOnLan bool `xml:"supportsWakeOnLan" json:"supportsWakeOnLan"` // Flag indicating whether or not this guest supports the virtual // machine interface. - SupportsVMI *bool `xml:"supportsVMI" json:"supportsVMI,omitempty" vim:"2.5 U2"` + SupportsVMI *bool `xml:"supportsVMI" json:"supportsVMI,omitempty"` // Whether the memory size for this guest can be changed // while the virtual machine is running. - SupportsMemoryHotAdd *bool `xml:"supportsMemoryHotAdd" json:"supportsMemoryHotAdd,omitempty" vim:"2.5 U2"` + SupportsMemoryHotAdd *bool `xml:"supportsMemoryHotAdd" json:"supportsMemoryHotAdd,omitempty"` // Whether virtual CPUs can be added to this guest // while the virtual machine is running. - SupportsCpuHotAdd *bool `xml:"supportsCpuHotAdd" json:"supportsCpuHotAdd,omitempty" vim:"2.5 U2"` + SupportsCpuHotAdd *bool `xml:"supportsCpuHotAdd" json:"supportsCpuHotAdd,omitempty"` // Whether virtual CPUs can be removed from this guest // while the virtual machine is running. - SupportsCpuHotRemove *bool `xml:"supportsCpuHotRemove" json:"supportsCpuHotRemove,omitempty" vim:"2.5 U2"` + SupportsCpuHotRemove *bool `xml:"supportsCpuHotRemove" json:"supportsCpuHotRemove,omitempty"` // Supported firmware types for this guest. // // Possible values are described in // `GuestOsDescriptorFirmwareType_enum` - SupportedFirmware []string `xml:"supportedFirmware,omitempty" json:"supportedFirmware,omitempty" vim:"5.0"` + SupportedFirmware []string `xml:"supportedFirmware,omitempty" json:"supportedFirmware,omitempty"` // Recommended firmware type for this guest. 
// // Possible values are described in // `GuestOsDescriptorFirmwareType_enum` - RecommendedFirmware string `xml:"recommendedFirmware,omitempty" json:"recommendedFirmware,omitempty" vim:"5.0"` + RecommendedFirmware string `xml:"recommendedFirmware,omitempty" json:"recommendedFirmware,omitempty"` // List of supported USB controllers for this guest. - SupportedUSBControllerList []string `xml:"supportedUSBControllerList,omitempty" json:"supportedUSBControllerList,omitempty" vim:"5.0"` + SupportedUSBControllerList []string `xml:"supportedUSBControllerList,omitempty" json:"supportedUSBControllerList,omitempty"` // Recommended default USB controller type for this guest. - RecommendedUSBController string `xml:"recommendedUSBController,omitempty" json:"recommendedUSBController,omitempty" vim:"5.0"` + RecommendedUSBController string `xml:"recommendedUSBController,omitempty" json:"recommendedUSBController,omitempty"` // Whether this guest supports 3D graphics. - Supports3D *bool `xml:"supports3D" json:"supports3D,omitempty" vim:"5.0"` + Supports3D *bool `xml:"supports3D" json:"supports3D,omitempty"` // Recommended 3D graphics for this guest. - Recommended3D *bool `xml:"recommended3D" json:"recommended3D,omitempty" vim:"5.1"` + Recommended3D *bool `xml:"recommended3D" json:"recommended3D,omitempty"` // Whether SMC (Apple hardware) is recommended for this guest. - SmcRecommended *bool `xml:"smcRecommended" json:"smcRecommended,omitempty" vim:"5.0"` + SmcRecommended *bool `xml:"smcRecommended" json:"smcRecommended,omitempty"` // Whether I/O Controller Hub is recommended for this guest. - Ich7mRecommended *bool `xml:"ich7mRecommended" json:"ich7mRecommended,omitempty" vim:"5.0"` + Ich7mRecommended *bool `xml:"ich7mRecommended" json:"ich7mRecommended,omitempty"` // Whether USB controller is recommended for this guest. - UsbRecommended *bool `xml:"usbRecommended" json:"usbRecommended,omitempty" vim:"5.0"` + UsbRecommended *bool `xml:"usbRecommended" json:"usbRecommended,omitempty"` // Support level of this Guest // Possible values are described in // `GuestOsDescriptorSupportLevel_enum` - SupportLevel string `xml:"supportLevel,omitempty" json:"supportLevel,omitempty" vim:"5.0"` + SupportLevel string `xml:"supportLevel,omitempty" json:"supportLevel,omitempty"` // Whether or not this guest should be allowed for selection // during virtual machine creation. - SupportedForCreate *bool `xml:"supportedForCreate" json:"supportedForCreate,omitempty" vim:"5.0"` + SupportedForCreate *bool `xml:"supportedForCreate" json:"supportedForCreate,omitempty"` // Video RAM size limits supported by this guest, in KB. - VRAMSizeInKB *IntOption `xml:"vRAMSizeInKB,omitempty" json:"vRAMSizeInKB,omitempty" vim:"5.0"` + VRAMSizeInKB *IntOption `xml:"vRAMSizeInKB,omitempty" json:"vRAMSizeInKB,omitempty"` // Maximum number of floppies supported by this guest. - NumSupportedFloppyDevices int32 `xml:"numSupportedFloppyDevices,omitempty" json:"numSupportedFloppyDevices,omitempty" vim:"6.0"` + NumSupportedFloppyDevices int32 `xml:"numSupportedFloppyDevices,omitempty" json:"numSupportedFloppyDevices,omitempty"` // List of NICs supported by this guest that support Wake-On-Lan. - WakeOnLanEthernetCard []string `xml:"wakeOnLanEthernetCard,omitempty" json:"wakeOnLanEthernetCard,omitempty" vim:"6.0"` + WakeOnLanEthernetCard []string `xml:"wakeOnLanEthernetCard,omitempty" json:"wakeOnLanEthernetCard,omitempty"` // Whether or not this guest can use pvscsi as boot adapter. 
- SupportsPvscsiControllerForBoot *bool `xml:"supportsPvscsiControllerForBoot" json:"supportsPvscsiControllerForBoot,omitempty" vim:"6.0"` + SupportsPvscsiControllerForBoot *bool `xml:"supportsPvscsiControllerForBoot" json:"supportsPvscsiControllerForBoot,omitempty"` // Whether or not this guest should have disk uuid enabled by default. - DiskUuidEnabled *bool `xml:"diskUuidEnabled" json:"diskUuidEnabled,omitempty" vim:"6.0"` + DiskUuidEnabled *bool `xml:"diskUuidEnabled" json:"diskUuidEnabled,omitempty"` // Whether or not this guest supports hot plug of PCI devices. - SupportsHotPlugPCI *bool `xml:"supportsHotPlugPCI" json:"supportsHotPlugPCI,omitempty" vim:"6.0"` + SupportsHotPlugPCI *bool `xml:"supportsHotPlugPCI" json:"supportsHotPlugPCI,omitempty"` // Whether or not this guest supports Secure Boot. // // If some of the OS // releases that fall under this guest OS descriptor support Secure Boot, it // is reasonable to offer the ability to enable Secure Boot. Only // meaningful when virtual EFI firmware is in use. - SupportsSecureBoot *bool `xml:"supportsSecureBoot" json:"supportsSecureBoot,omitempty" vim:"6.5"` + SupportsSecureBoot *bool `xml:"supportsSecureBoot" json:"supportsSecureBoot,omitempty"` // Whether or not Secure Boot should be enabled by default for this // guest OS. // @@ -32247,50 +32213,50 @@ type GuestOsDescriptor struct { // support Secure Boot and are known to operate correctly with Secure Boot // enabled, it is reasonable to enable it by default. Only meaningful when // virtual EFI firmware is in use. - DefaultSecureBoot *bool `xml:"defaultSecureBoot" json:"defaultSecureBoot,omitempty" vim:"6.5"` + DefaultSecureBoot *bool `xml:"defaultSecureBoot" json:"defaultSecureBoot,omitempty"` // Support of persistent memory (virtual NVDIMM device). // // See also `VirtualNVDIMM`. - PersistentMemorySupported *bool `xml:"persistentMemorySupported" json:"persistentMemorySupported,omitempty" vim:"6.7"` + PersistentMemorySupported *bool `xml:"persistentMemorySupported" json:"persistentMemorySupported,omitempty"` // Minimum persistent memory supported for this guest, in MB. - SupportedMinPersistentMemoryMB int64 `xml:"supportedMinPersistentMemoryMB,omitempty" json:"supportedMinPersistentMemoryMB,omitempty" vim:"6.7"` + SupportedMinPersistentMemoryMB int64 `xml:"supportedMinPersistentMemoryMB,omitempty" json:"supportedMinPersistentMemoryMB,omitempty"` // Maximum persistent memory supported for this guest, in MB. // // Total size of all the virtual NVDIMM devices should be less // than this value. - SupportedMaxPersistentMemoryMB int64 `xml:"supportedMaxPersistentMemoryMB,omitempty" json:"supportedMaxPersistentMemoryMB,omitempty" vim:"6.7"` + SupportedMaxPersistentMemoryMB int64 `xml:"supportedMaxPersistentMemoryMB,omitempty" json:"supportedMaxPersistentMemoryMB,omitempty"` // Recommended default persistent memory size for this guest, in MB. - RecommendedPersistentMemoryMB int64 `xml:"recommendedPersistentMemoryMB,omitempty" json:"recommendedPersistentMemoryMB,omitempty" vim:"6.7"` + RecommendedPersistentMemoryMB int64 `xml:"recommendedPersistentMemoryMB,omitempty" json:"recommendedPersistentMemoryMB,omitempty"` // Support of persistent memory hot-add operation. - PersistentMemoryHotAddSupported *bool `xml:"persistentMemoryHotAddSupported" json:"persistentMemoryHotAddSupported,omitempty" vim:"6.7"` + PersistentMemoryHotAddSupported *bool `xml:"persistentMemoryHotAddSupported" json:"persistentMemoryHotAddSupported,omitempty"` // Support of persistent memory hot-remove operation. 
- PersistentMemoryHotRemoveSupported *bool `xml:"persistentMemoryHotRemoveSupported" json:"persistentMemoryHotRemoveSupported,omitempty" vim:"6.7"` + PersistentMemoryHotRemoveSupported *bool `xml:"persistentMemoryHotRemoveSupported" json:"persistentMemoryHotRemoveSupported,omitempty"` // Support of virtual NVDIMM cold-growth operation. - PersistentMemoryColdGrowthSupported *bool `xml:"persistentMemoryColdGrowthSupported" json:"persistentMemoryColdGrowthSupported,omitempty" vim:"6.7"` + PersistentMemoryColdGrowthSupported *bool `xml:"persistentMemoryColdGrowthSupported" json:"persistentMemoryColdGrowthSupported,omitempty"` // Virtual NVDIMM cold-growth granularity in MB. - PersistentMemoryColdGrowthGranularityMB int64 `xml:"persistentMemoryColdGrowthGranularityMB,omitempty" json:"persistentMemoryColdGrowthGranularityMB,omitempty" vim:"6.7"` + PersistentMemoryColdGrowthGranularityMB int64 `xml:"persistentMemoryColdGrowthGranularityMB,omitempty" json:"persistentMemoryColdGrowthGranularityMB,omitempty"` // Support of virtual NVDIMM hot-growth operation. - PersistentMemoryHotGrowthSupported *bool `xml:"persistentMemoryHotGrowthSupported" json:"persistentMemoryHotGrowthSupported,omitempty" vim:"6.7"` + PersistentMemoryHotGrowthSupported *bool `xml:"persistentMemoryHotGrowthSupported" json:"persistentMemoryHotGrowthSupported,omitempty"` // Virtual NVDIMM hot-growth granularity in MB. - PersistentMemoryHotGrowthGranularityMB int64 `xml:"persistentMemoryHotGrowthGranularityMB,omitempty" json:"persistentMemoryHotGrowthGranularityMB,omitempty" vim:"6.7"` + PersistentMemoryHotGrowthGranularityMB int64 `xml:"persistentMemoryHotGrowthGranularityMB,omitempty" json:"persistentMemoryHotGrowthGranularityMB,omitempty"` // Recommended number of sockets for this guest. - NumRecommendedPhysicalSockets int32 `xml:"numRecommendedPhysicalSockets,omitempty" json:"numRecommendedPhysicalSockets,omitempty" vim:"6.7"` + NumRecommendedPhysicalSockets int32 `xml:"numRecommendedPhysicalSockets,omitempty" json:"numRecommendedPhysicalSockets,omitempty"` // Recommended number of cores per socket for this guest. - NumRecommendedCoresPerSocket int32 `xml:"numRecommendedCoresPerSocket,omitempty" json:"numRecommendedCoresPerSocket,omitempty" vim:"6.7"` + NumRecommendedCoresPerSocket int32 `xml:"numRecommendedCoresPerSocket,omitempty" json:"numRecommendedCoresPerSocket,omitempty"` // Support of Intel Virtualization Technology for Directed I/O. - VvtdSupported *BoolOption `xml:"vvtdSupported,omitempty" json:"vvtdSupported,omitempty" vim:"6.7"` + VvtdSupported *BoolOption `xml:"vvtdSupported,omitempty" json:"vvtdSupported,omitempty"` // Support of Virtualization-based security. - VbsSupported *BoolOption `xml:"vbsSupported,omitempty" json:"vbsSupported,omitempty" vim:"6.7"` + VbsSupported *BoolOption `xml:"vbsSupported,omitempty" json:"vbsSupported,omitempty"` // Support for Intel Software Guard Extensions - VsgxSupported *BoolOption `xml:"vsgxSupported,omitempty" json:"vsgxSupported,omitempty" vim:"7.0"` + VsgxSupported *BoolOption `xml:"vsgxSupported,omitempty" json:"vsgxSupported,omitempty"` // Support for Intel Software Guard Extensions remote attestation. VsgxRemoteAttestationSupported *bool `xml:"vsgxRemoteAttestationSupported" json:"vsgxRemoteAttestationSupported,omitempty" vim:"8.0.0.1"` // Support for TPM 2.0. 
- SupportsTPM20 *bool `xml:"supportsTPM20" json:"supportsTPM20,omitempty" vim:"6.7"` + SupportsTPM20 *bool `xml:"supportsTPM20" json:"supportsTPM20,omitempty"` // Support for default vTPM RecommendedTPM20 *bool `xml:"recommendedTPM20" json:"recommendedTPM20,omitempty" vim:"8.0.0.1"` // Support for Virtual Watchdog Timer. - VwdtSupported *bool `xml:"vwdtSupported" json:"vwdtSupported,omitempty" vim:"7.0"` + VwdtSupported *bool `xml:"vwdtSupported" json:"vwdtSupported,omitempty"` } func init() { @@ -32306,7 +32272,6 @@ type GuestPermissionDenied struct { func init() { t["GuestPermissionDenied"] = reflect.TypeOf((*GuestPermissionDenied)(nil)).Elem() - minAPIVersionForType["GuestPermissionDenied"] = "5.0" } type GuestPermissionDeniedFault GuestPermissionDenied @@ -32350,7 +32315,6 @@ type GuestPosixFileAttributes struct { func init() { t["GuestPosixFileAttributes"] = reflect.TypeOf((*GuestPosixFileAttributes)(nil)).Elem() - minAPIVersionForType["GuestPosixFileAttributes"] = "5.0" } type GuestProcessInfo struct { @@ -32393,7 +32357,6 @@ type GuestProcessNotFound struct { func init() { t["GuestProcessNotFound"] = reflect.TypeOf((*GuestProcessNotFound)(nil)).Elem() - minAPIVersionForType["GuestProcessNotFound"] = "5.0" } type GuestProcessNotFoundFault GuestProcessNotFound @@ -32457,7 +32420,6 @@ type GuestProgramSpec struct { func init() { t["GuestProgramSpec"] = reflect.TypeOf((*GuestProgramSpec)(nil)).Elem() - minAPIVersionForType["GuestProgramSpec"] = "5.0" } // This describes the registry key name. @@ -32472,7 +32434,6 @@ type GuestRegKeyNameSpec struct { func init() { t["GuestRegKeyNameSpec"] = reflect.TypeOf((*GuestRegKeyNameSpec)(nil)).Elem() - minAPIVersionForType["GuestRegKeyNameSpec"] = "6.0" } // This describes the registry key record. @@ -32491,7 +32452,6 @@ type GuestRegKeyRecordSpec struct { func init() { t["GuestRegKeyRecordSpec"] = reflect.TypeOf((*GuestRegKeyRecordSpec)(nil)).Elem() - minAPIVersionForType["GuestRegKeyRecordSpec"] = "6.0" } // This describes the registry key. @@ -32508,7 +32468,6 @@ type GuestRegKeySpec struct { func init() { t["GuestRegKeySpec"] = reflect.TypeOf((*GuestRegKeySpec)(nil)).Elem() - minAPIVersionForType["GuestRegKeySpec"] = "6.0" } // This describes the registry value binary. @@ -32524,7 +32483,6 @@ type GuestRegValueBinarySpec struct { func init() { t["GuestRegValueBinarySpec"] = reflect.TypeOf((*GuestRegValueBinarySpec)(nil)).Elem() - minAPIVersionForType["GuestRegValueBinarySpec"] = "6.0" } // This describes the registry value data. @@ -32534,7 +32492,6 @@ type GuestRegValueDataSpec struct { func init() { t["GuestRegValueDataSpec"] = reflect.TypeOf((*GuestRegValueDataSpec)(nil)).Elem() - minAPIVersionForType["GuestRegValueDataSpec"] = "6.0" } // This describes the registry value dword. @@ -32547,7 +32504,6 @@ type GuestRegValueDwordSpec struct { func init() { t["GuestRegValueDwordSpec"] = reflect.TypeOf((*GuestRegValueDwordSpec)(nil)).Elem() - minAPIVersionForType["GuestRegValueDwordSpec"] = "6.0" } // This describes the registry value expand string. @@ -32563,7 +32519,6 @@ type GuestRegValueExpandStringSpec struct { func init() { t["GuestRegValueExpandStringSpec"] = reflect.TypeOf((*GuestRegValueExpandStringSpec)(nil)).Elem() - minAPIVersionForType["GuestRegValueExpandStringSpec"] = "6.0" } // This describes the registry value multi string. 
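Note (annotation, not part of the patch): the hunks above and below all follow one pattern from the govmomi v0.38.0 vendor bump — `vim:"X.Y"` struct tags and `minAPIVersionForType` registrations for API versions at or below the library's current baseline are dropped, while annotations for newer versions (for example `vim:"7.0.2.0"` on GuestInfoCustomizationInfo, or `vim:"8.0.3.0"` on the new Certificate field further down) are kept. The sketch below is illustrative only: it uses a hypothetical struct and plain reflection, not govmomi's own encoder, just to show what a `vim` tag carries.

```go
package main

import (
	"fmt"
	"reflect"
)

// exampleSpec is a hypothetical type mirroring the tag convention seen in the
// hunks above: only fields newer than the baseline API keep a vim tag.
type exampleSpec struct {
	Thumbprint  string `xml:"thumbprint,omitempty" json:"thumbprint,omitempty"`
	Certificate string `xml:"certificate,omitempty" json:"certificate,omitempty" vim:"8.0.3.0"`
}

func main() {
	t := reflect.TypeOf(exampleSpec{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		if v, ok := f.Tag.Lookup("vim"); ok {
			fmt.Printf("%s requires vSphere API %s or newer\n", f.Name, v)
		} else {
			fmt.Printf("%s has no minimum-version annotation\n", f.Name)
		}
	}
}
```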
@@ -32579,7 +32534,6 @@ type GuestRegValueMultiStringSpec struct { func init() { t["GuestRegValueMultiStringSpec"] = reflect.TypeOf((*GuestRegValueMultiStringSpec)(nil)).Elem() - minAPIVersionForType["GuestRegValueMultiStringSpec"] = "6.0" } // This describes the registry value name. @@ -32594,7 +32548,6 @@ type GuestRegValueNameSpec struct { func init() { t["GuestRegValueNameSpec"] = reflect.TypeOf((*GuestRegValueNameSpec)(nil)).Elem() - minAPIVersionForType["GuestRegValueNameSpec"] = "6.0" } // This describes the registry value qword. @@ -32607,7 +32560,6 @@ type GuestRegValueQwordSpec struct { func init() { t["GuestRegValueQwordSpec"] = reflect.TypeOf((*GuestRegValueQwordSpec)(nil)).Elem() - minAPIVersionForType["GuestRegValueQwordSpec"] = "6.0" } // This describes the registry value. @@ -32630,7 +32582,6 @@ type GuestRegValueSpec struct { func init() { t["GuestRegValueSpec"] = reflect.TypeOf((*GuestRegValueSpec)(nil)).Elem() - minAPIVersionForType["GuestRegValueSpec"] = "6.0" } // This describes the registry value string. @@ -32646,7 +32597,6 @@ type GuestRegValueStringSpec struct { func init() { t["GuestRegValueStringSpec"] = reflect.TypeOf((*GuestRegValueStringSpec)(nil)).Elem() - minAPIVersionForType["GuestRegValueStringSpec"] = "6.0" } // A GuestRegistryFault exception is thrown when an operation fails @@ -32660,7 +32610,6 @@ type GuestRegistryFault struct { func init() { t["GuestRegistryFault"] = reflect.TypeOf((*GuestRegistryFault)(nil)).Elem() - minAPIVersionForType["GuestRegistryFault"] = "6.0" } type GuestRegistryFaultFault BaseGuestRegistryFault @@ -32677,7 +32626,6 @@ type GuestRegistryKeyAlreadyExists struct { func init() { t["GuestRegistryKeyAlreadyExists"] = reflect.TypeOf((*GuestRegistryKeyAlreadyExists)(nil)).Elem() - minAPIVersionForType["GuestRegistryKeyAlreadyExists"] = "6.0" } type GuestRegistryKeyAlreadyExistsFault GuestRegistryKeyAlreadyExists @@ -32697,7 +32645,6 @@ type GuestRegistryKeyFault struct { func init() { t["GuestRegistryKeyFault"] = reflect.TypeOf((*GuestRegistryKeyFault)(nil)).Elem() - minAPIVersionForType["GuestRegistryKeyFault"] = "6.0" } type GuestRegistryKeyFaultFault BaseGuestRegistryKeyFault @@ -32717,7 +32664,6 @@ type GuestRegistryKeyHasSubkeys struct { func init() { t["GuestRegistryKeyHasSubkeys"] = reflect.TypeOf((*GuestRegistryKeyHasSubkeys)(nil)).Elem() - minAPIVersionForType["GuestRegistryKeyHasSubkeys"] = "6.0" } type GuestRegistryKeyHasSubkeysFault GuestRegistryKeyHasSubkeys @@ -32735,7 +32681,6 @@ type GuestRegistryKeyInvalid struct { func init() { t["GuestRegistryKeyInvalid"] = reflect.TypeOf((*GuestRegistryKeyInvalid)(nil)).Elem() - minAPIVersionForType["GuestRegistryKeyInvalid"] = "6.0" } type GuestRegistryKeyInvalidFault GuestRegistryKeyInvalid @@ -32752,7 +32697,6 @@ type GuestRegistryKeyParentVolatile struct { func init() { t["GuestRegistryKeyParentVolatile"] = reflect.TypeOf((*GuestRegistryKeyParentVolatile)(nil)).Elem() - minAPIVersionForType["GuestRegistryKeyParentVolatile"] = "6.0" } type GuestRegistryKeyParentVolatileFault GuestRegistryKeyParentVolatile @@ -32774,7 +32718,6 @@ type GuestRegistryValueFault struct { func init() { t["GuestRegistryValueFault"] = reflect.TypeOf((*GuestRegistryValueFault)(nil)).Elem() - minAPIVersionForType["GuestRegistryValueFault"] = "6.0" } type GuestRegistryValueFaultFault BaseGuestRegistryValueFault @@ -32791,7 +32734,6 @@ type GuestRegistryValueNotFound struct { func init() { t["GuestRegistryValueNotFound"] = reflect.TypeOf((*GuestRegistryValueNotFound)(nil)).Elem() - 
minAPIVersionForType["GuestRegistryValueNotFound"] = "6.0" } type GuestRegistryValueNotFoundFault GuestRegistryValueNotFound @@ -32847,7 +32789,6 @@ type GuestStackInfo struct { func init() { t["GuestStackInfo"] = reflect.TypeOf((*GuestStackInfo)(nil)).Elem() - minAPIVersionForType["GuestStackInfo"] = "4.1" } // Different attributes for a Windows guest file. @@ -32883,7 +32824,6 @@ type GuestWindowsFileAttributes struct { func init() { t["GuestWindowsFileAttributes"] = reflect.TypeOf((*GuestWindowsFileAttributes)(nil)).Elem() - minAPIVersionForType["GuestWindowsFileAttributes"] = "5.0" } // This describes the arguments to `GuestProcessManager.StartProgramInGuest` that apply @@ -32897,7 +32837,6 @@ type GuestWindowsProgramSpec struct { func init() { t["GuestWindowsProgramSpec"] = reflect.TypeOf((*GuestWindowsProgramSpec)(nil)).Elem() - minAPIVersionForType["GuestWindowsProgramSpec"] = "5.0" } // The destination compute resource is HA-enabled, and HA is not running @@ -32914,7 +32853,6 @@ type HAErrorsAtDest struct { func init() { t["HAErrorsAtDest"] = reflect.TypeOf((*HAErrorsAtDest)(nil)).Elem() - minAPIVersionForType["HAErrorsAtDest"] = "2.5" } type HAErrorsAtDestFault HAErrorsAtDest @@ -33105,11 +33043,15 @@ type HbrDiskMigrationAction struct { // Unit: percentage. For example, if set to 70.0, space utilization is 70%. // If not set, the value is not available. SpaceUtilDstAfter float32 `xml:"spaceUtilDstAfter,omitempty" json:"spaceUtilDstAfter,omitempty"` + // Deprecated as of vSphere8.0 U3, and there is no replacement for it. + // // I/O latency on the source datastore before storage migration. // // Unit: millisecond. // If not set, the value is not available. IoLatencySrcBefore float32 `xml:"ioLatencySrcBefore,omitempty" json:"ioLatencySrcBefore,omitempty"` + // Deprecated as of vSphere8.0 U3, and there is no replacement for it. + // // I/O latency on the destination datastore before storage migration. // // Unit: millisecond. @@ -33119,7 +33061,6 @@ type HbrDiskMigrationAction struct { func init() { t["HbrDiskMigrationAction"] = reflect.TypeOf((*HbrDiskMigrationAction)(nil)).Elem() - minAPIVersionForType["HbrDiskMigrationAction"] = "6.0" } // This data object represents the essential information about the @@ -33152,7 +33093,6 @@ type HbrManagerReplicationVmInfo struct { func init() { t["HbrManagerReplicationVmInfo"] = reflect.TypeOf((*HbrManagerReplicationVmInfo)(nil)).Elem() - minAPIVersionForType["HbrManagerReplicationVmInfo"] = "5.0" } // This data object represents the capabilities of a given @@ -33160,6 +33100,7 @@ func init() { type HbrManagerVmReplicationCapability struct { DynamicData + // Refers instance of `VirtualMachine`. Vm ManagedObjectReference `xml:"vm" json:"vm"` // A string representing the current `QuiesceMode_enum` of the virtual machine. SupportedQuiesceMode string `xml:"supportedQuiesceMode" json:"supportedQuiesceMode"` @@ -33182,7 +33123,6 @@ type HbrManagerVmReplicationCapability struct { func init() { t["HbrManagerVmReplicationCapability"] = reflect.TypeOf((*HbrManagerVmReplicationCapability)(nil)).Elem() - minAPIVersionForType["HbrManagerVmReplicationCapability"] = "6.0" } // Event used to report change in health status of VirtualCenter components. @@ -33198,12 +33138,11 @@ type HealthStatusChangedEvent struct { // Component name. ComponentName string `xml:"componentName" json:"componentName"` // Service Id of component. 
- ServiceId string `xml:"serviceId,omitempty" json:"serviceId,omitempty" vim:"6.0"` + ServiceId string `xml:"serviceId,omitempty" json:"serviceId,omitempty"` } func init() { t["HealthStatusChangedEvent"] = reflect.TypeOf((*HealthStatusChangedEvent)(nil)).Elem() - minAPIVersionForType["HealthStatusChangedEvent"] = "4.0" } // The system health runtime information @@ -33218,7 +33157,6 @@ type HealthSystemRuntime struct { func init() { t["HealthSystemRuntime"] = reflect.TypeOf((*HealthSystemRuntime)(nil)).Elem() - minAPIVersionForType["HealthSystemRuntime"] = "2.5" } type HealthUpdate struct { @@ -33279,7 +33217,6 @@ type HeterogenousHostsBlockingEVC struct { func init() { t["HeterogenousHostsBlockingEVC"] = reflect.TypeOf((*HeterogenousHostsBlockingEVC)(nil)).Elem() - minAPIVersionForType["HeterogenousHostsBlockingEVC"] = "2.5u2" } type HeterogenousHostsBlockingEVCFault HeterogenousHostsBlockingEVC @@ -33305,7 +33242,6 @@ type HostAccessControlEntry struct { func init() { t["HostAccessControlEntry"] = reflect.TypeOf((*HostAccessControlEntry)(nil)).Elem() - minAPIVersionForType["HostAccessControlEntry"] = "6.0" } // Fault thrown when an attempt is made to adjust resource settings @@ -33326,7 +33262,6 @@ type HostAccessRestrictedToManagementServer struct { func init() { t["HostAccessRestrictedToManagementServer"] = reflect.TypeOf((*HostAccessRestrictedToManagementServer)(nil)).Elem() - minAPIVersionForType["HostAccessRestrictedToManagementServer"] = "5.0" } type HostAccessRestrictedToManagementServerFault HostAccessRestrictedToManagementServer @@ -33411,23 +33346,23 @@ type HostActiveDirectory struct { // // You can specify // the following values: - // - `add`: - // Add the host to the domain. The ESX Server will use the - // `HostActiveDirectorySpec` information - // (domain, account user name and password) to call - // `HostActiveDirectoryAuthentication.JoinDomain_Task` and optionally - // configure smart card authentication by calling - // `HostActiveDirectoryAuthentication.DisableSmartCardAuthentication` - // and replacing the trust anchors with those provided. - // - `remove`: - // Remove the host from its current domain. - // The ESX Server will call - // `HostActiveDirectoryAuthentication.LeaveCurrentDomain_Task`, specifying - // True for the force parameter to delete - // existing permissions. - // `HostActiveDirectoryAuthentication.DisableSmartCardAuthentication` - // is also called if smart card authentication is enabled and trust - // anchors are removed. + // - `add`: + // Add the host to the domain. The ESX Server will use the + // `HostActiveDirectorySpec` information + // (domain, account user name and password) to call + // `HostActiveDirectoryAuthentication.JoinDomain_Task` and optionally + // configure smart card authentication by calling + // `HostActiveDirectoryAuthentication.DisableSmartCardAuthentication` + // and replacing the trust anchors with those provided. + // - `remove`: + // Remove the host from its current domain. + // The ESX Server will call + // `HostActiveDirectoryAuthentication.LeaveCurrentDomain_Task`, specifying + // True for the force parameter to delete + // existing permissions. + // `HostActiveDirectoryAuthentication.DisableSmartCardAuthentication` + // is also called if smart card authentication is enabled and trust + // anchors are removed. // // See also `HostConfigChangeOperation_enum`. 
ChangeOperation string `xml:"changeOperation" json:"changeOperation"` @@ -33438,7 +33373,6 @@ type HostActiveDirectory struct { func init() { t["HostActiveDirectory"] = reflect.TypeOf((*HostActiveDirectory)(nil)).Elem() - minAPIVersionForType["HostActiveDirectory"] = "4.1" } // The `HostActiveDirectoryInfo` data object describes ESX host @@ -33462,13 +33396,14 @@ type HostActiveDirectoryInfo struct { // // See `HostActiveDirectoryInfoDomainMembershipStatus_enum`. DomainMembershipStatus string `xml:"domainMembershipStatus,omitempty" json:"domainMembershipStatus,omitempty"` + // Deprecated as of vSphere API 8.0U3, and there is no replacement for it. + // // Whether local smart card authentication is enabled. - SmartCardAuthenticationEnabled *bool `xml:"smartCardAuthenticationEnabled" json:"smartCardAuthenticationEnabled,omitempty" vim:"6.0"` + SmartCardAuthenticationEnabled *bool `xml:"smartCardAuthenticationEnabled" json:"smartCardAuthenticationEnabled,omitempty"` } func init() { t["HostActiveDirectoryInfo"] = reflect.TypeOf((*HostActiveDirectoryInfo)(nil)).Elem() - minAPIVersionForType["HostActiveDirectoryInfo"] = "4.1" } // The `HostActiveDirectorySpec` data object defines @@ -33486,18 +33421,28 @@ type HostActiveDirectorySpec struct { // If set, the CAM server will be used to join the domain // and the userName and password fields // will be ignored. - CamServer string `xml:"camServer,omitempty" json:"camServer,omitempty" vim:"5.0"` + CamServer string `xml:"camServer,omitempty" json:"camServer,omitempty"` // Thumbprint for the SSL certficate of CAM server - Thumbprint string `xml:"thumbprint,omitempty" json:"thumbprint,omitempty" vim:"5.0"` + Thumbprint string `xml:"thumbprint,omitempty" json:"thumbprint,omitempty"` + // PEM-encoded certificate of the CAM server + // This field replaces `HostActiveDirectorySpec.thumbprint`. + // + // If both `HostActiveDirectorySpec.thumbprint` + // and `HostActiveDirectorySpec.certificate` fields are set, the `HostActiveDirectorySpec.thumbprint` + // should match the `HostActiveDirectorySpec.certificate`. + Certificate string `xml:"certificate,omitempty" json:"certificate,omitempty" vim:"8.0.3.0"` + // Deprecated as of vSphere API 8.0U3, and there is no replacement for it. + // // Support smart card authentication of local users. - SmartCardAuthenticationEnabled *bool `xml:"smartCardAuthenticationEnabled" json:"smartCardAuthenticationEnabled,omitempty" vim:"6.0"` + SmartCardAuthenticationEnabled *bool `xml:"smartCardAuthenticationEnabled" json:"smartCardAuthenticationEnabled,omitempty"` + // Deprecated as of vSphere API 8.0U3, and there is no replacement for it. + // // Trusted root certificates for smart cards. - SmartCardTrustAnchors []string `xml:"smartCardTrustAnchors,omitempty" json:"smartCardTrustAnchors,omitempty" vim:"6.0"` + SmartCardTrustAnchors []string `xml:"smartCardTrustAnchors,omitempty" json:"smartCardTrustAnchors,omitempty"` } func init() { t["HostActiveDirectorySpec"] = reflect.TypeOf((*HostActiveDirectorySpec)(nil)).Elem() - minAPIVersionForType["HostActiveDirectorySpec"] = "4.1" } // This event records that adding a host failed. @@ -33529,7 +33474,6 @@ type HostAdminDisableEvent struct { func init() { t["HostAdminDisableEvent"] = reflect.TypeOf((*HostAdminDisableEvent)(nil)).Elem() - minAPIVersionForType["HostAdminDisableEvent"] = "2.5" } // This event records that the administrator permission has been restored. 
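Note (annotation, not part of the patch): the HostActiveDirectorySpec hunk above adds a PEM-encoded `Certificate` field (vSphere API 8.0U3, tagged `vim:"8.0.3.0"`) that supersedes the CAM server `Thumbprint`, and marks the smart-card fields as deprecated. Below is a minimal sketch of filling the spec with the new field; the server name and certificate value are placeholders, and it assumes govmomi at v0.38.0 or newer (where `Certificate` exists).

```go
package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	spec := types.HostActiveDirectorySpec{
		// Hypothetical CAM server; per the field documentation above, the
		// join user name and password are ignored when a CAM server is set.
		CamServer: "cam.example.org",
		// PEM-encoded CAM server certificate (new in 8.0U3). If the older
		// Thumbprint field is also set, it must match this certificate.
		Certificate: "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----",
	}
	fmt.Printf("%+v\n", spec)
}
```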
@@ -33539,7 +33483,6 @@ type HostAdminEnableEvent struct { func init() { t["HostAdminEnableEvent"] = reflect.TypeOf((*HostAdminEnableEvent)(nil)).Elem() - minAPIVersionForType["HostAdminEnableEvent"] = "2.5" } // The `HostApplyProfile` data object provides access to subprofiles @@ -33588,12 +33531,11 @@ type HostApplyProfile struct { // in the list. UsergroupAccount []UserGroupProfile `xml:"usergroupAccount,omitempty" json:"usergroupAccount,omitempty"` // Authentication Configuration. - Authentication *AuthenticationProfile `xml:"authentication,omitempty" json:"authentication,omitempty" vim:"4.1"` + Authentication *AuthenticationProfile `xml:"authentication,omitempty" json:"authentication,omitempty"` } func init() { t["HostApplyProfile"] = reflect.TypeOf((*HostApplyProfile)(nil)).Elem() - minAPIVersionForType["HostApplyProfile"] = "4.0" } // Data object indicating a device instance has been allocated to a VM. @@ -33610,7 +33552,6 @@ type HostAssignableHardwareBinding struct { func init() { t["HostAssignableHardwareBinding"] = reflect.TypeOf((*HostAssignableHardwareBinding)(nil)).Elem() - minAPIVersionForType["HostAssignableHardwareBinding"] = "7.0" } // The AssignableHardwareConfig data object describes properties @@ -33624,7 +33565,6 @@ type HostAssignableHardwareConfig struct { func init() { t["HostAssignableHardwareConfig"] = reflect.TypeOf((*HostAssignableHardwareConfig)(nil)).Elem() - minAPIVersionForType["HostAssignableHardwareConfig"] = "7.0" } // An AttributeOverride provides a name-value pair that overrides @@ -33655,7 +33595,6 @@ type HostAssignableHardwareConfigAttributeOverride struct { func init() { t["HostAssignableHardwareConfigAttributeOverride"] = reflect.TypeOf((*HostAssignableHardwareConfigAttributeOverride)(nil)).Elem() - minAPIVersionForType["HostAssignableHardwareConfigAttributeOverride"] = "7.0" } // The `HostAuthenticationManagerInfo` data object provides @@ -33665,16 +33604,15 @@ type HostAuthenticationManagerInfo struct { // An array containing entries for local authentication and host // Active Directory authentication. - // - `HostLocalAuthenticationInfo` - Local authentication is always enabled. - // - `HostActiveDirectoryInfo` - Host Active Directory authentication information - // includes the name of the domain, membership status, - // and a list of other domains trusted by the membership domain. + // - `HostLocalAuthenticationInfo` - Local authentication is always enabled. + // - `HostActiveDirectoryInfo` - Host Active Directory authentication information + // includes the name of the domain, membership status, + // and a list of other domains trusted by the membership domain. AuthConfig []BaseHostAuthenticationStoreInfo `xml:"authConfig,typeattr" json:"authConfig"` } func init() { t["HostAuthenticationManagerInfo"] = reflect.TypeOf((*HostAuthenticationManagerInfo)(nil)).Elem() - minAPIVersionForType["HostAuthenticationManagerInfo"] = "4.1" } // The `HostAuthenticationStoreInfo` base class defines status information @@ -33683,15 +33621,14 @@ type HostAuthenticationStoreInfo struct { DynamicData // Indicates whether the authentication store is configured. - // - Host Active Directory authentication - enabled - // is True if the host is a member of a domain. - // - Local authentication - enabled is always True. + // - Host Active Directory authentication - enabled + // is True if the host is a member of a domain. + // - Local authentication - enabled is always True. 
Enabled bool `xml:"enabled" json:"enabled"` } func init() { t["HostAuthenticationStoreInfo"] = reflect.TypeOf((*HostAuthenticationStoreInfo)(nil)).Elem() - minAPIVersionForType["HostAuthenticationStoreInfo"] = "4.1" } // Contains the entire auto-start/auto-stop configuration. @@ -33720,14 +33657,14 @@ type HostBIOSInfo struct { // The release date for the BIOS. ReleaseDate *time.Time `xml:"releaseDate" json:"releaseDate,omitempty"` // The vendor for the BIOS. - Vendor string `xml:"vendor,omitempty" json:"vendor,omitempty" vim:"6.5"` + Vendor string `xml:"vendor,omitempty" json:"vendor,omitempty"` // BIOS Major Release - MajorRelease int32 `xml:"majorRelease,omitempty" json:"majorRelease,omitempty" vim:"6.5"` + MajorRelease int32 `xml:"majorRelease,omitempty" json:"majorRelease,omitempty"` // "BIOS Minor Release - MinorRelease int32 `xml:"minorRelease,omitempty" json:"minorRelease,omitempty" vim:"6.5"` + MinorRelease int32 `xml:"minorRelease,omitempty" json:"minorRelease,omitempty"` FirmwareMajorRelease int32 `xml:"firmwareMajorRelease,omitempty" json:"firmwareMajorRelease,omitempty"` // Embedded Controller Firmware Minor Release - FirmwareMinorRelease int32 `xml:"firmwareMinorRelease,omitempty" json:"firmwareMinorRelease,omitempty" vim:"6.5"` + FirmwareMinorRelease int32 `xml:"firmwareMinorRelease,omitempty" json:"firmwareMinorRelease,omitempty"` // Firmware Type of the host. // // The set of supported values is described @@ -33770,7 +33707,6 @@ type HostBootDevice struct { func init() { t["HostBootDevice"] = reflect.TypeOf((*HostBootDevice)(nil)).Elem() - minAPIVersionForType["HostBootDevice"] = "2.5" } // This data object represents the boot device information of the host. @@ -33788,7 +33724,6 @@ type HostBootDeviceInfo struct { func init() { t["HostBootDeviceInfo"] = reflect.TypeOf((*HostBootDeviceInfo)(nil)).Elem() - minAPIVersionForType["HostBootDeviceInfo"] = "2.5" } // Host solid state drive cache configuration information. @@ -33806,7 +33741,6 @@ type HostCacheConfigurationInfo struct { func init() { t["HostCacheConfigurationInfo"] = reflect.TypeOf((*HostCacheConfigurationInfo)(nil)).Elem() - minAPIVersionForType["HostCacheConfigurationInfo"] = "5.0" } // Host cache configuration specification. @@ -33827,7 +33761,6 @@ type HostCacheConfigurationSpec struct { func init() { t["HostCacheConfigurationSpec"] = reflect.TypeOf((*HostCacheConfigurationSpec)(nil)).Elem() - minAPIVersionForType["HostCacheConfigurationSpec"] = "5.0" } // Specifies the capabilities of the particular host. @@ -33847,7 +33780,7 @@ type HostCapability struct { // `ResourcePool.UpdateConfig`, // `ResourcePool.UpdateChildResourceConfiguration` // cannot be used for changing the cpu/memory resource configurations. - CpuMemoryResourceConfigurationSupported bool `xml:"cpuMemoryResourceConfigurationSupported" json:"cpuMemoryResourceConfigurationSupported" vim:"2.5"` + CpuMemoryResourceConfigurationSupported bool `xml:"cpuMemoryResourceConfigurationSupported" json:"cpuMemoryResourceConfigurationSupported"` // Flag indicating whether rebooting the host is supported. RebootSupported bool `xml:"rebootSupported" json:"rebootSupported"` // Flag indicating whether the host can be powered off @@ -33856,12 +33789,12 @@ type HostCapability struct { VmotionSupported bool `xml:"vmotionSupported" json:"vmotionSupported"` // Flag indicating whether you can put the host in a power down // state, from which it can be powered up automatically. 
- StandbySupported bool `xml:"standbySupported" json:"standbySupported" vim:"2.5"` + StandbySupported bool `xml:"standbySupported" json:"standbySupported"` // Flag indicating whether the host supports // IPMI (Intelligent Platform Management Interface). // // XXX - Make ipmiSupported optional until there is a compatible hostagent. - IpmiSupported *bool `xml:"ipmiSupported" json:"ipmiSupported,omitempty" vim:"4.0"` + IpmiSupported *bool `xml:"ipmiSupported" json:"ipmiSupported,omitempty"` // The maximum number of virtual machines that can exist on this host. // // If this capability is not set, the number of virtual machines is @@ -33885,7 +33818,7 @@ type HostCapability struct { // `HostSystem` in this case. // // If this capability is not set, the number is unknown. - MaxRegisteredVMs int32 `xml:"maxRegisteredVMs,omitempty" json:"maxRegisteredVMs,omitempty" vim:"5.1"` + MaxRegisteredVMs int32 `xml:"maxRegisteredVMs,omitempty" json:"maxRegisteredVMs,omitempty"` // Flag indicating whether datastore principal user // is supported on the host. DatastorePrincipalSupported bool `xml:"datastorePrincipalSupported" json:"datastorePrincipalSupported"` @@ -33921,22 +33854,22 @@ type HostCapability struct { // after the relocate. // 3\) The source and destination hosts are the same product // version. - RestrictedSnapshotRelocateSupported bool `xml:"restrictedSnapshotRelocateSupported" json:"restrictedSnapshotRelocateSupported" vim:"2.5"` + RestrictedSnapshotRelocateSupported bool `xml:"restrictedSnapshotRelocateSupported" json:"restrictedSnapshotRelocateSupported"` // Flag indicating whether virtual machine execution on this host involves // a swapfile for each virtual machine. // // If true, the swapfile placement // for a powered-on virtual machine is advertised in its FileLayout by // the `swapFile` property. - PerVmSwapFiles bool `xml:"perVmSwapFiles" json:"perVmSwapFiles" vim:"2.5"` + PerVmSwapFiles bool `xml:"perVmSwapFiles" json:"perVmSwapFiles"` // Flag indicating whether the host supports selecting a datastore that // that may be used to store virtual machine swapfiles. - LocalSwapDatastoreSupported bool `xml:"localSwapDatastoreSupported" json:"localSwapDatastoreSupported" vim:"2.5"` + LocalSwapDatastoreSupported bool `xml:"localSwapDatastoreSupported" json:"localSwapDatastoreSupported"` // Flag indicating whether the host supports participating in a VMotion // where the virtual machine swapfile is not visible to the destination. - UnsharedSwapVMotionSupported bool `xml:"unsharedSwapVMotionSupported" json:"unsharedSwapVMotionSupported" vim:"2.5"` + UnsharedSwapVMotionSupported bool `xml:"unsharedSwapVMotionSupported" json:"unsharedSwapVMotionSupported"` // Flag indicating whether background snapshots are supported on this host. - BackgroundSnapshotsSupported bool `xml:"backgroundSnapshotsSupported" json:"backgroundSnapshotsSupported" vim:"2.5"` + BackgroundSnapshotsSupported bool `xml:"backgroundSnapshotsSupported" json:"backgroundSnapshotsSupported"` // Flag to indicate whether the server returns unit numbers in a // pre-assigned range for devices on the PCI bus. // @@ -33951,7 +33884,7 @@ type HostCapability struct { // between devices of the same type. // `VirtualDevice.unitNumber` // This property is only available for devices on the pci controller. 
- PreAssignedPCIUnitNumbersSupported bool `xml:"preAssignedPCIUnitNumbersSupported" json:"preAssignedPCIUnitNumbersSupported" vim:"2.5"` + PreAssignedPCIUnitNumbersSupported bool `xml:"preAssignedPCIUnitNumbersSupported" json:"preAssignedPCIUnitNumbersSupported"` // Indicates whether the screenshot retrival over https is supported for this host's // virtual machines. // @@ -33964,43 +33897,43 @@ type HostCapability struct { // The client must use an authenticated session with privilege // VirtualMachine.Interact.ConsoleInteract on the requested virtual machine or, // in the case of a snapshot, the virtual machine associated with that snapshot. - ScreenshotSupported bool `xml:"screenshotSupported" json:"screenshotSupported" vim:"2.5"` + ScreenshotSupported bool `xml:"screenshotSupported" json:"screenshotSupported"` // Indicates whether scaling is supported for screenshots retrieved over https. // // If true, screenshot retrieval supports the additional optional // parameters 'width' and 'height'. After cropping, the returned image will be scaled // to these dimensions. If only one of these parameters is specified, default behavior // is to return an image roughly proportional to the source image. - ScaledScreenshotSupported bool `xml:"scaledScreenshotSupported" json:"scaledScreenshotSupported" vim:"2.5"` + ScaledScreenshotSupported bool `xml:"scaledScreenshotSupported" json:"scaledScreenshotSupported"` // Indicates whether the storage of a powered-on virtual machine may be // relocated. - StorageVMotionSupported *bool `xml:"storageVMotionSupported" json:"storageVMotionSupported,omitempty" vim:"4.0"` + StorageVMotionSupported *bool `xml:"storageVMotionSupported" json:"storageVMotionSupported,omitempty"` // Indicates whether the storage of a powered-on virtual machine may be // relocated while simultaneously changing the execution host of the // virtual machine. - VmotionWithStorageVMotionSupported *bool `xml:"vmotionWithStorageVMotionSupported" json:"vmotionWithStorageVMotionSupported,omitempty" vim:"4.0"` + VmotionWithStorageVMotionSupported *bool `xml:"vmotionWithStorageVMotionSupported" json:"vmotionWithStorageVMotionSupported,omitempty"` // Indicates whether the network of a powered-on virtual machine can be // changed while simultaneously changing the execution host of the // virtual machine. - VmotionAcrossNetworkSupported *bool `xml:"vmotionAcrossNetworkSupported" json:"vmotionAcrossNetworkSupported,omitempty" vim:"5.5"` + VmotionAcrossNetworkSupported *bool `xml:"vmotionAcrossNetworkSupported" json:"vmotionAcrossNetworkSupported,omitempty"` // Maximum number of migrating disks allowed of a migrating VM during SVMotion. // // If this capability is not set, then the maximum is considered to be 64. - MaxNumDisksSVMotion int32 `xml:"maxNumDisksSVMotion,omitempty" json:"maxNumDisksSVMotion,omitempty" vim:"6.0"` + MaxNumDisksSVMotion int32 `xml:"maxNumDisksSVMotion,omitempty" json:"maxNumDisksSVMotion,omitempty"` // Maximum version of vDiskVersion supported by this host. // // If this capability is not set, then the maximum is considered to be 6. MaxVirtualDiskDescVersionSupported int32 `xml:"maxVirtualDiskDescVersionSupported,omitempty" json:"maxVirtualDiskDescVersionSupported,omitempty" vim:"8.0.1.0"` // Indicates whether a dedicated nic can be selected for vSphere Replication // LWD traffic, i.e., from the primary host to the VR server. 
- HbrNicSelectionSupported *bool `xml:"hbrNicSelectionSupported" json:"hbrNicSelectionSupported,omitempty" vim:"5.1"` + HbrNicSelectionSupported *bool `xml:"hbrNicSelectionSupported" json:"hbrNicSelectionSupported,omitempty"` // Indicates whether a dedicated nic can be selected for vSphere Replication // NFC traffic, i.e., from the VR server to the secondary host. - VrNfcNicSelectionSupported *bool `xml:"vrNfcNicSelectionSupported" json:"vrNfcNicSelectionSupported,omitempty" vim:"6.0"` + VrNfcNicSelectionSupported *bool `xml:"vrNfcNicSelectionSupported" json:"vrNfcNicSelectionSupported,omitempty"` // Deprecated as of vSphere API 6.0. // // Indicates whether this host supports record and replay - RecordReplaySupported *bool `xml:"recordReplaySupported" json:"recordReplaySupported,omitempty" vim:"4.0"` + RecordReplaySupported *bool `xml:"recordReplaySupported" json:"recordReplaySupported,omitempty"` // Deprecated as of vSphere API 6.0. // // Indicates whether this host supports Fault Tolerance @@ -34021,7 +33954,7 @@ type HostCapability struct { // contain values for this property when some other property on the DataObject changes. // If this update is a result of a call to WaitForUpdatesEx with a non-empty // version parameter, the value for this property may not be current. - FtSupported *bool `xml:"ftSupported" json:"ftSupported,omitempty" vim:"4.0"` + FtSupported *bool `xml:"ftSupported" json:"ftSupported,omitempty"` // Deprecated as of vSphere API 4.1, use // `HostCapability.replayCompatibilityIssues`. // @@ -34030,7 +33963,7 @@ type HostCapability struct { // // `HostReplayUnsupportedReason_enum` // represents the set of possible values. - ReplayUnsupportedReason string `xml:"replayUnsupportedReason,omitempty" json:"replayUnsupportedReason,omitempty" vim:"4.0"` + ReplayUnsupportedReason string `xml:"replayUnsupportedReason,omitempty" json:"replayUnsupportedReason,omitempty"` // Deprecated as of vSphere API 6.0. // // For a host which doesn't support replay, indicates all the reasons @@ -34038,9 +33971,9 @@ type HostCapability struct { // // `HostReplayUnsupportedReason_enum` // lists the set of possible values. - ReplayCompatibilityIssues []string `xml:"replayCompatibilityIssues,omitempty" json:"replayCompatibilityIssues,omitempty" vim:"4.1"` + ReplayCompatibilityIssues []string `xml:"replayCompatibilityIssues,omitempty" json:"replayCompatibilityIssues,omitempty"` // Indicates whether this host supports smp fault tolerance - SmpFtSupported *bool `xml:"smpFtSupported" json:"smpFtSupported,omitempty" vim:"6.0"` + SmpFtSupported *bool `xml:"smpFtSupported" json:"smpFtSupported,omitempty"` // Deprecated as of vSphere API 6.0. // // For a host which doesn't support Fault Tolerance, indicates all the reasons @@ -34048,17 +33981,17 @@ type HostCapability struct { // // `HostCapabilityFtUnsupportedReason_enum` // lists the set of possible values. - FtCompatibilityIssues []string `xml:"ftCompatibilityIssues,omitempty" json:"ftCompatibilityIssues,omitempty" vim:"4.1"` + FtCompatibilityIssues []string `xml:"ftCompatibilityIssues,omitempty" json:"ftCompatibilityIssues,omitempty"` // For a host which doesn't support smp fault tolerance, indicates all the // reasons for the incompatibility. // // `HostCapabilityFtUnsupportedReason_enum` lists the set of possible // values. 
- SmpFtCompatibilityIssues []string `xml:"smpFtCompatibilityIssues,omitempty" json:"smpFtCompatibilityIssues,omitempty" vim:"6.0"` + SmpFtCompatibilityIssues []string `xml:"smpFtCompatibilityIssues,omitempty" json:"smpFtCompatibilityIssues,omitempty"` // The maximum number of vCPUs allowed for a fault-tolerant virtual machine. - MaxVcpusPerFtVm int32 `xml:"maxVcpusPerFtVm,omitempty" json:"maxVcpusPerFtVm,omitempty" vim:"6.0"` + MaxVcpusPerFtVm int32 `xml:"maxVcpusPerFtVm,omitempty" json:"maxVcpusPerFtVm,omitempty"` // Flag indicating whether this host supports SSL thumbprint authentication - LoginBySSLThumbprintSupported *bool `xml:"loginBySSLThumbprintSupported" json:"loginBySSLThumbprintSupported,omitempty" vim:"4.0"` + LoginBySSLThumbprintSupported *bool `xml:"loginBySSLThumbprintSupported" json:"loginBySSLThumbprintSupported,omitempty"` // Indicates whether or not cloning a virtual machine from a snapshot // point is allowed. // @@ -34067,7 +34000,7 @@ type HostCapability struct { // destination host for the clone. // // See also `VirtualMachineCloneSpec.snapshot`. - CloneFromSnapshotSupported *bool `xml:"cloneFromSnapshotSupported" json:"cloneFromSnapshotSupported,omitempty" vim:"4.0"` + CloneFromSnapshotSupported *bool `xml:"cloneFromSnapshotSupported" json:"cloneFromSnapshotSupported,omitempty"` // Flag indicating whether explicitly creating arbirary configurations of // delta disk backings is supported. // @@ -34088,17 +34021,20 @@ type HostCapability struct { // virtual machines will not affect the operation of the other virtual machine. // // See also `VirtualDiskSparseVer1BackingInfo.parent`, `VirtualDiskSparseVer2BackingInfo.parent`, `VirtualDiskFlatVer1BackingInfo.parent`, `VirtualDiskFlatVer2BackingInfo.parent`, `VirtualDiskRawDiskMappingVer1BackingInfo.parent`, `VirtualMachine.PromoteDisks_Task`, `VirtualMachineRelocateSpec.diskMoveType`, `VirtualMachineRelocateSpecDiskLocator.diskMoveType`. - DeltaDiskBackingsSupported *bool `xml:"deltaDiskBackingsSupported" json:"deltaDiskBackingsSupported,omitempty" vim:"4.0"` + DeltaDiskBackingsSupported *bool `xml:"deltaDiskBackingsSupported" json:"deltaDiskBackingsSupported,omitempty"` // Indicates whether network traffic shaping on a // per virtual machine basis is supported. - PerVMNetworkTrafficShapingSupported *bool `xml:"perVMNetworkTrafficShapingSupported" json:"perVMNetworkTrafficShapingSupported,omitempty" vim:"2.5 U2"` + PerVMNetworkTrafficShapingSupported *bool `xml:"perVMNetworkTrafficShapingSupported" json:"perVMNetworkTrafficShapingSupported,omitempty"` // Flag indicating whether this host supports the integrity measurement using // a TPM device. - TpmSupported *bool `xml:"tpmSupported" json:"tpmSupported,omitempty" vim:"4.0"` + TpmSupported *bool `xml:"tpmSupported" json:"tpmSupported,omitempty"` // TPM version if supported by this host. - TpmVersion string `xml:"tpmVersion,omitempty" json:"tpmVersion,omitempty" vim:"6.7"` + TpmVersion string `xml:"tpmVersion,omitempty" json:"tpmVersion,omitempty"` // Flag indicating whether Intel TXT is enabled on this host. - TxtEnabled *bool `xml:"txtEnabled" json:"txtEnabled,omitempty" vim:"6.7"` + // + // TPM attestation may be used to definitively determine the Intel TXT + // Measured Launch Environment (MLE) details. + TxtEnabled *bool `xml:"txtEnabled" json:"txtEnabled,omitempty"` // Deprecated as of vSphere API 6.5 use // `featureCapability`. // @@ -34109,13 +34045,15 @@ type HostCapability struct { // licensing. 
For any feature marked '-', reference the // `cpuFeature` array of the host's // HardwareInfo to determine the correct value. - SupportedCpuFeature []HostCpuIdInfo `xml:"supportedCpuFeature,omitempty" json:"supportedCpuFeature,omitempty" vim:"4.0"` + SupportedCpuFeature []HostCpuIdInfo `xml:"supportedCpuFeature,omitempty" json:"supportedCpuFeature,omitempty"` // Indicates whether the host supports configuring hardware // virtualization (HV) support for virtual machines. - VirtualExecUsageSupported *bool `xml:"virtualExecUsageSupported" json:"virtualExecUsageSupported,omitempty" vim:"4.0"` + VirtualExecUsageSupported *bool `xml:"virtualExecUsageSupported" json:"virtualExecUsageSupported,omitempty"` + // Deprecated as of vSphere8.0 U3, and there is no replacement for it. + // // Indicates whether the host supports storage I/O resource // management. - StorageIORMSupported *bool `xml:"storageIORMSupported" json:"storageIORMSupported,omitempty" vim:"4.1"` + StorageIORMSupported *bool `xml:"storageIORMSupported" json:"storageIORMSupported,omitempty"` // Deprecated as of vSphere API 8.0. VMDirectPath Gen 2 is no longer supported and // there is no replacement. // @@ -34129,7 +34067,7 @@ type HostCapability struct { // `HostCapability.vmDirectPathGen2UnsupportedReasonExtended`. // // See also `PhysicalNic.vmDirectPathGen2Supported`. - VmDirectPathGen2Supported *bool `xml:"vmDirectPathGen2Supported" json:"vmDirectPathGen2Supported,omitempty" vim:"4.1"` + VmDirectPathGen2Supported *bool `xml:"vmDirectPathGen2Supported" json:"vmDirectPathGen2Supported,omitempty"` // Deprecated as of vSphere API 8.0. VMDirectPath Gen 2 is no longer supported and // there is no replacement. // @@ -34147,19 +34085,19 @@ type HostCapability struct { // If the reason "hostNptIncompatibleProduct" is provided, then that will // be the only provided reason, as the host software is incapable of // providing additional information. - VmDirectPathGen2UnsupportedReason []string `xml:"vmDirectPathGen2UnsupportedReason,omitempty" json:"vmDirectPathGen2UnsupportedReason,omitempty" vim:"4.1"` + VmDirectPathGen2UnsupportedReason []string `xml:"vmDirectPathGen2UnsupportedReason,omitempty" json:"vmDirectPathGen2UnsupportedReason,omitempty"` // Deprecated as of vSphere API 8.0. VMDirectPath Gen 2 is no longer supported and // there is no replacement. // // If `HostCapability.vmDirectPathGen2Supported` is false, this property may // contain an explanation provided by the platform, beyond the reasons (if // any) enumerated in `HostCapability.vmDirectPathGen2UnsupportedReason`. - VmDirectPathGen2UnsupportedReasonExtended string `xml:"vmDirectPathGen2UnsupportedReasonExtended,omitempty" json:"vmDirectPathGen2UnsupportedReasonExtended,omitempty" vim:"4.1"` + VmDirectPathGen2UnsupportedReasonExtended string `xml:"vmDirectPathGen2UnsupportedReasonExtended,omitempty" json:"vmDirectPathGen2UnsupportedReasonExtended,omitempty"` // List of VMFS major versions supported by the host. - SupportedVmfsMajorVersion []int32 `xml:"supportedVmfsMajorVersion,omitempty" json:"supportedVmfsMajorVersion,omitempty" vim:"5.0"` + SupportedVmfsMajorVersion []int32 `xml:"supportedVmfsMajorVersion,omitempty" json:"supportedVmfsMajorVersion,omitempty"` // Indicates whether the host supports vStorage Hardware // acceleration. 
- VStorageCapable *bool `xml:"vStorageCapable" json:"vStorageCapable,omitempty" vim:"4.1"` + VStorageCapable *bool `xml:"vStorageCapable" json:"vStorageCapable,omitempty"` // Indicates whether this host supports unrestricted relocation of virtual // machines with snapshots. // @@ -34168,20 +34106,20 @@ type HostCapability struct { // restrict the layout of snapshot files or disks of the virtual machine, nor // its power state. If the virtual machine is powered on, a storage vmotion // will be performed to relocate its snapshots and disks. - SnapshotRelayoutSupported *bool `xml:"snapshotRelayoutSupported" json:"snapshotRelayoutSupported,omitempty" vim:"5.0"` + SnapshotRelayoutSupported *bool `xml:"snapshotRelayoutSupported" json:"snapshotRelayoutSupported,omitempty"` // Indicates whether this host supports ip address based restrictions in // the firewall configuration. - FirewallIpRulesSupported *bool `xml:"firewallIpRulesSupported" json:"firewallIpRulesSupported,omitempty" vim:"5.0"` + FirewallIpRulesSupported *bool `xml:"firewallIpRulesSupported" json:"firewallIpRulesSupported,omitempty"` // Indicates whether this host supports package information in service // configuration. - ServicePackageInfoSupported *bool `xml:"servicePackageInfoSupported" json:"servicePackageInfoSupported,omitempty" vim:"5.0"` + ServicePackageInfoSupported *bool `xml:"servicePackageInfoSupported" json:"servicePackageInfoSupported,omitempty"` // The maximum number of virtual machines that can be run on the host. // // An unset value indicates that the value could not be obtained. In contrast // to `HostCapability.maxRunningVMs`, this value is the minimum of (i) the maximum // number supported by the hardware and (ii) the maximum number permitted by // the host license. - MaxHostRunningVms int32 `xml:"maxHostRunningVms,omitempty" json:"maxHostRunningVms,omitempty" vim:"5.0"` + MaxHostRunningVms int32 `xml:"maxHostRunningVms,omitempty" json:"maxHostRunningVms,omitempty"` // The maximum number of virtual CPUs that can be run on the host. // // An unset @@ -34189,201 +34127,203 @@ type HostCapability struct { // `HostCapability.maxSupportedVcpus`, this value is the minimum of (i) the maximum // number supported by the hardware and (ii) the maximum number permitted by // the host license. - MaxHostSupportedVcpus int32 `xml:"maxHostSupportedVcpus,omitempty" json:"maxHostSupportedVcpus,omitempty" vim:"5.0"` + MaxHostSupportedVcpus int32 `xml:"maxHostSupportedVcpus,omitempty" json:"maxHostSupportedVcpus,omitempty"` // Indicates whether the host is capable of mounting/unmounting // VMFS datastores. - VmfsDatastoreMountCapable *bool `xml:"vmfsDatastoreMountCapable" json:"vmfsDatastoreMountCapable,omitempty" vim:"5.0"` + VmfsDatastoreMountCapable *bool `xml:"vmfsDatastoreMountCapable" json:"vmfsDatastoreMountCapable,omitempty"` // Indicates whether the host is capable of accessing a VMFS disk // when there are eight or more hosts accessing the disk already. - EightPlusHostVmfsSharedAccessSupported *bool `xml:"eightPlusHostVmfsSharedAccessSupported" json:"eightPlusHostVmfsSharedAccessSupported,omitempty" vim:"5.1"` + EightPlusHostVmfsSharedAccessSupported *bool `xml:"eightPlusHostVmfsSharedAccessSupported" json:"eightPlusHostVmfsSharedAccessSupported,omitempty"` // Indicates whether the host supports nested hardware-assisted virtualization. 
- NestedHVSupported *bool `xml:"nestedHVSupported" json:"nestedHVSupported,omitempty" vim:"5.1"` + NestedHVSupported *bool `xml:"nestedHVSupported" json:"nestedHVSupported,omitempty"` // Indicates whether the host supports virtual CPU performance counters. - VPMCSupported *bool `xml:"vPMCSupported" json:"vPMCSupported,omitempty" vim:"5.1"` + VPMCSupported *bool `xml:"vPMCSupported" json:"vPMCSupported,omitempty"` // Indicates whether the host supports VMCI for communication // between virtual machines. - InterVMCommunicationThroughVMCISupported *bool `xml:"interVMCommunicationThroughVMCISupported" json:"interVMCommunicationThroughVMCISupported,omitempty" vim:"5.1"` + InterVMCommunicationThroughVMCISupported *bool `xml:"interVMCommunicationThroughVMCISupported" json:"interVMCommunicationThroughVMCISupported,omitempty"` // Indicates whether the host supports scheduled hardware upgrades. // // See also `VirtualMachineConfigInfo.scheduledHardwareUpgradeInfo`. - ScheduledHardwareUpgradeSupported *bool `xml:"scheduledHardwareUpgradeSupported" json:"scheduledHardwareUpgradeSupported,omitempty" vim:"5.1"` + ScheduledHardwareUpgradeSupported *bool `xml:"scheduledHardwareUpgradeSupported" json:"scheduledHardwareUpgradeSupported,omitempty"` // Indicated whether the host supports feature capabilities // for EVC mode. - FeatureCapabilitiesSupported *bool `xml:"featureCapabilitiesSupported" json:"featureCapabilitiesSupported,omitempty" vim:"5.1"` + FeatureCapabilitiesSupported *bool `xml:"featureCapabilitiesSupported" json:"featureCapabilitiesSupported,omitempty"` // Indicates whether the host supports latency sensitivity for the // virtual machines. - LatencySensitivitySupported *bool `xml:"latencySensitivitySupported" json:"latencySensitivitySupported,omitempty" vim:"5.1"` + LatencySensitivitySupported *bool `xml:"latencySensitivitySupported" json:"latencySensitivitySupported,omitempty"` // Indicates that host supports Object-based Storage System and // Storage-Profile based disk provisioning. - StoragePolicySupported *bool `xml:"storagePolicySupported" json:"storagePolicySupported,omitempty" vim:"5.5"` + StoragePolicySupported *bool `xml:"storagePolicySupported" json:"storagePolicySupported,omitempty"` // Indicates if 3D hardware acceleration for virtual machines is supported. - Accel3dSupported *bool `xml:"accel3dSupported" json:"accel3dSupported,omitempty" vim:"5.1"` + Accel3dSupported *bool `xml:"accel3dSupported" json:"accel3dSupported,omitempty"` // Indicates that this host uses a reliable memory aware allocation policy. - ReliableMemoryAware *bool `xml:"reliableMemoryAware" json:"reliableMemoryAware,omitempty" vim:"5.5"` + ReliableMemoryAware *bool `xml:"reliableMemoryAware" json:"reliableMemoryAware,omitempty"` // Indicates whether the host supports Multiple Instance TCP/IP stack - MultipleNetworkStackInstanceSupported *bool `xml:"multipleNetworkStackInstanceSupported" json:"multipleNetworkStackInstanceSupported,omitempty" vim:"5.5"` + MultipleNetworkStackInstanceSupported *bool `xml:"multipleNetworkStackInstanceSupported" json:"multipleNetworkStackInstanceSupported,omitempty"` // Indicates whether the message bus proxy is supported - MessageBusProxySupported *bool `xml:"messageBusProxySupported" json:"messageBusProxySupported,omitempty" vim:"6.0"` + MessageBusProxySupported *bool `xml:"messageBusProxySupported" json:"messageBusProxySupported,omitempty"` // Indicates whether the host supports VSAN functionality. // // See also `HostVsanSystem`. 
- VsanSupported *bool `xml:"vsanSupported" json:"vsanSupported,omitempty" vim:"5.5"` + VsanSupported *bool `xml:"vsanSupported" json:"vsanSupported,omitempty"` // Indicates whether the host supports vFlash. - VFlashSupported *bool `xml:"vFlashSupported" json:"vFlashSupported,omitempty" vim:"5.5"` + VFlashSupported *bool `xml:"vFlashSupported" json:"vFlashSupported,omitempty"` // Whether this host supports HostAccessManager for controlling direct // access to the host and for better lockdown mode management. - HostAccessManagerSupported *bool `xml:"hostAccessManagerSupported" json:"hostAccessManagerSupported,omitempty" vim:"6.0"` + HostAccessManagerSupported *bool `xml:"hostAccessManagerSupported" json:"hostAccessManagerSupported,omitempty"` // Indicates whether a dedicated nic can be selected for vSphere Provisioning // NFC traffic. - ProvisioningNicSelectionSupported *bool `xml:"provisioningNicSelectionSupported" json:"provisioningNicSelectionSupported,omitempty" vim:"6.0"` + ProvisioningNicSelectionSupported *bool `xml:"provisioningNicSelectionSupported" json:"provisioningNicSelectionSupported,omitempty"` // Whether this host supports NFS41 file systems. - Nfs41Supported *bool `xml:"nfs41Supported" json:"nfs41Supported,omitempty" vim:"6.0"` + Nfs41Supported *bool `xml:"nfs41Supported" json:"nfs41Supported,omitempty"` // Whether this host support NFS41 Kerberos 5\* security type. - Nfs41Krb5iSupported *bool `xml:"nfs41Krb5iSupported" json:"nfs41Krb5iSupported,omitempty" vim:"6.5"` + Nfs41Krb5iSupported *bool `xml:"nfs41Krb5iSupported" json:"nfs41Krb5iSupported,omitempty"` // Indicates whether turning on/off local disk LED is supported // on the host. // // See also `HostStorageSystem.TurnDiskLocatorLedOn_Task`, `HostStorageSystem.TurnDiskLocatorLedOff_Task`. - TurnDiskLocatorLedSupported *bool `xml:"turnDiskLocatorLedSupported" json:"turnDiskLocatorLedSupported,omitempty" vim:"6.0"` + TurnDiskLocatorLedSupported *bool `xml:"turnDiskLocatorLedSupported" json:"turnDiskLocatorLedSupported,omitempty"` // Indicates whether this host supports VirtualVolume based Datastore. - VirtualVolumeDatastoreSupported *bool `xml:"virtualVolumeDatastoreSupported" json:"virtualVolumeDatastoreSupported,omitempty" vim:"6.0"` + VirtualVolumeDatastoreSupported *bool `xml:"virtualVolumeDatastoreSupported" json:"virtualVolumeDatastoreSupported,omitempty"` // Indicates whether mark disk as SSD or Non-SSD is supported // on the host. // // See also `HostStorageSystem.MarkAsSsd_Task`, `HostStorageSystem.MarkAsNonSsd_Task`. - MarkAsSsdSupported *bool `xml:"markAsSsdSupported" json:"markAsSsdSupported,omitempty" vim:"6.0"` + MarkAsSsdSupported *bool `xml:"markAsSsdSupported" json:"markAsSsdSupported,omitempty"` // Indicates whether mark disk as local or remote is supported // on the host. // // See also `HostStorageSystem.MarkAsLocal_Task`, `HostStorageSystem.MarkAsNonLocal_Task`. - MarkAsLocalSupported *bool `xml:"markAsLocalSupported" json:"markAsLocalSupported,omitempty" vim:"6.0"` + MarkAsLocalSupported *bool `xml:"markAsLocalSupported" json:"markAsLocalSupported,omitempty"` + // Deprecated as of vSphere API 8.0U3, and there is no replacement for it. + // // Indicates whether this host supports local two-factor user // authentication using smart cards. // // See also `HostActiveDirectoryAuthentication.EnableSmartCardAuthentication`. 
- SmartCardAuthenticationSupported *bool `xml:"smartCardAuthenticationSupported" json:"smartCardAuthenticationSupported,omitempty" vim:"6.0"` + SmartCardAuthenticationSupported *bool `xml:"smartCardAuthenticationSupported" json:"smartCardAuthenticationSupported,omitempty"` // Indicates whether this host supports persistent memory. // // If value is not specified, it should be considered as not supported. - PMemSupported *bool `xml:"pMemSupported" json:"pMemSupported,omitempty" vim:"6.7"` + PMemSupported *bool `xml:"pMemSupported" json:"pMemSupported,omitempty"` // Indicates whether this host supports snapshots for VMs with virtual // devices backed by persistent memory. // // If value is not specified, it should be considered as not supported. - PMemSnapshotSupported *bool `xml:"pMemSnapshotSupported" json:"pMemSnapshotSupported,omitempty" vim:"6.7"` + PMemSnapshotSupported *bool `xml:"pMemSnapshotSupported" json:"pMemSnapshotSupported,omitempty"` // Flag indicating whether Cryptographer feature is supported. - CryptoSupported *bool `xml:"cryptoSupported" json:"cryptoSupported,omitempty" vim:"6.5"` + CryptoSupported *bool `xml:"cryptoSupported" json:"cryptoSupported,omitempty"` // Indicates whether this host supports granular datastore cache update. // // If value is not specified, it should be considered as not supported. - OneKVolumeAPIsSupported *bool `xml:"oneKVolumeAPIsSupported" json:"oneKVolumeAPIsSupported,omitempty" vim:"6.5"` + OneKVolumeAPIsSupported *bool `xml:"oneKVolumeAPIsSupported" json:"oneKVolumeAPIsSupported,omitempty"` // Flag indicating whether default gateway can be configured on a // vmkernel nic. - GatewayOnNicSupported *bool `xml:"gatewayOnNicSupported" json:"gatewayOnNicSupported,omitempty" vim:"6.5"` + GatewayOnNicSupported *bool `xml:"gatewayOnNicSupported" json:"gatewayOnNicSupported,omitempty"` // Deprecated as of vSphere API 8.0, and there is no replacement for it. // // Indicate whether this host supports UPIT - UpitSupported *bool `xml:"upitSupported" json:"upitSupported,omitempty" vim:"6.5"` + UpitSupported *bool `xml:"upitSupported" json:"upitSupported,omitempty"` // Indicates whether this host supports hardware-based MMU virtualization. - CpuHwMmuSupported *bool `xml:"cpuHwMmuSupported" json:"cpuHwMmuSupported,omitempty" vim:"6.5"` + CpuHwMmuSupported *bool `xml:"cpuHwMmuSupported" json:"cpuHwMmuSupported,omitempty"` // Indicates whether this host supports encrypted vMotion. - EncryptedVMotionSupported *bool `xml:"encryptedVMotionSupported" json:"encryptedVMotionSupported,omitempty" vim:"6.5"` + EncryptedVMotionSupported *bool `xml:"encryptedVMotionSupported" json:"encryptedVMotionSupported,omitempty"` // Indicates whether this host supports changing the encryption state // of a virtual disk when the disk is being added or removed from a // virtual machine configuration. - EncryptionChangeOnAddRemoveSupported *bool `xml:"encryptionChangeOnAddRemoveSupported" json:"encryptionChangeOnAddRemoveSupported,omitempty" vim:"6.5"` + EncryptionChangeOnAddRemoveSupported *bool `xml:"encryptionChangeOnAddRemoveSupported" json:"encryptionChangeOnAddRemoveSupported,omitempty"` // Indicates whether this host supports changing the encryption state // of a virtual machine, or virtual disk, while the virtual machine // is powered on. 
- EncryptionHotOperationSupported *bool `xml:"encryptionHotOperationSupported" json:"encryptionHotOperationSupported,omitempty" vim:"6.5"` + EncryptionHotOperationSupported *bool `xml:"encryptionHotOperationSupported" json:"encryptionHotOperationSupported,omitempty"` // Indicates whether this host supports changing the encryption state // state of a virtual machine, or virtual disk, while the virtual // machine has snapshots present. - EncryptionWithSnapshotsSupported *bool `xml:"encryptionWithSnapshotsSupported" json:"encryptionWithSnapshotsSupported,omitempty" vim:"6.5"` + EncryptionWithSnapshotsSupported *bool `xml:"encryptionWithSnapshotsSupported" json:"encryptionWithSnapshotsSupported,omitempty"` // Indicates whether this host supports enabling Fault Tolerance on // encrypted virtual machines. - EncryptionFaultToleranceSupported *bool `xml:"encryptionFaultToleranceSupported" json:"encryptionFaultToleranceSupported,omitempty" vim:"6.5"` + EncryptionFaultToleranceSupported *bool `xml:"encryptionFaultToleranceSupported" json:"encryptionFaultToleranceSupported,omitempty"` // Indicates whether this host supports suspending, or creating // with-memory snapshots, encrypted virtual machines. - EncryptionMemorySaveSupported *bool `xml:"encryptionMemorySaveSupported" json:"encryptionMemorySaveSupported,omitempty" vim:"6.5"` + EncryptionMemorySaveSupported *bool `xml:"encryptionMemorySaveSupported" json:"encryptionMemorySaveSupported,omitempty"` // Indicates whether this host supports encrypting RDM backed virtual // disks. - EncryptionRDMSupported *bool `xml:"encryptionRDMSupported" json:"encryptionRDMSupported,omitempty" vim:"6.5"` + EncryptionRDMSupported *bool `xml:"encryptionRDMSupported" json:"encryptionRDMSupported,omitempty"` // Indicates whether this host supports encrypting virtual disks with // vFlash cache enabled. - EncryptionVFlashSupported *bool `xml:"encryptionVFlashSupported" json:"encryptionVFlashSupported,omitempty" vim:"6.5"` + EncryptionVFlashSupported *bool `xml:"encryptionVFlashSupported" json:"encryptionVFlashSupported,omitempty"` // Indicates whether this host supports encrypting virtual disks with // Content Based Read Cache (digest disks) enabled. - EncryptionCBRCSupported *bool `xml:"encryptionCBRCSupported" json:"encryptionCBRCSupported,omitempty" vim:"6.5"` + EncryptionCBRCSupported *bool `xml:"encryptionCBRCSupported" json:"encryptionCBRCSupported,omitempty"` // Indicates whether this host supports encrypting virtual disks with // Host Based Replication enabled. - EncryptionHBRSupported *bool `xml:"encryptionHBRSupported" json:"encryptionHBRSupported,omitempty" vim:"6.5"` + EncryptionHBRSupported *bool `xml:"encryptionHBRSupported" json:"encryptionHBRSupported,omitempty"` // Indicates whether this host supports Fault Tolerance VMs that have // specified UEFI firmware. - FtEfiSupported *bool `xml:"ftEfiSupported" json:"ftEfiSupported,omitempty" vim:"6.7"` + FtEfiSupported *bool `xml:"ftEfiSupported" json:"ftEfiSupported,omitempty"` // Indicates which kind of VMFS unmap method is supported. // // See // `HostCapabilityUnmapMethodSupported_enum` - UnmapMethodSupported string `xml:"unmapMethodSupported,omitempty" json:"unmapMethodSupported,omitempty" vim:"6.7"` + UnmapMethodSupported string `xml:"unmapMethodSupported,omitempty" json:"unmapMethodSupported,omitempty"` // Indicates maximum memory allowed for Fault Tolerance virtual machine. 
- MaxMemMBPerFtVm int32 `xml:"maxMemMBPerFtVm,omitempty" json:"maxMemMBPerFtVm,omitempty" vim:"6.7"` + MaxMemMBPerFtVm int32 `xml:"maxMemMBPerFtVm,omitempty" json:"maxMemMBPerFtVm,omitempty"` // Indicates that `VirtualMachineFlagInfo.virtualMmuUsage` is // ignored by the host, always operating as if "on" was selected. - VirtualMmuUsageIgnored *bool `xml:"virtualMmuUsageIgnored" json:"virtualMmuUsageIgnored,omitempty" vim:"6.7"` + VirtualMmuUsageIgnored *bool `xml:"virtualMmuUsageIgnored" json:"virtualMmuUsageIgnored,omitempty"` // Indicates that `VirtualMachineFlagInfo.virtualExecUsage` is // ignored by the host, always operating as if "hvOn" was selected. - VirtualExecUsageIgnored *bool `xml:"virtualExecUsageIgnored" json:"virtualExecUsageIgnored,omitempty" vim:"6.7"` + VirtualExecUsageIgnored *bool `xml:"virtualExecUsageIgnored" json:"virtualExecUsageIgnored,omitempty"` // Indicates that createDate feature is supported by the host. - VmCreateDateSupported *bool `xml:"vmCreateDateSupported" json:"vmCreateDateSupported,omitempty" vim:"6.7"` + VmCreateDateSupported *bool `xml:"vmCreateDateSupported" json:"vmCreateDateSupported,omitempty"` // Indicates whether this host supports VMFS-3 EOL. // // If value is not specified, it should be considered as not supported. - Vmfs3EOLSupported *bool `xml:"vmfs3EOLSupported" json:"vmfs3EOLSupported,omitempty" vim:"6.7"` + Vmfs3EOLSupported *bool `xml:"vmfs3EOLSupported" json:"vmfs3EOLSupported,omitempty"` // Indicates whether this host supports VMCP for Fault Tolerance VMs. - FtVmcpSupported *bool `xml:"ftVmcpSupported" json:"ftVmcpSupported,omitempty" vim:"6.7"` + FtVmcpSupported *bool `xml:"ftVmcpSupported" json:"ftVmcpSupported,omitempty"` // Indicates whether this host supports the LoadESX feature // which allows faster reboots. // // See also `HostLoadEsxManager.QueryLoadEsxSupported`. - QuickBootSupported *bool `xml:"quickBootSupported" json:"quickBootSupported,omitempty" vim:"6.7.1"` + QuickBootSupported *bool `xml:"quickBootSupported" json:"quickBootSupported,omitempty"` // Indicates whether this host supports encrypted Fault Tolerance. EncryptedFtSupported *bool `xml:"encryptedFtSupported" json:"encryptedFtSupported,omitempty" vim:"7.0.2.0"` // Indicates whether this host supports Assignable Hardware. - AssignableHardwareSupported *bool `xml:"assignableHardwareSupported" json:"assignableHardwareSupported,omitempty" vim:"7.0"` + AssignableHardwareSupported *bool `xml:"assignableHardwareSupported" json:"assignableHardwareSupported,omitempty"` // Indicates whether this host supports suspending virtual machines to memory. SuspendToMemorySupported *bool `xml:"suspendToMemorySupported" json:"suspendToMemorySupported,omitempty" vim:"7.0.2.0"` // Indicates whether this host uses vmFeatures for compatibility checking // of old (≤8) virtual hardware version VMs. - UseFeatureReqsForOldHWv *bool `xml:"useFeatureReqsForOldHWv" json:"useFeatureReqsForOldHWv,omitempty" vim:"6.8.7"` + UseFeatureReqsForOldHWv *bool `xml:"useFeatureReqsForOldHWv" json:"useFeatureReqsForOldHWv,omitempty"` // Indicates whether this host supports marking specified LUN as // perennially reserved. - MarkPerenniallyReservedSupported *bool `xml:"markPerenniallyReservedSupported" json:"markPerenniallyReservedSupported,omitempty" vim:"6.7.2"` + MarkPerenniallyReservedSupported *bool `xml:"markPerenniallyReservedSupported" json:"markPerenniallyReservedSupported,omitempty"` // Indicates whether this host supports HPP path selection policy // settings. 
- HppPspSupported *bool `xml:"hppPspSupported" json:"hppPspSupported,omitempty" vim:"7.0"` + HppPspSupported *bool `xml:"hppPspSupported" json:"hppPspSupported,omitempty"` // Indicates whether device rebind without reboot is supported. // // This is // the capability which enables PCI passthrough and SR-IOV configuration // without reboot. - DeviceRebindWithoutRebootSupported *bool `xml:"deviceRebindWithoutRebootSupported" json:"deviceRebindWithoutRebootSupported,omitempty" vim:"7.0"` + DeviceRebindWithoutRebootSupported *bool `xml:"deviceRebindWithoutRebootSupported" json:"deviceRebindWithoutRebootSupported,omitempty"` // Indicates whether this host supports storage policy change. - StoragePolicyChangeSupported *bool `xml:"storagePolicyChangeSupported" json:"storagePolicyChangeSupported,omitempty" vim:"7.0"` + StoragePolicyChangeSupported *bool `xml:"storagePolicyChangeSupported" json:"storagePolicyChangeSupported,omitempty"` // Indicates whether this host supports date time synchronization over // Precision Time Protocol (PTP). - PrecisionTimeProtocolSupported *bool `xml:"precisionTimeProtocolSupported" json:"precisionTimeProtocolSupported,omitempty" vim:"7.0"` + PrecisionTimeProtocolSupported *bool `xml:"precisionTimeProtocolSupported" json:"precisionTimeProtocolSupported,omitempty"` // Indicates whether vMotion of a VM with remote devices attached is // supported. // // This applies to CD-ROM and floppy devices backed by a // remote client. - RemoteDeviceVMotionSupported *bool `xml:"remoteDeviceVMotionSupported" json:"remoteDeviceVMotionSupported,omitempty" vim:"7.0"` + RemoteDeviceVMotionSupported *bool `xml:"remoteDeviceVMotionSupported" json:"remoteDeviceVMotionSupported,omitempty"` // The maximum amount of virtual memory supported per virtual machine. // // If this capability is not set, the size is limited by hardware version // of the virtual machine only. - MaxSupportedVmMemory int32 `xml:"maxSupportedVmMemory,omitempty" json:"maxSupportedVmMemory,omitempty" vim:"7.0"` + MaxSupportedVmMemory int32 `xml:"maxSupportedVmMemory,omitempty" json:"maxSupportedVmMemory,omitempty"` // Indicates if the host supports Assignable Hardware device hints. AhDeviceHintsSupported *bool `xml:"ahDeviceHintsSupported" json:"ahDeviceHintsSupported,omitempty" vim:"7.0.2.0"` // Indicates if access to NVMe over TCP devices is supported. @@ -34451,6 +34391,25 @@ type HostCapability struct { VsanNicMgmtSupported *bool `xml:"vsanNicMgmtSupported" json:"vsanNicMgmtSupported,omitempty" vim:"8.0.2.0"` // Indicates whether vVol NQN is supported on this host. VvolNQNSupported *bool `xml:"vvolNQNSupported" json:"vvolNQNSupported,omitempty" vim:"8.0.2.0"` + // Indicates whether "stretched" vVol Storage Container is supported on this host. + StretchedSCSupported *bool `xml:"stretchedSCSupported" json:"stretchedSCSupported,omitempty" vim:"8.0.3.0"` + // Indicates whether vmknic binding is supported on NFSv41 over this host. + VmknicBindingOnNFSv41 *bool `xml:"vmknicBindingOnNFSv41" json:"vmknicBindingOnNFSv41,omitempty" vim:"8.0.3.0"` + // Indicates whether VasaProvider Status can be monitored on the host. + VpStatusCheckSupported *bool `xml:"vpStatusCheckSupported" json:"vpStatusCheckSupported,omitempty" vim:"8.0.3.0"` + // Indicates whether NFS41 NCONNECT is supported on this host. + NConnectSupported *bool `xml:"nConnectSupported" json:"nConnectSupported,omitempty" vim:"8.0.3.0"` + // Indicates whether user-provided private key installation is supported on this host. 
+ UserKeySupported *bool `xml:"userKeySupported" json:"userKeySupported,omitempty" vim:"8.0.3.0"` + // Indicates whether non-disruptive certificate management is supported on this host. + NdcmSupported *bool `xml:"ndcmSupported" json:"ndcmSupported,omitempty" vim:"8.0.3.0"` + // Flag indicating that the firmware reports the use of UEFI Secure + // Boot during system boot. + // + // TPM attestation may be used to definitively determine the boot + // time UEFI Secure Boot state and its complete configuration. An + // out-of-band management channel may also be considered. + UefiSecureBoot *bool `xml:"uefiSecureBoot" json:"uefiSecureBoot,omitempty" vim:"8.0.3.0"` } func init() { @@ -34482,7 +34441,6 @@ type HostCertificateManagerCertificateInfo struct { func init() { t["HostCertificateManagerCertificateInfo"] = reflect.TypeOf((*HostCertificateManagerCertificateInfo)(nil)).Elem() - minAPIVersionForType["HostCertificateManagerCertificateInfo"] = "6.0" } // Represents certificate specification used for @@ -34721,7 +34679,6 @@ type HostComplianceCheckedEvent struct { func init() { t["HostComplianceCheckedEvent"] = reflect.TypeOf((*HostComplianceCheckedEvent)(nil)).Elem() - minAPIVersionForType["HostComplianceCheckedEvent"] = "4.0" } // This event records that host is in compliance. @@ -34731,7 +34688,6 @@ type HostCompliantEvent struct { func init() { t["HostCompliantEvent"] = reflect.TypeOf((*HostCompliantEvent)(nil)).Elem() - minAPIVersionForType["HostCompliantEvent"] = "4.0" } // This event records that a configuration was applied on a host @@ -34741,7 +34697,6 @@ type HostConfigAppliedEvent struct { func init() { t["HostConfigAppliedEvent"] = reflect.TypeOf((*HostConfigAppliedEvent)(nil)).Elem() - minAPIVersionForType["HostConfigAppliedEvent"] = "4.0" } // This data object type describes types and constants related to the @@ -34767,7 +34722,6 @@ type HostConfigFailed struct { func init() { t["HostConfigFailed"] = reflect.TypeOf((*HostConfigFailed)(nil)).Elem() - minAPIVersionForType["HostConfigFailed"] = "4.0" } type HostConfigFailedFault HostConfigFailed @@ -34806,18 +34760,20 @@ type HostConfigInfo struct { // Information about a product. Product AboutInfo `xml:"product" json:"product"` // Deployment information about the host. - DeploymentInfo *HostDeploymentInfo `xml:"deploymentInfo,omitempty" json:"deploymentInfo,omitempty" vim:"6.5"` + DeploymentInfo *HostDeploymentInfo `xml:"deploymentInfo,omitempty" json:"deploymentInfo,omitempty"` // If hyperthreading is supported, this is the CPU configuration for // optimizing hyperthreading. HyperThread *HostHyperThreadScheduleInfo `xml:"hyperThread,omitempty" json:"hyperThread,omitempty"` + // Information about the CPU scheduler on the host. + CpuScheduler *HostCpuSchedulerInfo `xml:"cpuScheduler,omitempty" json:"cpuScheduler,omitempty" vim:"8.0.3.0"` // Memory configuration. ConsoleReservation *ServiceConsoleReservationInfo `xml:"consoleReservation,omitempty" json:"consoleReservation,omitempty"` // Virtual machine memory configuration. - VirtualMachineReservation *VirtualMachineMemoryReservationInfo `xml:"virtualMachineReservation,omitempty" json:"virtualMachineReservation,omitempty" vim:"2.5"` + VirtualMachineReservation *VirtualMachineMemoryReservationInfo `xml:"virtualMachineReservation,omitempty" json:"virtualMachineReservation,omitempty"` // Storage system information. StorageDevice *HostStorageDeviceInfo `xml:"storageDevice,omitempty" json:"storageDevice,omitempty"` // Storage multipath state information. 
- MultipathState *HostMultipathStateInfo `xml:"multipathState,omitempty" json:"multipathState,omitempty" vim:"4.0"` + MultipathState *HostMultipathStateInfo `xml:"multipathState,omitempty" json:"multipathState,omitempty"` // Storage system file system volume information. FileSystemVolume *HostFileSystemVolumeInfo `xml:"fileSystemVolume,omitempty" json:"fileSystemVolume,omitempty"` // Datastore paths of files used by the host system on @@ -34825,7 +34781,7 @@ type HostConfigInfo struct { // host. // // For information on datastore paths, see `Datastore`. - SystemFile []string `xml:"systemFile,omitempty" json:"systemFile,omitempty" vim:"4.1"` + SystemFile []string `xml:"systemFile,omitempty" json:"systemFile,omitempty"` // Network system information. Network *HostNetworkInfo `xml:"network,omitempty" json:"network,omitempty"` // Deprecated as of VI API 4.0, use `HostConfigInfo.virtualNicManagerInfo`. @@ -34833,11 +34789,11 @@ type HostConfigInfo struct { // VMotion system information. Vmotion *HostVMotionInfo `xml:"vmotion,omitempty" json:"vmotion,omitempty"` // VirtualNic manager information. - VirtualNicManagerInfo *HostVirtualNicManagerInfo `xml:"virtualNicManagerInfo,omitempty" json:"virtualNicManagerInfo,omitempty" vim:"4.0"` + VirtualNicManagerInfo *HostVirtualNicManagerInfo `xml:"virtualNicManagerInfo,omitempty" json:"virtualNicManagerInfo,omitempty"` // Capability vector indicating the available network features. Capabilities *HostNetCapabilities `xml:"capabilities,omitempty" json:"capabilities,omitempty"` // Capability vector indicating available datastore features. - DatastoreCapabilities *HostDatastoreSystemCapabilities `xml:"datastoreCapabilities,omitempty" json:"datastoreCapabilities,omitempty" vim:"2.5"` + DatastoreCapabilities *HostDatastoreSystemCapabilities `xml:"datastoreCapabilities,omitempty" json:"datastoreCapabilities,omitempty"` // Deprecated as of VI API 4.0, the system defaults will be used. // // capabilities to offload operations either to the host or to physical @@ -34874,20 +34830,20 @@ type HostConfigInfo struct { // Note: Using a host-specific swap location may degrade VMotion performance. // // Refers instance of `Datastore`. - LocalSwapDatastore *ManagedObjectReference `xml:"localSwapDatastore,omitempty" json:"localSwapDatastore,omitempty" vim:"2.5"` + LocalSwapDatastore *ManagedObjectReference `xml:"localSwapDatastore,omitempty" json:"localSwapDatastore,omitempty"` // The system swap configuration specifies which options are currently // enabled. // // See also `HostSystemSwapConfiguration`. - SystemSwapConfiguration *HostSystemSwapConfiguration `xml:"systemSwapConfiguration,omitempty" json:"systemSwapConfiguration,omitempty" vim:"5.1"` + SystemSwapConfiguration *HostSystemSwapConfiguration `xml:"systemSwapConfiguration,omitempty" json:"systemSwapConfiguration,omitempty"` // Reference for the system resource hierarchy, used for configuring // the set of resources reserved to the system and unavailable to // virtual machines. SystemResources *HostSystemResourceInfo `xml:"systemResources,omitempty" json:"systemResources,omitempty"` // Date/Time related configuration - DateTimeInfo *HostDateTimeInfo `xml:"dateTimeInfo,omitempty" json:"dateTimeInfo,omitempty" vim:"2.5"` + DateTimeInfo *HostDateTimeInfo `xml:"dateTimeInfo,omitempty" json:"dateTimeInfo,omitempty"` // Additional flags for a host. 
- Flags *HostFlagInfo `xml:"flags,omitempty" json:"flags,omitempty" vim:"2.5"` + Flags *HostFlagInfo `xml:"flags,omitempty" json:"flags,omitempty"` // Deprecated as of vSphere API 6.0, use `HostConfigInfo.lockdownMode`. // // If the flag is true, the permissions on the host have been modified such @@ -34902,99 +34858,99 @@ type HostConfigInfo struct { // should be ignored. // // See also `HostSystem.EnterLockdownMode`, `HostSystem.ExitLockdownMode`. - AdminDisabled *bool `xml:"adminDisabled" json:"adminDisabled,omitempty" vim:"2.5"` + AdminDisabled *bool `xml:"adminDisabled" json:"adminDisabled,omitempty"` // Indicates the current lockdown mode of the host as reported by // `HostAccessManager.lockdownMode`. // // See also `HostAccessManager.ChangeLockdownMode`. - LockdownMode HostLockdownMode `xml:"lockdownMode,omitempty" json:"lockdownMode,omitempty" vim:"6.0"` + LockdownMode HostLockdownMode `xml:"lockdownMode,omitempty" json:"lockdownMode,omitempty"` // IPMI (Intelligent Platform Management Interface) info for the host. - Ipmi *HostIpmiInfo `xml:"ipmi,omitempty" json:"ipmi,omitempty" vim:"4.0"` + Ipmi *HostIpmiInfo `xml:"ipmi,omitempty" json:"ipmi,omitempty"` // Deprecated as of vSphere API 5.0, use `HostConfigInfo.sslThumbprintData` instead. // // SSL Thumbprint info for hosts registered on this host. - SslThumbprintInfo *HostSslThumbprintInfo `xml:"sslThumbprintInfo,omitempty" json:"sslThumbprintInfo,omitempty" vim:"4.0"` + SslThumbprintInfo *HostSslThumbprintInfo `xml:"sslThumbprintInfo,omitempty" json:"sslThumbprintInfo,omitempty"` // SSL Thumbprints registered on this host. - SslThumbprintData []HostSslThumbprintInfo `xml:"sslThumbprintData,omitempty" json:"sslThumbprintData,omitempty" vim:"5.0"` + SslThumbprintData []HostSslThumbprintInfo `xml:"sslThumbprintData,omitempty" json:"sslThumbprintData,omitempty"` // Full Host Certificate in PEM format, if known - Certificate []byte `xml:"certificate,omitempty" json:"certificate,omitempty" vim:"5.0"` + Certificate ByteSlice `xml:"certificate,omitempty" json:"certificate,omitempty"` // PCI passthrough information. - PciPassthruInfo []BaseHostPciPassthruInfo `xml:"pciPassthruInfo,omitempty,typeattr" json:"pciPassthruInfo,omitempty" vim:"4.0"` + PciPassthruInfo []BaseHostPciPassthruInfo `xml:"pciPassthruInfo,omitempty,typeattr" json:"pciPassthruInfo,omitempty"` // Current authentication configuration. - AuthenticationManagerInfo *HostAuthenticationManagerInfo `xml:"authenticationManagerInfo,omitempty" json:"authenticationManagerInfo,omitempty" vim:"4.1"` + AuthenticationManagerInfo *HostAuthenticationManagerInfo `xml:"authenticationManagerInfo,omitempty" json:"authenticationManagerInfo,omitempty"` // List of feature-specific version information. // // Each element refers // to the version information for a specific feature. - FeatureVersion []HostFeatureVersionInfo `xml:"featureVersion,omitempty" json:"featureVersion,omitempty" vim:"4.1"` + FeatureVersion []HostFeatureVersionInfo `xml:"featureVersion,omitempty" json:"featureVersion,omitempty"` // Host power management capability. - PowerSystemCapability *PowerSystemCapability `xml:"powerSystemCapability,omitempty" json:"powerSystemCapability,omitempty" vim:"4.1"` + PowerSystemCapability *PowerSystemCapability `xml:"powerSystemCapability,omitempty" json:"powerSystemCapability,omitempty"` // Host power management information. 
- PowerSystemInfo *PowerSystemInfo `xml:"powerSystemInfo,omitempty" json:"powerSystemInfo,omitempty" vim:"4.1"` + PowerSystemInfo *PowerSystemInfo `xml:"powerSystemInfo,omitempty" json:"powerSystemInfo,omitempty"` // Host solid stats drive cache configuration information. - CacheConfigurationInfo []HostCacheConfigurationInfo `xml:"cacheConfigurationInfo,omitempty" json:"cacheConfigurationInfo,omitempty" vim:"5.0"` + CacheConfigurationInfo []HostCacheConfigurationInfo `xml:"cacheConfigurationInfo,omitempty" json:"cacheConfigurationInfo,omitempty"` // Indicates if a host is wake on lan capable. // // A host is deemed wake on lan capable if there exists at least one // physical network card that is both backing the vmotion interface and // is itself wake on lan capable. - WakeOnLanCapable *bool `xml:"wakeOnLanCapable" json:"wakeOnLanCapable,omitempty" vim:"5.0"` + WakeOnLanCapable *bool `xml:"wakeOnLanCapable" json:"wakeOnLanCapable,omitempty"` // Array of host feature capabilities. // // This is expected to change // infrequently. It may change while host is in maintenance mode // and between reboots if hardware, firmware, or a device driver // is changed or upgraded. - FeatureCapability []HostFeatureCapability `xml:"featureCapability,omitempty" json:"featureCapability,omitempty" vim:"5.1"` + FeatureCapability []HostFeatureCapability `xml:"featureCapability,omitempty" json:"featureCapability,omitempty"` // Array of the feature capabilities that the host has after the // mask has been applied. - MaskedFeatureCapability []HostFeatureCapability `xml:"maskedFeatureCapability,omitempty" json:"maskedFeatureCapability,omitempty" vim:"5.1"` + MaskedFeatureCapability []HostFeatureCapability `xml:"maskedFeatureCapability,omitempty" json:"maskedFeatureCapability,omitempty"` // Host vFlash configuration information - VFlashConfigInfo *HostVFlashManagerVFlashConfigInfo `xml:"vFlashConfigInfo,omitempty" json:"vFlashConfigInfo,omitempty" vim:"5.5"` + VFlashConfigInfo *HostVFlashManagerVFlashConfigInfo `xml:"vFlashConfigInfo,omitempty" json:"vFlashConfigInfo,omitempty"` // VSAN configuration for a host. - VsanHostConfig *VsanHostConfigInfo `xml:"vsanHostConfig,omitempty" json:"vsanHostConfig,omitempty" vim:"5.5"` + VsanHostConfig *VsanHostConfigInfo `xml:"vsanHostConfig,omitempty" json:"vsanHostConfig,omitempty"` // List of Windows domains available for user searches, if the underlying // system supports windows domain membership. // // See `UserDirectory.domainList`. - DomainList []string `xml:"domainList,omitempty" json:"domainList,omitempty" vim:"6.0"` + DomainList []string `xml:"domainList,omitempty" json:"domainList,omitempty"` // A checksum of overhead computation script. // // (For VMware internal usage only) - ScriptCheckSum []byte `xml:"scriptCheckSum,omitempty" json:"scriptCheckSum,omitempty" vim:"6.0"` + ScriptCheckSum []byte `xml:"scriptCheckSum,omitempty" json:"scriptCheckSum,omitempty"` // A checksum of host configuration option set. // // (For VMware internal usage only) - HostConfigCheckSum []byte `xml:"hostConfigCheckSum,omitempty" json:"hostConfigCheckSum,omitempty" vim:"6.0"` + HostConfigCheckSum []byte `xml:"hostConfigCheckSum,omitempty" json:"hostConfigCheckSum,omitempty"` // A checksum of the Assignable Hardware Description Tree. 
// // (For VMware internal usage only) - DescriptionTreeCheckSum []byte `xml:"descriptionTreeCheckSum,omitempty" json:"descriptionTreeCheckSum,omitempty" vim:"7.0"` + DescriptionTreeCheckSum []byte `xml:"descriptionTreeCheckSum,omitempty" json:"descriptionTreeCheckSum,omitempty"` // The list of graphics devices available on this host. - GraphicsInfo []HostGraphicsInfo `xml:"graphicsInfo,omitempty" json:"graphicsInfo,omitempty" vim:"5.5"` + GraphicsInfo []HostGraphicsInfo `xml:"graphicsInfo,omitempty" json:"graphicsInfo,omitempty"` // Array of shared passthru GPU types. // // These GPU types may be enabled // when specific host hardware is present. - SharedPassthruGpuTypes []string `xml:"sharedPassthruGpuTypes,omitempty" json:"sharedPassthruGpuTypes,omitempty" vim:"6.0"` + SharedPassthruGpuTypes []string `xml:"sharedPassthruGpuTypes,omitempty" json:"sharedPassthruGpuTypes,omitempty"` // Graphics configuration for a host. - GraphicsConfig *HostGraphicsConfig `xml:"graphicsConfig,omitempty" json:"graphicsConfig,omitempty" vim:"6.5"` + GraphicsConfig *HostGraphicsConfig `xml:"graphicsConfig,omitempty" json:"graphicsConfig,omitempty"` // Array of shared passthru GPU capablities. // // See also `HostSharedGpuCapabilities`. - SharedGpuCapabilities []HostSharedGpuCapabilities `xml:"sharedGpuCapabilities,omitempty" json:"sharedGpuCapabilities,omitempty" vim:"6.7"` + SharedGpuCapabilities []HostSharedGpuCapabilities `xml:"sharedGpuCapabilities,omitempty" json:"sharedGpuCapabilities,omitempty"` // Information of the IO Filters installed on the host. // // See `HostIoFilterInfo`. - IoFilterInfo []HostIoFilterInfo `xml:"ioFilterInfo,omitempty" json:"ioFilterInfo,omitempty" vim:"6.0"` + IoFilterInfo []HostIoFilterInfo `xml:"ioFilterInfo,omitempty" json:"ioFilterInfo,omitempty"` // Information on SRIOV device pools present on host. - SriovDevicePool []BaseHostSriovDevicePoolInfo `xml:"sriovDevicePool,omitempty,typeattr" json:"sriovDevicePool,omitempty" vim:"6.5"` + SriovDevicePool []BaseHostSriovDevicePoolInfo `xml:"sriovDevicePool,omitempty,typeattr" json:"sriovDevicePool,omitempty"` // Information describing Assignable Hardware device bindings on host. // // See `HostAssignableHardwareBinding`. - AssignableHardwareBinding []HostAssignableHardwareBinding `xml:"assignableHardwareBinding,omitempty" json:"assignableHardwareBinding,omitempty" vim:"7.0"` + AssignableHardwareBinding []HostAssignableHardwareBinding `xml:"assignableHardwareBinding,omitempty" json:"assignableHardwareBinding,omitempty"` // Configured assignable hardware device attributes. - AssignableHardwareConfig *HostAssignableHardwareConfig `xml:"assignableHardwareConfig,omitempty" json:"assignableHardwareConfig,omitempty" vim:"7.0"` + AssignableHardwareConfig *HostAssignableHardwareConfig `xml:"assignableHardwareConfig,omitempty" json:"assignableHardwareConfig,omitempty"` } func init() { @@ -35037,7 +34993,7 @@ type HostConfigManager struct { // The VirtualNic configuration. // // Refers instance of `HostVirtualNicManager`. - VirtualNicManager *ManagedObjectReference `xml:"virtualNicManager,omitempty" json:"virtualNicManager,omitempty" vim:"4.0"` + VirtualNicManager *ManagedObjectReference `xml:"virtualNicManager,omitempty" json:"virtualNicManager,omitempty"` // The configuration of the host services (for example, // SSH, FTP, and Telnet). // @@ -35066,109 +35022,109 @@ type HostConfigManager struct { // DateTime configuration // // Refers instance of `HostDateTimeSystem`. 
- DateTimeSystem *ManagedObjectReference `xml:"dateTimeSystem,omitempty" json:"dateTimeSystem,omitempty" vim:"2.5"` + DateTimeSystem *ManagedObjectReference `xml:"dateTimeSystem,omitempty" json:"dateTimeSystem,omitempty"` // Host patch management. // // Refers instance of `HostPatchManager`. - PatchManager *ManagedObjectReference `xml:"patchManager,omitempty" json:"patchManager,omitempty" vim:"2.5"` + PatchManager *ManagedObjectReference `xml:"patchManager,omitempty" json:"patchManager,omitempty"` // Host image configuration management. // // Refers instance of `HostImageConfigManager`. - ImageConfigManager *ManagedObjectReference `xml:"imageConfigManager,omitempty" json:"imageConfigManager,omitempty" vim:"5.0"` + ImageConfigManager *ManagedObjectReference `xml:"imageConfigManager,omitempty" json:"imageConfigManager,omitempty"` // Boot device order management. // // Refers instance of `HostBootDeviceSystem`. - BootDeviceSystem *ManagedObjectReference `xml:"bootDeviceSystem,omitempty" json:"bootDeviceSystem,omitempty" vim:"2.5"` + BootDeviceSystem *ManagedObjectReference `xml:"bootDeviceSystem,omitempty" json:"bootDeviceSystem,omitempty"` // Firmware management. // // Refers instance of `HostFirmwareSystem`. - FirmwareSystem *ManagedObjectReference `xml:"firmwareSystem,omitempty" json:"firmwareSystem,omitempty" vim:"2.5"` + FirmwareSystem *ManagedObjectReference `xml:"firmwareSystem,omitempty" json:"firmwareSystem,omitempty"` // System health status manager. // // Refers instance of `HostHealthStatusSystem`. - HealthStatusSystem *ManagedObjectReference `xml:"healthStatusSystem,omitempty" json:"healthStatusSystem,omitempty" vim:"2.5"` + HealthStatusSystem *ManagedObjectReference `xml:"healthStatusSystem,omitempty" json:"healthStatusSystem,omitempty"` // PciDeviceSystem for passthru. // // Refers instance of `HostPciPassthruSystem`. - PciPassthruSystem *ManagedObjectReference `xml:"pciPassthruSystem,omitempty" json:"pciPassthruSystem,omitempty" vim:"4.0"` + PciPassthruSystem *ManagedObjectReference `xml:"pciPassthruSystem,omitempty" json:"pciPassthruSystem,omitempty"` // License manager // // Refers instance of `LicenseManager`. - LicenseManager *ManagedObjectReference `xml:"licenseManager,omitempty" json:"licenseManager,omitempty" vim:"4.0"` + LicenseManager *ManagedObjectReference `xml:"licenseManager,omitempty" json:"licenseManager,omitempty"` // Kernel module configuration management. // // Refers instance of `HostKernelModuleSystem`. - KernelModuleSystem *ManagedObjectReference `xml:"kernelModuleSystem,omitempty" json:"kernelModuleSystem,omitempty" vim:"4.0"` + KernelModuleSystem *ManagedObjectReference `xml:"kernelModuleSystem,omitempty" json:"kernelModuleSystem,omitempty"` // Authentication method configuration - for example, for Active Directory membership. // // Refers instance of `HostAuthenticationManager`. - AuthenticationManager *ManagedObjectReference `xml:"authenticationManager,omitempty" json:"authenticationManager,omitempty" vim:"4.1"` + AuthenticationManager *ManagedObjectReference `xml:"authenticationManager,omitempty" json:"authenticationManager,omitempty"` // Power System manager. // // Refers instance of `HostPowerSystem`. - PowerSystem *ManagedObjectReference `xml:"powerSystem,omitempty" json:"powerSystem,omitempty" vim:"4.1"` + PowerSystem *ManagedObjectReference `xml:"powerSystem,omitempty" json:"powerSystem,omitempty"` // Host solid state drive cache configuration manager. // // Refers instance of `HostCacheConfigurationManager`. 
- CacheConfigurationManager *ManagedObjectReference `xml:"cacheConfigurationManager,omitempty" json:"cacheConfigurationManager,omitempty" vim:"5.0"` + CacheConfigurationManager *ManagedObjectReference `xml:"cacheConfigurationManager,omitempty" json:"cacheConfigurationManager,omitempty"` // Esx Agent resource configuration manager // // Refers instance of `HostEsxAgentHostManager`. - EsxAgentHostManager *ManagedObjectReference `xml:"esxAgentHostManager,omitempty" json:"esxAgentHostManager,omitempty" vim:"5.0"` + EsxAgentHostManager *ManagedObjectReference `xml:"esxAgentHostManager,omitempty" json:"esxAgentHostManager,omitempty"` // Iscsi Management Operations managed entity // // Refers instance of `IscsiManager`. - IscsiManager *ManagedObjectReference `xml:"iscsiManager,omitempty" json:"iscsiManager,omitempty" vim:"5.0"` + IscsiManager *ManagedObjectReference `xml:"iscsiManager,omitempty" json:"iscsiManager,omitempty"` // vFlash Manager // // Refers instance of `HostVFlashManager`. - VFlashManager *ManagedObjectReference `xml:"vFlashManager,omitempty" json:"vFlashManager,omitempty" vim:"5.5"` + VFlashManager *ManagedObjectReference `xml:"vFlashManager,omitempty" json:"vFlashManager,omitempty"` // VsanSystem managed entity. // // Refers instance of `HostVsanSystem`. - VsanSystem *ManagedObjectReference `xml:"vsanSystem,omitempty" json:"vsanSystem,omitempty" vim:"5.5"` + VsanSystem *ManagedObjectReference `xml:"vsanSystem,omitempty" json:"vsanSystem,omitempty"` // Common Message Bus proxy service. // // This API shall always be present in vSphere API 6.0 or later. // // Refers instance of `MessageBusProxy`. - MessageBusProxy *ManagedObjectReference `xml:"messageBusProxy,omitempty" json:"messageBusProxy,omitempty" vim:"6.0"` + MessageBusProxy *ManagedObjectReference `xml:"messageBusProxy,omitempty" json:"messageBusProxy,omitempty"` // A user directory managed object. // // Refers instance of `UserDirectory`. - UserDirectory *ManagedObjectReference `xml:"userDirectory,omitempty" json:"userDirectory,omitempty" vim:"6.0"` + UserDirectory *ManagedObjectReference `xml:"userDirectory,omitempty" json:"userDirectory,omitempty"` // A manager for host local user accounts. // // Refers instance of `HostLocalAccountManager`. - AccountManager *ManagedObjectReference `xml:"accountManager,omitempty" json:"accountManager,omitempty" vim:"6.0"` + AccountManager *ManagedObjectReference `xml:"accountManager,omitempty" json:"accountManager,omitempty"` // Host access manager // // Refers instance of `HostAccessManager`. - HostAccessManager *ManagedObjectReference `xml:"hostAccessManager,omitempty" json:"hostAccessManager,omitempty" vim:"6.0"` + HostAccessManager *ManagedObjectReference `xml:"hostAccessManager,omitempty" json:"hostAccessManager,omitempty"` // Host graphics manager. // // Refers instance of `HostGraphicsManager`. - GraphicsManager *ManagedObjectReference `xml:"graphicsManager,omitempty" json:"graphicsManager,omitempty" vim:"5.5"` + GraphicsManager *ManagedObjectReference `xml:"graphicsManager,omitempty" json:"graphicsManager,omitempty"` // VsanInternalSystem managed entity. // // Refers instance of `HostVsanInternalSystem`. - VsanInternalSystem *ManagedObjectReference `xml:"vsanInternalSystem,omitempty" json:"vsanInternalSystem,omitempty" vim:"5.5"` + VsanInternalSystem *ManagedObjectReference `xml:"vsanInternalSystem,omitempty" json:"vsanInternalSystem,omitempty"` // Host CertificateManager. // // Refers instance of `HostCertificateManager`. 
- CertificateManager *ManagedObjectReference `xml:"certificateManager,omitempty" json:"certificateManager,omitempty" vim:"6.0"` + CertificateManager *ManagedObjectReference `xml:"certificateManager,omitempty" json:"certificateManager,omitempty"` // Host CryptoManager. // // Refers instance of `CryptoManager`. - CryptoManager *ManagedObjectReference `xml:"cryptoManager,omitempty" json:"cryptoManager,omitempty" vim:"6.5"` + CryptoManager *ManagedObjectReference `xml:"cryptoManager,omitempty" json:"cryptoManager,omitempty"` // Host Non-Volatile DIMM configuration manager // // Refers instance of `HostNvdimmSystem`. - NvdimmSystem *ManagedObjectReference `xml:"nvdimmSystem,omitempty" json:"nvdimmSystem,omitempty" vim:"6.7"` + NvdimmSystem *ManagedObjectReference `xml:"nvdimmSystem,omitempty" json:"nvdimmSystem,omitempty"` // Assignable Hardware manager. // // Refers instance of `HostAssignableHardwareManager`. - AssignableHardwareManager *ManagedObjectReference `xml:"assignableHardwareManager,omitempty" json:"assignableHardwareManager,omitempty" vim:"7.0"` + AssignableHardwareManager *ManagedObjectReference `xml:"assignableHardwareManager,omitempty" json:"assignableHardwareManager,omitempty"` } func init() { @@ -35212,18 +35168,17 @@ type HostConfigSpec struct { // Memory configuration for the host. Memory *HostMemorySpec `xml:"memory,omitempty" json:"memory,omitempty"` // Active Directory configuration change. - ActiveDirectory []HostActiveDirectory `xml:"activeDirectory,omitempty" json:"activeDirectory,omitempty" vim:"4.1"` + ActiveDirectory []HostActiveDirectory `xml:"activeDirectory,omitempty" json:"activeDirectory,omitempty"` // Advanced configuration. - GenericConfig []KeyAnyValue `xml:"genericConfig,omitempty" json:"genericConfig,omitempty" vim:"5.0"` + GenericConfig []KeyAnyValue `xml:"genericConfig,omitempty" json:"genericConfig,omitempty"` // Graphics configuration for a host. - GraphicsConfig *HostGraphicsConfig `xml:"graphicsConfig,omitempty" json:"graphicsConfig,omitempty" vim:"6.5"` + GraphicsConfig *HostGraphicsConfig `xml:"graphicsConfig,omitempty" json:"graphicsConfig,omitempty"` // Assignable Hardware configuration for the host - AssignableHardwareConfig *HostAssignableHardwareConfig `xml:"assignableHardwareConfig,omitempty" json:"assignableHardwareConfig,omitempty" vim:"7.0"` + AssignableHardwareConfig *HostAssignableHardwareConfig `xml:"assignableHardwareConfig,omitempty" json:"assignableHardwareConfig,omitempty"` } func init() { t["HostConfigSpec"] = reflect.TypeOf((*HostConfigSpec)(nil)).Elem() - minAPIVersionForType["HostConfigSpec"] = "4.0" } // An overview of the key configuration parameters. @@ -35235,7 +35190,7 @@ type HostConfigSummary struct { // The port number. Port int32 `xml:"port" json:"port"` // The SSL thumbprint of the host, if known. - SslThumbprint string `xml:"sslThumbprint,omitempty" json:"sslThumbprint,omitempty" vim:"4.0"` + SslThumbprint string `xml:"sslThumbprint,omitempty" json:"sslThumbprint,omitempty"` // Information about the software running on the host, if known. // // The current supported hosts are ESX Server 2.0.1 (and later) and VMware Server @@ -35244,20 +35199,20 @@ type HostConfigSummary struct { // The flag to indicate whether or not VMotion is enabled on this host. VmotionEnabled bool `xml:"vmotionEnabled" json:"vmotionEnabled"` // The flag to indicate whether or not Fault Tolerance logging is enabled on this host. 
- FaultToleranceEnabled *bool `xml:"faultToleranceEnabled" json:"faultToleranceEnabled,omitempty" vim:"4.0"` + FaultToleranceEnabled *bool `xml:"faultToleranceEnabled" json:"faultToleranceEnabled,omitempty"` // List of feature-specific version information. // // Each element refers // to the version information for a specific feature. - FeatureVersion []HostFeatureVersionInfo `xml:"featureVersion,omitempty" json:"featureVersion,omitempty" vim:"4.1"` + FeatureVersion []HostFeatureVersionInfo `xml:"featureVersion,omitempty" json:"featureVersion,omitempty"` // Datastore used to deploy Agent VMs on for this host. // // Refers instance of `Datastore`. - AgentVmDatastore *ManagedObjectReference `xml:"agentVmDatastore,omitempty" json:"agentVmDatastore,omitempty" vim:"5.0"` + AgentVmDatastore *ManagedObjectReference `xml:"agentVmDatastore,omitempty" json:"agentVmDatastore,omitempty"` // Management network for Agent VMs. // // Refers instance of `Network`. - AgentVmNetwork *ManagedObjectReference `xml:"agentVmNetwork,omitempty" json:"agentVmNetwork,omitempty" vim:"5.0"` + AgentVmNetwork *ManagedObjectReference `xml:"agentVmNetwork,omitempty" json:"agentVmNetwork,omitempty"` } func init() { @@ -35335,7 +35290,7 @@ type HostConnectInfo struct { // If // this is the case, remove or disconnect the host // from this cluster before adding it to another vCenter Server. - InDasCluster *bool `xml:"inDasCluster" json:"inDasCluster,omitempty" vim:"5.0"` + InDasCluster *bool `xml:"inDasCluster" json:"inDasCluster,omitempty"` // Summary information about the host. // // The status fields and managed object @@ -35360,9 +35315,9 @@ type HostConnectInfo struct { // The list of datastores on the host. Datastore []BaseHostDatastoreConnectInfo `xml:"datastore,omitempty,typeattr" json:"datastore,omitempty"` // License manager information on the host - License *HostLicenseConnectInfo `xml:"license,omitempty" json:"license,omitempty" vim:"4.0"` + License *HostLicenseConnectInfo `xml:"license,omitempty" json:"license,omitempty"` // Host capabilities. - Capability *HostCapability `xml:"capability,omitempty" json:"capability,omitempty" vim:"6.0"` + Capability *HostCapability `xml:"capability,omitempty" json:"capability,omitempty"` } func init() { @@ -35422,7 +35377,7 @@ type HostConnectSpec struct { // the string representation of that hash in the format: // xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx // where, 'x' represents a hexadecimal digit - SslThumbprint string `xml:"sslThumbprint,omitempty" json:"sslThumbprint,omitempty" vim:"2.5"` + SslThumbprint string `xml:"sslThumbprint,omitempty" json:"sslThumbprint,omitempty"` // The administration account on the host. // // (Required for adding @@ -35462,7 +35417,7 @@ type HostConnectSpec struct { // address used when communicating with the host. Setting this field is useful // when the VirtualCenter server is behind a NAT in which case the external NAT // address must be used. - ManagementIp string `xml:"managementIp,omitempty" json:"managementIp,omitempty" vim:"4.0"` + ManagementIp string `xml:"managementIp,omitempty" json:"managementIp,omitempty"` // If this is set then the host will be put in the specified lockdown mode // when the host is added and connected. // @@ -35479,14 +35434,14 @@ type HostConnectSpec struct { // // If equal to `lockdownDisabled` // then it is ignored. 
- LockdownMode HostLockdownMode `xml:"lockdownMode,omitempty" json:"lockdownMode,omitempty" vim:"6.0"` + LockdownMode HostLockdownMode `xml:"lockdownMode,omitempty" json:"lockdownMode,omitempty"` // Deprecated not supported since vSphere 6.5. // // Setting for a gateway for communication to the host. // // If set all trafic to the // host will pass through this gateway. - HostGateway *HostGatewaySpec `xml:"hostGateway,omitempty" json:"hostGateway,omitempty" vim:"6.0"` + HostGateway *HostGatewaySpec `xml:"hostGateway,omitempty" json:"hostGateway,omitempty"` } func init() { @@ -35652,6 +35607,12 @@ type HostCpuPackage struct { // This is independent of // the product and licensing capabilities. CpuFeature []HostCpuIdInfo `xml:"cpuFeature,omitempty" json:"cpuFeature,omitempty"` + // Family ID for the CPU + Family int16 `xml:"family,omitempty" json:"family,omitempty" vim:"8.0.3.0"` + // Model number of the CPU + Model int16 `xml:"model,omitempty" json:"model,omitempty" vim:"8.0.3.0"` + // Stepping ID of the CPU + Stepping int16 `xml:"stepping,omitempty" json:"stepping,omitempty" vim:"8.0.3.0"` } func init() { @@ -35671,7 +35632,20 @@ type HostCpuPowerManagementInfo struct { func init() { t["HostCpuPowerManagementInfo"] = reflect.TypeOf((*HostCpuPowerManagementInfo)(nil)).Elem() - minAPIVersionForType["HostCpuPowerManagementInfo"] = "4.0" +} + +// This data object describes the information related to the CPU scheduler +// running on the Host. +type HostCpuSchedulerInfo struct { + DynamicData + + // The `policy` active for CPU Scheduling. + Policy string `xml:"policy" json:"policy"` +} + +func init() { + t["HostCpuSchedulerInfo"] = reflect.TypeOf((*HostCpuSchedulerInfo)(nil)).Elem() + minAPIVersionForType["HostCpuSchedulerInfo"] = "8.0.3.0" } // The parameters of `HostVStorageObjectManager.HostCreateDisk_Task`. @@ -35744,7 +35718,7 @@ type HostDasErrorEvent struct { Message string `xml:"message,omitempty" json:"message,omitempty"` // The reason for the failure. - Reason string `xml:"reason,omitempty" json:"reason,omitempty" vim:"4.0"` + Reason string `xml:"reason,omitempty" json:"reason,omitempty"` } func init() { @@ -35758,7 +35732,6 @@ type HostDasEvent struct { func init() { t["HostDasEvent"] = reflect.TypeOf((*HostDasEvent)(nil)).Elem() - minAPIVersionForType["HostDasEvent"] = "2.5" } // Deprecated as of vSphere API 5.0, the event is no longer relevant. @@ -35932,12 +35905,11 @@ type HostDatastoreSystemCapabilities struct { // Indicates whether local datastores are supported. LocalDatastoreSupported bool `xml:"localDatastoreSupported" json:"localDatastoreSupported"` // Indicates whether vmfs extent expansion is supported. - VmfsExtentExpansionSupported *bool `xml:"vmfsExtentExpansionSupported" json:"vmfsExtentExpansionSupported,omitempty" vim:"4.0"` + VmfsExtentExpansionSupported *bool `xml:"vmfsExtentExpansionSupported" json:"vmfsExtentExpansionSupported,omitempty"` } func init() { t["HostDatastoreSystemCapabilities"] = reflect.TypeOf((*HostDatastoreSystemCapabilities)(nil)).Elem() - minAPIVersionForType["HostDatastoreSystemCapabilities"] = "2.5" } // Contains result of remove datastore request. @@ -35957,7 +35929,6 @@ type HostDatastoreSystemDatastoreResult struct { func init() { t["HostDatastoreSystemDatastoreResult"] = reflect.TypeOf((*HostDatastoreSystemDatastoreResult)(nil)).Elem() - minAPIVersionForType["HostDatastoreSystemDatastoreResult"] = "6.0" } // Specification for creating Virtual Volumed based datastore. 
@@ -35975,7 +35946,6 @@ type HostDatastoreSystemVvolDatastoreSpec struct { func init() { t["HostDatastoreSystemVvolDatastoreSpec"] = reflect.TypeOf((*HostDatastoreSystemVvolDatastoreSpec)(nil)).Elem() - minAPIVersionForType["HostDatastoreSystemVvolDatastoreSpec"] = "6.0" } // This data object represents the dateTime configuration of the host. @@ -36021,7 +35991,6 @@ type HostDateTimeConfig struct { func init() { t["HostDateTimeConfig"] = reflect.TypeOf((*HostDateTimeConfig)(nil)).Elem() - minAPIVersionForType["HostDateTimeConfig"] = "2.5" } // This data object represents the dateTime configuration of the host. @@ -36033,7 +36002,7 @@ type HostDateTimeInfo struct { // The system clock synchronization protocol. // // See `HostDateTimeInfoProtocol_enum` for possible values. - SystemClockProtocol string `xml:"systemClockProtocol,omitempty" json:"systemClockProtocol,omitempty" vim:"7.0"` + SystemClockProtocol string `xml:"systemClockProtocol,omitempty" json:"systemClockProtocol,omitempty"` // The NTP configuration on the host. NtpConfig *HostNtpConfig `xml:"ntpConfig,omitempty" json:"ntpConfig,omitempty"` // The PTP configuration on the host. @@ -36076,7 +36045,6 @@ type HostDateTimeInfo struct { func init() { t["HostDateTimeInfo"] = reflect.TypeOf((*HostDateTimeInfo)(nil)).Elem() - minAPIVersionForType["HostDateTimeInfo"] = "2.5" } type HostDateTimeSystemServiceTestResult struct { @@ -36090,6 +36058,7 @@ type HostDateTimeSystemServiceTestResult struct { func init() { t["HostDateTimeSystemServiceTestResult"] = reflect.TypeOf((*HostDateTimeSystemServiceTestResult)(nil)).Elem() + minAPIVersionForType["HostDateTimeSystemServiceTestResult"] = "7.0.3.0" } type HostDateTimeSystemTimeZone struct { @@ -36124,6 +36093,7 @@ type HostDeleteVStorageObjectExRequestType struct { func init() { t["HostDeleteVStorageObjectExRequestType"] = reflect.TypeOf((*HostDeleteVStorageObjectExRequestType)(nil)).Elem() + minAPIVersionForType["HostDeleteVStorageObjectExRequestType"] = "7.0.2.0" } type HostDeleteVStorageObjectEx_Task HostDeleteVStorageObjectExRequestType @@ -36172,7 +36142,6 @@ type HostDeploymentInfo struct { func init() { t["HostDeploymentInfo"] = reflect.TypeOf((*HostDeploymentInfo)(nil)).Elem() - minAPIVersionForType["HostDeploymentInfo"] = "6.5" } // This data object type defines a device on the host. @@ -36210,7 +36179,6 @@ type HostDhcpService struct { func init() { t["HostDhcpService"] = reflect.TypeOf((*HostDhcpService)(nil)).Elem() - minAPIVersionForType["HostDhcpService"] = "2.5" } // This data object type describes the configuration of a DHCP service @@ -36232,7 +36200,6 @@ type HostDhcpServiceConfig struct { func init() { t["HostDhcpServiceConfig"] = reflect.TypeOf((*HostDhcpServiceConfig)(nil)).Elem() - minAPIVersionForType["HostDhcpServiceConfig"] = "2.5" } type HostDhcpServiceSpec struct { @@ -36384,14 +36351,13 @@ type HostDigestInfo struct { DigestMethod string `xml:"digestMethod" json:"digestMethod"` // The variable length byte array containing the digest value calculated by // the specified digestMethod. - DigestValue []byte `xml:"digestValue" json:"digestValue"` + DigestValue ByteSlice `xml:"digestValue" json:"digestValue"` // The name of the object from which this digest value is calcaulated. 
ObjectName string `xml:"objectName,omitempty" json:"objectName,omitempty"` } func init() { t["HostDigestInfo"] = reflect.TypeOf((*HostDigestInfo)(nil)).Elem() - minAPIVersionForType["HostDigestInfo"] = "4.0" } // `HostDirectoryStoreInfo` is a base class for objects that @@ -36402,7 +36368,6 @@ type HostDirectoryStoreInfo struct { func init() { t["HostDirectoryStoreInfo"] = reflect.TypeOf((*HostDirectoryStoreInfo)(nil)).Elem() - minAPIVersionForType["HostDirectoryStoreInfo"] = "4.1" } // This event records a disconnection from a host. @@ -36410,7 +36375,7 @@ type HostDisconnectedEvent struct { HostEvent // Reason why the host was disconnected. - Reason string `xml:"reason,omitempty" json:"reason,omitempty" vim:"4.0"` + Reason string `xml:"reason,omitempty" json:"reason,omitempty"` } func init() { @@ -36434,7 +36399,6 @@ type HostDiskConfigurationResult struct { func init() { t["HostDiskConfigurationResult"] = reflect.TypeOf((*HostDiskConfigurationResult)(nil)).Elem() - minAPIVersionForType["HostDiskConfigurationResult"] = "5.5" } // This data object type describes multiple coordinate systems @@ -36587,7 +36551,7 @@ type HostDiskPartitionAttributes struct { // // This is available only for GPT formatted // disks. - Guid string `xml:"guid,omitempty" json:"guid,omitempty" vim:"5.0"` + Guid string `xml:"guid,omitempty" json:"guid,omitempty"` // The flag to indicate whether or not the partition is // logical. // @@ -36599,7 +36563,7 @@ type HostDiskPartitionAttributes struct { // Partition alignment in bytes. // // If unset, partition alignment value is unknown. - PartitionAlignment int64 `xml:"partitionAlignment,omitempty" json:"partitionAlignment,omitempty" vim:"5.0"` + PartitionAlignment int64 `xml:"partitionAlignment,omitempty" json:"partitionAlignment,omitempty"` } func init() { @@ -36707,7 +36671,7 @@ type HostDiskPartitionSpec struct { DynamicData // Partition format type on the disk. - PartitionFormat string `xml:"partitionFormat,omitempty" json:"partitionFormat,omitempty" vim:"5.0"` + PartitionFormat string `xml:"partitionFormat,omitempty" json:"partitionFormat,omitempty"` // Disk dimensions expressed as cylinder, head, sector (CHS) // coordinates. Chs *HostDiskDimensionsChs `xml:"chs,omitempty" json:"chs,omitempty"` @@ -36753,7 +36717,7 @@ type HostDnsConfig struct { // // This field is ignored if DHCP is disabled by the // `dhcp` property. - Ipv6VirtualNicDevice string `xml:"ipv6VirtualNicDevice,omitempty" json:"ipv6VirtualNicDevice,omitempty" vim:"6.7"` + Ipv6VirtualNicDevice string `xml:"ipv6VirtualNicDevice,omitempty" json:"ipv6VirtualNicDevice,omitempty"` // The host name portion of DNS name. // // For example, "esx01". @@ -36796,12 +36760,11 @@ type HostDnsConfigSpec struct { // Choose a Virtual nic based on what it is connected to. VirtualNicConnection *HostVirtualNicConnection `xml:"virtualNicConnection,omitempty" json:"virtualNicConnection,omitempty"` // Choose an IPv6 Virtual nic based on what it is connected to. 
- VirtualNicConnectionV6 *HostVirtualNicConnection `xml:"virtualNicConnectionV6,omitempty" json:"virtualNicConnectionV6,omitempty" vim:"6.7"` + VirtualNicConnectionV6 *HostVirtualNicConnection `xml:"virtualNicConnectionV6,omitempty" json:"virtualNicConnectionV6,omitempty"` } func init() { t["HostDnsConfigSpec"] = reflect.TypeOf((*HostDnsConfigSpec)(nil)).Elem() - minAPIVersionForType["HostDnsConfigSpec"] = "4.0" } // Provides information about a single Device Virtualization Extensions (DVX) @@ -36834,7 +36797,6 @@ type HostEnableAdminFailedEvent struct { func init() { t["HostEnableAdminFailedEvent"] = reflect.TypeOf((*HostEnableAdminFailedEvent)(nil)).Elem() - minAPIVersionForType["HostEnableAdminFailedEvent"] = "2.5" } // EnterMaintenanceResult is the result returned to the @@ -36852,7 +36814,6 @@ type HostEnterMaintenanceResult struct { func init() { t["HostEnterMaintenanceResult"] = reflect.TypeOf((*HostEnterMaintenanceResult)(nil)).Elem() - minAPIVersionForType["HostEnterMaintenanceResult"] = "6.7" } type HostEsxAgentHostManagerConfigInfo struct { @@ -36935,7 +36896,6 @@ type HostExtraNetworksEvent struct { func init() { t["HostExtraNetworksEvent"] = reflect.TypeOf((*HostExtraNetworksEvent)(nil)).Elem() - minAPIVersionForType["HostExtraNetworksEvent"] = "4.0" } // Data structure for component health information of a virtual machine. @@ -36950,7 +36910,6 @@ type HostFaultToleranceManagerComponentHealthInfo struct { func init() { t["HostFaultToleranceManagerComponentHealthInfo"] = reflect.TypeOf((*HostFaultToleranceManagerComponentHealthInfo)(nil)).Elem() - minAPIVersionForType["HostFaultToleranceManagerComponentHealthInfo"] = "6.0" } // A feature that the host is able to provide at a particular value. @@ -36969,7 +36928,6 @@ type HostFeatureCapability struct { func init() { t["HostFeatureCapability"] = reflect.TypeOf((*HostFeatureCapability)(nil)).Elem() - minAPIVersionForType["HostFeatureCapability"] = "5.1" } // A mask that is applied to a host feature capability. @@ -36988,7 +36946,6 @@ type HostFeatureMask struct { func init() { t["HostFeatureMask"] = reflect.TypeOf((*HostFeatureMask)(nil)).Elem() - minAPIVersionForType["HostFeatureMask"] = "5.1" } // Feature-specific version information for a host @@ -37004,7 +36961,6 @@ type HostFeatureVersionInfo struct { func init() { t["HostFeatureVersionInfo"] = reflect.TypeOf((*HostFeatureVersionInfo)(nil)).Elem() - minAPIVersionForType["HostFeatureVersionInfo"] = "4.1" } // This data object type describes the Fibre Channel host bus adapter. @@ -37049,7 +37005,6 @@ type HostFibreChannelOverEthernetHba struct { func init() { t["HostFibreChannelOverEthernetHba"] = reflect.TypeOf((*HostFibreChannelOverEthernetHba)(nil)).Elem() - minAPIVersionForType["HostFibreChannelOverEthernetHba"] = "5.0" } // Represents FCoE link information. @@ -37082,7 +37037,6 @@ type HostFibreChannelOverEthernetHbaLinkInfo struct { func init() { t["HostFibreChannelOverEthernetHbaLinkInfo"] = reflect.TypeOf((*HostFibreChannelOverEthernetHbaLinkInfo)(nil)).Elem() - minAPIVersionForType["HostFibreChannelOverEthernetHbaLinkInfo"] = "5.0" } // Fibre Channel Over Ethernet transport information about a SCSI target. @@ -37117,7 +37071,6 @@ type HostFibreChannelOverEthernetTargetTransport struct { func init() { t["HostFibreChannelOverEthernetTargetTransport"] = reflect.TypeOf((*HostFibreChannelOverEthernetTargetTransport)(nil)).Elem() - minAPIVersionForType["HostFibreChannelOverEthernetTargetTransport"] = "5.0" } // Fibre Channel transport information about a SCSI target. 
@@ -37172,7 +37125,7 @@ type HostFileSystemMountInfo struct { // faster and consumes less CPU, memory, and storage fabric bandwidth. // // For vSphere 4.0 or earlier hosts, this value will be unset. - VStorageSupport string `xml:"vStorageSupport,omitempty" json:"vStorageSupport,omitempty" vim:"4.1"` + VStorageSupport string `xml:"vStorageSupport,omitempty" json:"vStorageSupport,omitempty"` } func init() { @@ -37245,7 +37198,6 @@ type HostFirewallConfig struct { func init() { t["HostFirewallConfig"] = reflect.TypeOf((*HostFirewallConfig)(nil)).Elem() - minAPIVersionForType["HostFirewallConfig"] = "4.0" } type HostFirewallConfigRuleSetConfig struct { @@ -37256,7 +37208,7 @@ type HostFirewallConfigRuleSetConfig struct { // Flag indicating if the specified ruleset should be enabled. Enabled bool `xml:"enabled" json:"enabled"` // The list of allowed ip addresses - AllowedHosts *HostFirewallRulesetIpList `xml:"allowedHosts,omitempty" json:"allowedHosts,omitempty" vim:"5.0"` + AllowedHosts *HostFirewallRulesetIpList `xml:"allowedHosts,omitempty" json:"allowedHosts,omitempty"` } func init() { @@ -37308,7 +37260,7 @@ type HostFirewallRule struct { // The port direction. Direction HostFirewallRuleDirection `xml:"direction" json:"direction"` // The port type. - PortType HostFirewallRulePortType `xml:"portType,omitempty" json:"portType,omitempty" vim:"5.0"` + PortType HostFirewallRulePortType `xml:"portType,omitempty" json:"portType,omitempty"` // The port protocol. // // Valid values are defined by the @@ -37345,7 +37297,7 @@ type HostFirewallRuleset struct { // opened by the firewall. Enabled bool `xml:"enabled" json:"enabled"` // List of ipaddress to allow access to the service - AllowedHosts *HostFirewallRulesetIpList `xml:"allowedHosts,omitempty" json:"allowedHosts,omitempty" vim:"5.0"` + AllowedHosts *HostFirewallRulesetIpList `xml:"allowedHosts,omitempty" json:"allowedHosts,omitempty"` // Flag indicating whether user can enable/disable the firewall ruleset. UserControllable *bool `xml:"userControllable" json:"userControllable,omitempty" vim:"8.0.2.0"` // Flag indicating whether user can modify the allowed IP list of the @@ -37413,7 +37365,6 @@ type HostFirewallRulesetRulesetSpec struct { func init() { t["HostFirewallRulesetRulesetSpec"] = reflect.TypeOf((*HostFirewallRulesetRulesetSpec)(nil)).Elem() - minAPIVersionForType["HostFirewallRulesetRulesetSpec"] = "5.0" } // The FlagInfo data object type encapsulates the flag settings for a host. @@ -37429,7 +37380,6 @@ type HostFlagInfo struct { func init() { t["HostFlagInfo"] = reflect.TypeOf((*HostFlagInfo)(nil)).Elem() - minAPIVersionForType["HostFlagInfo"] = "2.5" } // When the system detects a copy of a VmfsVolume, it will not be @@ -37452,7 +37402,6 @@ type HostForceMountedInfo struct { func init() { t["HostForceMountedInfo"] = reflect.TypeOf((*HostForceMountedInfo)(nil)).Elem() - minAPIVersionForType["HostForceMountedInfo"] = "4.0" } // Data object representing the hardware vendor identity @@ -37502,7 +37451,6 @@ type HostGatewaySpec struct { func init() { t["HostGatewaySpec"] = reflect.TypeOf((*HostGatewaySpec)(nil)).Elem() - minAPIVersionForType["HostGatewaySpec"] = "6.0" } // Deprecated as of vSphere API 5.0, the event is no longer relevant. 
@@ -37514,7 +37462,6 @@ type HostGetShortNameFailedEvent struct { func init() { t["HostGetShortNameFailedEvent"] = reflect.TypeOf((*HostGetShortNameFailedEvent)(nil)).Elem() - minAPIVersionForType["HostGetShortNameFailedEvent"] = "2.5" } type HostGetVFlashModuleDefaultConfig HostGetVFlashModuleDefaultConfigRequestType @@ -37561,7 +37508,6 @@ type HostGraphicsConfig struct { func init() { t["HostGraphicsConfig"] = reflect.TypeOf((*HostGraphicsConfig)(nil)).Elem() - minAPIVersionForType["HostGraphicsConfig"] = "6.5" } // A particular graphics device with its associated type and mode. @@ -37577,11 +37523,15 @@ type HostGraphicsConfigDeviceType struct { // See `HostGraphicsConfigGraphicsType_enum` for list of // supported values. GraphicsType string `xml:"graphicsType" json:"graphicsType"` + // vGPU mode for this device. + // + // See `HostGraphicsConfigVgpuMode_enum` for list of supported + // values. If this value is unset, the mode remains unchanged. + VgpuMode string `xml:"vgpuMode,omitempty" json:"vgpuMode,omitempty" vim:"8.0.3.0"` } func init() { t["HostGraphicsConfigDeviceType"] = reflect.TypeOf((*HostGraphicsConfigDeviceType)(nil)).Elem() - minAPIVersionForType["HostGraphicsConfigDeviceType"] = "6.5" } // This data object type describes information about a single @@ -37595,8 +37545,16 @@ type HostGraphicsInfo struct { VendorName string `xml:"vendorName" json:"vendorName"` // PCI ID of this device composed of "bus:slot.function". PciId string `xml:"pciId" json:"pciId"` - // Graphics type (@see GraphicsType). + // Graphics type for this device. + // + // See `HostGraphicsInfoGraphicsType_enum` for list + // of supported values. GraphicsType string `xml:"graphicsType" json:"graphicsType"` + // vGPU mode for this device. + // + // See `HostGraphicsInfoVgpuMode_enum` for list of supported + // values. If vgpuMode is not set, it is treated as value "none". + VgpuMode string `xml:"vgpuMode,omitempty" json:"vgpuMode,omitempty" vim:"8.0.3.0"` // Memory capacity of graphics device or zero if not available. MemorySizeInKB int64 `xml:"memorySizeInKB" json:"memorySizeInKB"` // Virtual machines using this graphics device. @@ -37607,7 +37565,6 @@ type HostGraphicsInfo struct { func init() { t["HostGraphicsInfo"] = reflect.TypeOf((*HostGraphicsInfo)(nil)).Elem() - minAPIVersionForType["HostGraphicsInfo"] = "5.5" } // Data object describing the operational status of a physical @@ -37628,7 +37585,6 @@ type HostHardwareElementInfo struct { func init() { t["HostHardwareElementInfo"] = reflect.TypeOf((*HostHardwareElementInfo)(nil)).Elem() - minAPIVersionForType["HostHardwareElementInfo"] = "2.5" } // The HardwareInfo data object type describes the hardware @@ -37649,7 +37605,7 @@ type HostHardwareInfo struct { NumaInfo *HostNumaInfo `xml:"numaInfo,omitempty" json:"numaInfo,omitempty"` // Presence of System Management Controller, indicates the host is // Apple hardware, and thus capable of running Mac OS guest as VM. - SmcPresent *bool `xml:"smcPresent" json:"smcPresent,omitempty" vim:"5.0"` + SmcPresent *bool `xml:"smcPresent" json:"smcPresent,omitempty"` // The list of Peripheral Component Interconnect (PCI) devices // available on this host. PciDevice []HostPciDevice `xml:"pciDevice,omitempty" json:"pciDevice,omitempty"` @@ -37666,13 +37622,13 @@ type HostHardwareInfo struct { // the virtualization platform. 
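// A minimal sketch, in the same spirit as the example above, for the vGPU mode
// field added to HostGraphicsInfo (vim 8.0.3.0). Again a connected
// *object.HostSystem named host is assumed, and the nested property path used
// below is an assumption chosen to keep the retrieval small; per the type's
// documentation, an unset VgpuMode is treated as "none".
package example // hypothetical package, for illustration only

import (
	"context"
	"fmt"

	"github.com/vmware/govmomi/object"
	"github.com/vmware/govmomi/vim25/mo"
)

// printGraphicsDevices lists the host graphics devices with their graphics
// type and vGPU mode.
func printGraphicsDevices(ctx context.Context, host *object.HostSystem) error {
	var hs mo.HostSystem
	if err := host.Properties(ctx, host.Reference(), []string{"config.graphicsInfo"}, &hs); err != nil {
		return err
	}
	if hs.Config == nil {
		return nil // config not available
	}
	for _, g := range hs.Config.GraphicsInfo {
		mode := g.VgpuMode
		if mode == "" {
			mode = "none" // unset is documented to mean "none"
		}
		fmt.Printf("%s (%s): type=%s vgpuMode=%s memKB=%d\n",
			g.DeviceName, g.PciId, g.GraphicsType, mode, g.MemorySizeInKB)
	}
	return nil
}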
CpuFeature []HostCpuIdInfo `xml:"cpuFeature,omitempty" json:"cpuFeature,omitempty"` // Information about the system BIOS - BiosInfo *HostBIOSInfo `xml:"biosInfo,omitempty" json:"biosInfo,omitempty" vim:"2.5"` + BiosInfo *HostBIOSInfo `xml:"biosInfo,omitempty" json:"biosInfo,omitempty"` // Information about reliable memory. - ReliableMemoryInfo *HostReliableMemoryInfo `xml:"reliableMemoryInfo,omitempty" json:"reliableMemoryInfo,omitempty" vim:"5.5"` + ReliableMemoryInfo *HostReliableMemoryInfo `xml:"reliableMemoryInfo,omitempty" json:"reliableMemoryInfo,omitempty"` // Persistent memory configuration on this host. - PersistentMemoryInfo *HostPersistentMemoryInfo `xml:"persistentMemoryInfo,omitempty" json:"persistentMemoryInfo,omitempty" vim:"6.7"` + PersistentMemoryInfo *HostPersistentMemoryInfo `xml:"persistentMemoryInfo,omitempty" json:"persistentMemoryInfo,omitempty"` // SGX configuration on this host. - SgxInfo *HostSgxInfo `xml:"sgxInfo,omitempty" json:"sgxInfo,omitempty" vim:"7.0"` + SgxInfo *HostSgxInfo `xml:"sgxInfo,omitempty" json:"sgxInfo,omitempty"` // SEV configuration on this host. SevInfo *HostSevInfo `xml:"sevInfo,omitempty" json:"sevInfo,omitempty" vim:"7.0.1.0"` // Type of memory tiering configured on this host. @@ -37710,7 +37666,6 @@ type HostHardwareStatusInfo struct { func init() { t["HostHardwareStatusInfo"] = reflect.TypeOf((*HostHardwareStatusInfo)(nil)).Elem() - minAPIVersionForType["HostHardwareStatusInfo"] = "2.5" } // This data object type summarizes hardware used by the host. @@ -37727,7 +37682,7 @@ type HostHardwareSummary struct { // // This information may be vendor // specific. - OtherIdentifyingInfo []HostSystemIdentificationInfo `xml:"otherIdentifyingInfo,omitempty" json:"otherIdentifyingInfo,omitempty" vim:"2.5"` + OtherIdentifyingInfo []HostSystemIdentificationInfo `xml:"otherIdentifyingInfo,omitempty" json:"otherIdentifyingInfo,omitempty"` // The physical memory size in bytes. MemorySize int64 `xml:"memorySize" json:"memorySize"` // The CPU model. @@ -37779,7 +37734,6 @@ type HostHasComponentFailure struct { func init() { t["HostHasComponentFailure"] = reflect.TypeOf((*HostHasComponentFailure)(nil)).Elem() - minAPIVersionForType["HostHasComponentFailure"] = "6.0" } type HostHasComponentFailureFault HostHasComponentFailure @@ -37830,7 +37784,7 @@ type HostHostBusAdapter struct { // The list of supported values is described in // `HostStorageProtocol_enum`. // When unset, a default value of "scsi" is assumed. - StorageProtocol string `xml:"storageProtocol,omitempty" json:"storageProtocol,omitempty" vim:"7.0"` + StorageProtocol string `xml:"storageProtocol,omitempty" json:"storageProtocol,omitempty"` } func init() { @@ -37877,10 +37831,10 @@ type HostHyperThreadScheduleInfo struct { // The flag to indicate whether or not the CPU scheduler // should treat hyperthreads as // schedulable resources the next time the CPU scheduler starts. - // - This property is set to "true" by successfully invoking the - // `enableHyperThreading()` method. - // - This property is set to "false" by successfully invoking the - // `disableHyperthreading()` method. + // - This property is set to "true" by successfully invoking the + // `enableHyperThreading()` method. + // - This property is set to "false" by successfully invoking the + // `disableHyperthreading()` method. 
Config bool `xml:"config" json:"config"` } @@ -37936,7 +37890,6 @@ type HostImageProfileSummary struct { func init() { t["HostImageProfileSummary"] = reflect.TypeOf((*HostImageProfileSummary)(nil)).Elem() - minAPIVersionForType["HostImageProfileSummary"] = "5.0" } // Host is booted in audit mode. @@ -37946,7 +37899,6 @@ type HostInAuditModeEvent struct { func init() { t["HostInAuditModeEvent"] = reflect.TypeOf((*HostInAuditModeEvent)(nil)).Elem() - minAPIVersionForType["HostInAuditModeEvent"] = "5.0" } // Fault indicating that an operation cannot be performed while @@ -37957,7 +37909,6 @@ type HostInDomain struct { func init() { t["HostInDomain"] = reflect.TypeOf((*HostInDomain)(nil)).Elem() - minAPIVersionForType["HostInDomain"] = "4.1" } type HostInDomainFault HostInDomain @@ -37980,7 +37931,6 @@ type HostIncompatibleForFaultTolerance struct { func init() { t["HostIncompatibleForFaultTolerance"] = reflect.TypeOf((*HostIncompatibleForFaultTolerance)(nil)).Elem() - minAPIVersionForType["HostIncompatibleForFaultTolerance"] = "4.0" } type HostIncompatibleForFaultToleranceFault HostIncompatibleForFaultTolerance @@ -38005,7 +37955,6 @@ type HostIncompatibleForRecordReplay struct { func init() { t["HostIncompatibleForRecordReplay"] = reflect.TypeOf((*HostIncompatibleForRecordReplay)(nil)).Elem() - minAPIVersionForType["HostIncompatibleForRecordReplay"] = "4.0" } type HostIncompatibleForRecordReplayFault HostIncompatibleForRecordReplay @@ -38048,10 +37997,10 @@ type HostInternetScsiHba struct { // utilizing the hosting system's existing TCP/IP network connection IsSoftwareBased bool `xml:"isSoftwareBased" json:"isSoftwareBased"` // Can this adapter be disabled - CanBeDisabled *bool `xml:"canBeDisabled" json:"canBeDisabled,omitempty" vim:"5.0"` + CanBeDisabled *bool `xml:"canBeDisabled" json:"canBeDisabled,omitempty"` // Specifies if this iSCSI Adapter requires a bound network // interface to function. - NetworkBindingSupport HostInternetScsiHbaNetworkBindingSupportType `xml:"networkBindingSupport,omitempty" json:"networkBindingSupport,omitempty" vim:"5.0"` + NetworkBindingSupport HostInternetScsiHbaNetworkBindingSupportType `xml:"networkBindingSupport,omitempty" json:"networkBindingSupport,omitempty"` // The discovery capabilities for this host bus adapter. DiscoveryCapabilities HostInternetScsiHbaDiscoveryCapabilities `xml:"discoveryCapabilities" json:"discoveryCapabilities"` // The discovery settings for this host bus adapter. @@ -38064,21 +38013,21 @@ type HostInternetScsiHba struct { // settings unless their authentication settings are explicitly set. AuthenticationProperties HostInternetScsiHbaAuthenticationProperties `xml:"authenticationProperties" json:"authenticationProperties"` // The authentication capabilities for this host bus adapter. - DigestCapabilities *HostInternetScsiHbaDigestCapabilities `xml:"digestCapabilities,omitempty" json:"digestCapabilities,omitempty" vim:"4.0"` + DigestCapabilities *HostInternetScsiHbaDigestCapabilities `xml:"digestCapabilities,omitempty" json:"digestCapabilities,omitempty"` // The digest settings for this host bus adapter. // // All static and discovery targets will inherit the use of these // properties unless their digest settings are explicitly set. 
- DigestProperties *HostInternetScsiHbaDigestProperties `xml:"digestProperties,omitempty" json:"digestProperties,omitempty" vim:"4.0"` + DigestProperties *HostInternetScsiHbaDigestProperties `xml:"digestProperties,omitempty" json:"digestProperties,omitempty"` // The IP capabilities for this host bus adapter. IpCapabilities HostInternetScsiHbaIPCapabilities `xml:"ipCapabilities" json:"ipCapabilities"` // The IP settings for this host bus adapter. IpProperties HostInternetScsiHbaIPProperties `xml:"ipProperties" json:"ipProperties"` // A list of supported key/value pair advanced options for the // host bus adapter including their type information. - SupportedAdvancedOptions []OptionDef `xml:"supportedAdvancedOptions,omitempty" json:"supportedAdvancedOptions,omitempty" vim:"4.0"` + SupportedAdvancedOptions []OptionDef `xml:"supportedAdvancedOptions,omitempty" json:"supportedAdvancedOptions,omitempty"` // A list of the current options settings for the host bus adapter. - AdvancedOptions []HostInternetScsiHbaParamValue `xml:"advancedOptions,omitempty" json:"advancedOptions,omitempty" vim:"4.0"` + AdvancedOptions []HostInternetScsiHbaParamValue `xml:"advancedOptions,omitempty" json:"advancedOptions,omitempty"` // The iSCSI name of this host bus adapter. IScsiName string `xml:"iScsiName" json:"iScsiName"` // The iSCSI alias of this host bus adapter. @@ -38115,15 +38064,15 @@ type HostInternetScsiHbaAuthenticationCapabilities struct { SpkmAuthSettable bool `xml:"spkmAuthSettable" json:"spkmAuthSettable"` // When chapAuthSettable is TRUE, this describes if Mutual CHAP // configuration is allowed as well. - MutualChapSettable *bool `xml:"mutualChapSettable" json:"mutualChapSettable,omitempty" vim:"4.0"` + MutualChapSettable *bool `xml:"mutualChapSettable" json:"mutualChapSettable,omitempty"` // When targetChapSettable is TRUE, this describes if // CHAP configuration is allowed on targets associated // with the adapter. - TargetChapSettable *bool `xml:"targetChapSettable" json:"targetChapSettable,omitempty" vim:"4.0"` + TargetChapSettable *bool `xml:"targetChapSettable" json:"targetChapSettable,omitempty"` // When targetMutualChapSettable is TRUE, this describes if // Mutual CHAP configuration is allowed on targets associated // with the adapter. 
- TargetMutualChapSettable *bool `xml:"targetMutualChapSettable" json:"targetMutualChapSettable,omitempty" vim:"4.0"` + TargetMutualChapSettable *bool `xml:"targetMutualChapSettable" json:"targetMutualChapSettable,omitempty"` } func init() { @@ -38141,19 +38090,19 @@ type HostInternetScsiHbaAuthenticationProperties struct { // The CHAP secret if enabled ChapSecret string `xml:"chapSecret,omitempty" json:"chapSecret,omitempty"` // The preference for CHAP or non-CHAP protocol if CHAP is enabled - ChapAuthenticationType string `xml:"chapAuthenticationType,omitempty" json:"chapAuthenticationType,omitempty" vim:"4.0"` + ChapAuthenticationType string `xml:"chapAuthenticationType,omitempty" json:"chapAuthenticationType,omitempty"` // CHAP settings are inherited - ChapInherited *bool `xml:"chapInherited" json:"chapInherited,omitempty" vim:"4.0"` + ChapInherited *bool `xml:"chapInherited" json:"chapInherited,omitempty"` // When Mutual-CHAP is enabled, the user name that target needs to // use to authenticate with the initiator - MutualChapName string `xml:"mutualChapName,omitempty" json:"mutualChapName,omitempty" vim:"4.0"` + MutualChapName string `xml:"mutualChapName,omitempty" json:"mutualChapName,omitempty"` // When Mutual-CHAP is enabled, the secret that target needs to // use to authenticate with the initiator - MutualChapSecret string `xml:"mutualChapSecret,omitempty" json:"mutualChapSecret,omitempty" vim:"4.0"` + MutualChapSecret string `xml:"mutualChapSecret,omitempty" json:"mutualChapSecret,omitempty"` // The preference for CHAP or non-CHAP protocol if CHAP is enabled - MutualChapAuthenticationType string `xml:"mutualChapAuthenticationType,omitempty" json:"mutualChapAuthenticationType,omitempty" vim:"4.0"` + MutualChapAuthenticationType string `xml:"mutualChapAuthenticationType,omitempty" json:"mutualChapAuthenticationType,omitempty"` // Mutual-CHAP settings are inherited - MutualChapInherited *bool `xml:"mutualChapInherited" json:"mutualChapInherited,omitempty" vim:"4.0"` + MutualChapInherited *bool `xml:"mutualChapInherited" json:"mutualChapInherited,omitempty"` } func init() { @@ -38192,7 +38141,6 @@ type HostInternetScsiHbaDigestCapabilities struct { func init() { t["HostInternetScsiHbaDigestCapabilities"] = reflect.TypeOf((*HostInternetScsiHbaDigestCapabilities)(nil)).Elem() - minAPIVersionForType["HostInternetScsiHbaDigestCapabilities"] = "4.0" } // The digest settings for this host bus adapter. @@ -38211,7 +38159,6 @@ type HostInternetScsiHbaDigestProperties struct { func init() { t["HostInternetScsiHbaDigestProperties"] = reflect.TypeOf((*HostInternetScsiHbaDigestProperties)(nil)).Elem() - minAPIVersionForType["HostInternetScsiHbaDigestProperties"] = "4.0" } // The discovery capabilities for this host bus adapter. @@ -38292,43 +38239,43 @@ type HostInternetScsiHbaIPCapabilities struct { // True if the host bus adapter supports setting its secondary DNS. 
AlternateDnsServerAddressSettable bool `xml:"alternateDnsServerAddressSettable" json:"alternateDnsServerAddressSettable"` // True if the host bus adapter supports the use of IPv6 addresses - Ipv6Supported *bool `xml:"ipv6Supported" json:"ipv6Supported,omitempty" vim:"4.0"` + Ipv6Supported *bool `xml:"ipv6Supported" json:"ipv6Supported,omitempty"` // True if the host bus adapter supports setting its ARP Redirect value - ArpRedirectSettable *bool `xml:"arpRedirectSettable" json:"arpRedirectSettable,omitempty" vim:"4.0"` + ArpRedirectSettable *bool `xml:"arpRedirectSettable" json:"arpRedirectSettable,omitempty"` // True if the host bus adapter supports setting its MTU, (for Jumbo // Frames, etc) - MtuSettable *bool `xml:"mtuSettable" json:"mtuSettable,omitempty" vim:"4.0"` + MtuSettable *bool `xml:"mtuSettable" json:"mtuSettable,omitempty"` // True if the discovery and static targets can be configured with // a host name as opposed to an IP address. - HostNameAsTargetAddress *bool `xml:"hostNameAsTargetAddress" json:"hostNameAsTargetAddress,omitempty" vim:"4.0"` + HostNameAsTargetAddress *bool `xml:"hostNameAsTargetAddress" json:"hostNameAsTargetAddress,omitempty"` // True if the host bus adapter supports setting its name and alias - NameAliasSettable *bool `xml:"nameAliasSettable" json:"nameAliasSettable,omitempty" vim:"4.1"` + NameAliasSettable *bool `xml:"nameAliasSettable" json:"nameAliasSettable,omitempty"` // True if IPv4 addresssing can be enabled or disabled on the host bus adapter. - Ipv4EnableSettable *bool `xml:"ipv4EnableSettable" json:"ipv4EnableSettable,omitempty" vim:"6.0"` + Ipv4EnableSettable *bool `xml:"ipv4EnableSettable" json:"ipv4EnableSettable,omitempty"` // True if IPv6 addresssing can be enabled or disabled on the host bus adapter. - Ipv6EnableSettable *bool `xml:"ipv6EnableSettable" json:"ipv6EnableSettable,omitempty" vim:"6.0"` + Ipv6EnableSettable *bool `xml:"ipv6EnableSettable" json:"ipv6EnableSettable,omitempty"` // True if the Host bus adapter supports setting IPv6 Prefix Length. - Ipv6PrefixLengthSettable *bool `xml:"ipv6PrefixLengthSettable" json:"ipv6PrefixLengthSettable,omitempty" vim:"6.0"` + Ipv6PrefixLengthSettable *bool `xml:"ipv6PrefixLengthSettable" json:"ipv6PrefixLengthSettable,omitempty"` // Provides the value that user should be using if host bus adapter // does not support changing of prefix length. - Ipv6PrefixLength int32 `xml:"ipv6PrefixLength,omitempty" json:"ipv6PrefixLength,omitempty" vim:"6.0"` + Ipv6PrefixLength int32 `xml:"ipv6PrefixLength,omitempty" json:"ipv6PrefixLength,omitempty"` // True if the Host bus adapter supports DHCPv6 configuration. - Ipv6DhcpConfigurationSettable *bool `xml:"ipv6DhcpConfigurationSettable" json:"ipv6DhcpConfigurationSettable,omitempty" vim:"6.0"` + Ipv6DhcpConfigurationSettable *bool `xml:"ipv6DhcpConfigurationSettable" json:"ipv6DhcpConfigurationSettable,omitempty"` // True if the Host bus adapter supports setting configuration of its IPv6 link local address // User can specify link local static address if link local auto configuration is set to false. // // link local address usually starts with fe80: and has prefix 64. - Ipv6LinkLocalAutoConfigurationSettable *bool `xml:"ipv6LinkLocalAutoConfigurationSettable" json:"ipv6LinkLocalAutoConfigurationSettable,omitempty" vim:"6.0"` + Ipv6LinkLocalAutoConfigurationSettable *bool `xml:"ipv6LinkLocalAutoConfigurationSettable" json:"ipv6LinkLocalAutoConfigurationSettable,omitempty"` // True if the Host bus adapter supports router advertisement configuration method. 
// // Note: Currently Qlogic adapter does not support plumbing of any user specified // static address if router advertisement method is enabled. - Ipv6RouterAdvertisementConfigurationSettable *bool `xml:"ipv6RouterAdvertisementConfigurationSettable" json:"ipv6RouterAdvertisementConfigurationSettable,omitempty" vim:"6.0"` + Ipv6RouterAdvertisementConfigurationSettable *bool `xml:"ipv6RouterAdvertisementConfigurationSettable" json:"ipv6RouterAdvertisementConfigurationSettable,omitempty"` // True if the Host bus adapter supports setting its IPv6 default gateway. - Ipv6DefaultGatewaySettable *bool `xml:"ipv6DefaultGatewaySettable" json:"ipv6DefaultGatewaySettable,omitempty" vim:"6.0"` + Ipv6DefaultGatewaySettable *bool `xml:"ipv6DefaultGatewaySettable" json:"ipv6DefaultGatewaySettable,omitempty"` // The maximum number of supported IPv6 static addresses on the // host bus adapter that user can set. - Ipv6MaxStaticAddressesSupported int32 `xml:"ipv6MaxStaticAddressesSupported,omitempty" json:"ipv6MaxStaticAddressesSupported,omitempty" vim:"6.0"` + Ipv6MaxStaticAddressesSupported int32 `xml:"ipv6MaxStaticAddressesSupported,omitempty" json:"ipv6MaxStaticAddressesSupported,omitempty"` } func init() { @@ -38356,35 +38303,35 @@ type HostInternetScsiHbaIPProperties struct { // Deprecated since vSphere API 5.5 use { @link IPProperties#ipv6properties }. // // The current IPv6 address. - Ipv6Address string `xml:"ipv6Address,omitempty" json:"ipv6Address,omitempty" vim:"4.0"` + Ipv6Address string `xml:"ipv6Address,omitempty" json:"ipv6Address,omitempty"` // Deprecated since vSphere API 5.5 use { @link IPProperties#ipv6properties }. // // The current IPv6 subnet mask. - Ipv6SubnetMask string `xml:"ipv6SubnetMask,omitempty" json:"ipv6SubnetMask,omitempty" vim:"4.0"` + Ipv6SubnetMask string `xml:"ipv6SubnetMask,omitempty" json:"ipv6SubnetMask,omitempty"` // Deprecated since vSphere API 5.5 use { @link IPProperties#ipv6properties }. // // The current IPv6 default gateway. - Ipv6DefaultGateway string `xml:"ipv6DefaultGateway,omitempty" json:"ipv6DefaultGateway,omitempty" vim:"4.0"` + Ipv6DefaultGateway string `xml:"ipv6DefaultGateway,omitempty" json:"ipv6DefaultGateway,omitempty"` // True if ARP Redirect is enabled - ArpRedirectEnabled *bool `xml:"arpRedirectEnabled" json:"arpRedirectEnabled,omitempty" vim:"4.0"` + ArpRedirectEnabled *bool `xml:"arpRedirectEnabled" json:"arpRedirectEnabled,omitempty"` // True if the host bus adapter supports setting its MTU, (for Jumbo // Frames, etc) // Setting enableJumboFrames and not a numeric mtu value implies // autoselection of appropriate MTU value for Jumbo Frames. - Mtu int32 `xml:"mtu,omitempty" json:"mtu,omitempty" vim:"4.0"` + Mtu int32 `xml:"mtu,omitempty" json:"mtu,omitempty"` JumboFramesEnabled *bool `xml:"jumboFramesEnabled" json:"jumboFramesEnabled,omitempty"` // True if IPv4 is enabled. // // Unset value will keep existing IPv4 enabled state as is. - Ipv4Enabled *bool `xml:"ipv4Enabled" json:"ipv4Enabled,omitempty" vim:"6.0"` + Ipv4Enabled *bool `xml:"ipv4Enabled" json:"ipv4Enabled,omitempty"` // True if IPv6 is enabled. // // Unset value will keep existing IPv6 enabled state as is. - Ipv6Enabled *bool `xml:"ipv6Enabled" json:"ipv6Enabled,omitempty" vim:"6.0"` + Ipv6Enabled *bool `xml:"ipv6Enabled" json:"ipv6Enabled,omitempty"` // IPv6 properties. // // It is set only if { @link #ipv6Enabled } is true. 
- Ipv6properties *HostInternetScsiHbaIPv6Properties `xml:"ipv6properties,omitempty" json:"ipv6properties,omitempty" vim:"6.0"` + Ipv6properties *HostInternetScsiHbaIPv6Properties `xml:"ipv6properties,omitempty" json:"ipv6properties,omitempty"` } func init() { @@ -38421,7 +38368,6 @@ type HostInternetScsiHbaIPv6Properties struct { func init() { t["HostInternetScsiHbaIPv6Properties"] = reflect.TypeOf((*HostInternetScsiHbaIPv6Properties)(nil)).Elem() - minAPIVersionForType["HostInternetScsiHbaIPv6Properties"] = "6.0" } // The IPv6 address. @@ -38448,7 +38394,6 @@ type HostInternetScsiHbaIscsiIpv6Address struct { func init() { t["HostInternetScsiHbaIscsiIpv6Address"] = reflect.TypeOf((*HostInternetScsiHbaIscsiIpv6Address)(nil)).Elem() - minAPIVersionForType["HostInternetScsiHbaIscsiIpv6Address"] = "6.0" } // Describes the the value of an iSCSI parameter, and whether @@ -38468,7 +38413,6 @@ type HostInternetScsiHbaParamValue struct { func init() { t["HostInternetScsiHbaParamValue"] = reflect.TypeOf((*HostInternetScsiHbaParamValue)(nil)).Elem() - minAPIVersionForType["HostInternetScsiHbaParamValue"] = "4.0" } // The iSCSI send target. @@ -38486,21 +38430,21 @@ type HostInternetScsiHbaSendTarget struct { // All static targets discovered via this target will inherit the // use of these settings unless the static target's authentication // settings are explicitly set. - AuthenticationProperties *HostInternetScsiHbaAuthenticationProperties `xml:"authenticationProperties,omitempty" json:"authenticationProperties,omitempty" vim:"4.0"` + AuthenticationProperties *HostInternetScsiHbaAuthenticationProperties `xml:"authenticationProperties,omitempty" json:"authenticationProperties,omitempty"` // The digest settings for this discovery target. // // All static targets discovered via this target will inherit the // use of these settings unless the static target's digest // settings are explicitly set. - DigestProperties *HostInternetScsiHbaDigestProperties `xml:"digestProperties,omitempty" json:"digestProperties,omitempty" vim:"4.0"` + DigestProperties *HostInternetScsiHbaDigestProperties `xml:"digestProperties,omitempty" json:"digestProperties,omitempty"` // A list of supported key/value pair advanced options for the // host bus adapter including their type information. - SupportedAdvancedOptions []OptionDef `xml:"supportedAdvancedOptions,omitempty" json:"supportedAdvancedOptions,omitempty" vim:"4.0"` + SupportedAdvancedOptions []OptionDef `xml:"supportedAdvancedOptions,omitempty" json:"supportedAdvancedOptions,omitempty"` // A list of the current options settings for the host bus adapter. - AdvancedOptions []HostInternetScsiHbaParamValue `xml:"advancedOptions,omitempty" json:"advancedOptions,omitempty" vim:"4.0"` + AdvancedOptions []HostInternetScsiHbaParamValue `xml:"advancedOptions,omitempty" json:"advancedOptions,omitempty"` // The device name of the host bus adapter from which settings // can be inherited. - Parent string `xml:"parent,omitempty" json:"parent,omitempty" vim:"4.0"` + Parent string `xml:"parent,omitempty" json:"parent,omitempty"` } func init() { @@ -38522,22 +38466,22 @@ type HostInternetScsiHbaStaticTarget struct { // Discovery method // each static target is discovered by some method // define in TargetDiscoveryMethod. - DiscoveryMethod string `xml:"discoveryMethod,omitempty" json:"discoveryMethod,omitempty" vim:"5.1"` + DiscoveryMethod string `xml:"discoveryMethod,omitempty" json:"discoveryMethod,omitempty"` // The authentication settings for this target. 
- AuthenticationProperties *HostInternetScsiHbaAuthenticationProperties `xml:"authenticationProperties,omitempty" json:"authenticationProperties,omitempty" vim:"4.0"` + AuthenticationProperties *HostInternetScsiHbaAuthenticationProperties `xml:"authenticationProperties,omitempty" json:"authenticationProperties,omitempty"` // The digest settings for this target. - DigestProperties *HostInternetScsiHbaDigestProperties `xml:"digestProperties,omitempty" json:"digestProperties,omitempty" vim:"4.0"` + DigestProperties *HostInternetScsiHbaDigestProperties `xml:"digestProperties,omitempty" json:"digestProperties,omitempty"` // A list of supported key/value pair advanced options for the // host bus adapter including their type information. - SupportedAdvancedOptions []OptionDef `xml:"supportedAdvancedOptions,omitempty" json:"supportedAdvancedOptions,omitempty" vim:"4.0"` + SupportedAdvancedOptions []OptionDef `xml:"supportedAdvancedOptions,omitempty" json:"supportedAdvancedOptions,omitempty"` // A list of the current options settings for the host bus adapter. - AdvancedOptions []HostInternetScsiHbaParamValue `xml:"advancedOptions,omitempty" json:"advancedOptions,omitempty" vim:"4.0"` + AdvancedOptions []HostInternetScsiHbaParamValue `xml:"advancedOptions,omitempty" json:"advancedOptions,omitempty"` // The parent entity from which settings can be inherited. // // It can either // be unset, or set to the device name of the host bus adapter or the // name of the SendTarget. - Parent string `xml:"parent,omitempty" json:"parent,omitempty" vim:"4.0"` + Parent string `xml:"parent,omitempty" json:"parent,omitempty"` } func init() { @@ -38556,7 +38500,6 @@ type HostInternetScsiHbaTargetSet struct { func init() { t["HostInternetScsiHbaTargetSet"] = reflect.TypeOf((*HostInternetScsiHbaTargetSet)(nil)).Elem() - minAPIVersionForType["HostInternetScsiHbaTargetSet"] = "4.0" } // Internet SCSI transport information about a SCSI target. @@ -38584,7 +38527,6 @@ type HostInventoryFull struct { func init() { t["HostInventoryFull"] = reflect.TypeOf((*HostInventoryFull)(nil)).Elem() - minAPIVersionForType["HostInventoryFull"] = "2.5" } // This event records if the inventory of hosts has reached capacity. @@ -38596,7 +38538,6 @@ type HostInventoryFullEvent struct { func init() { t["HostInventoryFullEvent"] = reflect.TypeOf((*HostInventoryFullEvent)(nil)).Elem() - minAPIVersionForType["HostInventoryFullEvent"] = "2.5" } type HostInventoryFullFault HostInventoryFull @@ -38613,7 +38554,6 @@ type HostInventoryUnreadableEvent struct { func init() { t["HostInventoryUnreadableEvent"] = reflect.TypeOf((*HostInventoryUnreadableEvent)(nil)).Elem() - minAPIVersionForType["HostInventoryUnreadableEvent"] = "4.0" } // Information about an IO Filter installed on a host. @@ -38626,7 +38566,6 @@ type HostIoFilterInfo struct { func init() { t["HostIoFilterInfo"] = reflect.TypeOf((*HostIoFilterInfo)(nil)).Elem() - minAPIVersionForType["HostIoFilterInfo"] = "6.0" } // This event records a change in host IP address. @@ -38641,7 +38580,6 @@ type HostIpChangedEvent struct { func init() { t["HostIpChangedEvent"] = reflect.TypeOf((*HostIpChangedEvent)(nil)).Elem() - minAPIVersionForType["HostIpChangedEvent"] = "2.5" } // The IP configuration. @@ -38671,7 +38609,7 @@ type HostIpConfig struct { // current IP configuration and cannot be set. 
SubnetMask string `xml:"subnetMask,omitempty" json:"subnetMask,omitempty"` // The ipv6 configuration - IpV6Config *HostIpConfigIpV6AddressConfiguration `xml:"ipV6Config,omitempty" json:"ipV6Config,omitempty" vim:"4.0"` + IpV6Config *HostIpConfigIpV6AddressConfiguration `xml:"ipV6Config,omitempty" json:"ipV6Config,omitempty"` } func init() { @@ -38718,7 +38656,6 @@ type HostIpConfigIpV6Address struct { func init() { t["HostIpConfigIpV6Address"] = reflect.TypeOf((*HostIpConfigIpV6Address)(nil)).Elem() - minAPIVersionForType["HostIpConfigIpV6Address"] = "4.0" } // The ipv6 address configuration @@ -38744,7 +38681,6 @@ type HostIpConfigIpV6AddressConfiguration struct { func init() { t["HostIpConfigIpV6AddressConfiguration"] = reflect.TypeOf((*HostIpConfigIpV6AddressConfiguration)(nil)).Elem() - minAPIVersionForType["HostIpConfigIpV6AddressConfiguration"] = "4.0" } // Deprecated as of vSphere API 5.0, the event is no longer relevant. @@ -38762,7 +38698,6 @@ type HostIpInconsistentEvent struct { func init() { t["HostIpInconsistentEvent"] = reflect.TypeOf((*HostIpInconsistentEvent)(nil)).Elem() - minAPIVersionForType["HostIpInconsistentEvent"] = "2.5" } // IP Route Configuration. @@ -38786,11 +38721,11 @@ type HostIpRouteConfig struct { // is ignored otherwise. GatewayDevice string `xml:"gatewayDevice,omitempty" json:"gatewayDevice,omitempty"` // The default ipv6 gateway address - IpV6DefaultGateway string `xml:"ipV6DefaultGateway,omitempty" json:"ipV6DefaultGateway,omitempty" vim:"4.0"` + IpV6DefaultGateway string `xml:"ipV6DefaultGateway,omitempty" json:"ipV6DefaultGateway,omitempty"` // The ipv6 gateway device. // // This applies to service console gateway only, it - IpV6GatewayDevice string `xml:"ipV6GatewayDevice,omitempty" json:"ipV6GatewayDevice,omitempty" vim:"4.0"` + IpV6GatewayDevice string `xml:"ipV6GatewayDevice,omitempty" json:"ipV6GatewayDevice,omitempty"` } func init() { @@ -38815,7 +38750,6 @@ type HostIpRouteConfigSpec struct { func init() { t["HostIpRouteConfigSpec"] = reflect.TypeOf((*HostIpRouteConfigSpec)(nil)).Elem() - minAPIVersionForType["HostIpRouteConfigSpec"] = "4.0" } // IpRouteEntry. @@ -38837,12 +38771,11 @@ type HostIpRouteEntry struct { // // This property can only be read from the server. // It will be ignored if set by the client. - DeviceName string `xml:"deviceName,omitempty" json:"deviceName,omitempty" vim:"4.1"` + DeviceName string `xml:"deviceName,omitempty" json:"deviceName,omitempty"` } func init() { t["HostIpRouteEntry"] = reflect.TypeOf((*HostIpRouteEntry)(nil)).Elem() - minAPIVersionForType["HostIpRouteEntry"] = "4.0" } // Routing Entry Operation. @@ -38864,7 +38797,6 @@ type HostIpRouteOp struct { func init() { t["HostIpRouteOp"] = reflect.TypeOf((*HostIpRouteOp)(nil)).Elem() - minAPIVersionForType["HostIpRouteOp"] = "4.0" } // IpRouteEntry. @@ -38881,7 +38813,6 @@ type HostIpRouteTableConfig struct { func init() { t["HostIpRouteTableConfig"] = reflect.TypeOf((*HostIpRouteTableConfig)(nil)).Elem() - minAPIVersionForType["HostIpRouteTableConfig"] = "4.0" } // IpRouteTableInfo. @@ -38897,7 +38828,6 @@ type HostIpRouteTableInfo struct { func init() { t["HostIpRouteTableInfo"] = reflect.TypeOf((*HostIpRouteTableInfo)(nil)).Elem() - minAPIVersionForType["HostIpRouteTableInfo"] = "4.0" } // Deprecated as of vSphere API 5.0, the event is no longer relevant. 
@@ -38909,7 +38839,6 @@ type HostIpToShortNameFailedEvent struct { func init() { t["HostIpToShortNameFailedEvent"] = reflect.TypeOf((*HostIpToShortNameFailedEvent)(nil)).Elem() - minAPIVersionForType["HostIpToShortNameFailedEvent"] = "2.5" } // The IpmiInfo data object contains IPMI (Intelligent Platform Management Interface) @@ -38943,7 +38872,6 @@ type HostIpmiInfo struct { func init() { t["HostIpmiInfo"] = reflect.TypeOf((*HostIpmiInfo)(nil)).Elem() - minAPIVersionForType["HostIpmiInfo"] = "4.0" } // This event records that the isolation address could not be pinged. @@ -38957,7 +38885,6 @@ type HostIsolationIpPingFailedEvent struct { func init() { t["HostIsolationIpPingFailedEvent"] = reflect.TypeOf((*HostIsolationIpPingFailedEvent)(nil)).Elem() - minAPIVersionForType["HostIsolationIpPingFailedEvent"] = "2.5" } // Encapsulates information about all licensable resources on the host. @@ -38976,7 +38903,6 @@ type HostLicensableResourceInfo struct { func init() { t["HostLicensableResourceInfo"] = reflect.TypeOf((*HostLicensableResourceInfo)(nil)).Elem() - minAPIVersionForType["HostLicensableResourceInfo"] = "5.0" } // This data object type describes license information stored on the host. @@ -38991,12 +38917,11 @@ type HostLicenseConnectInfo struct { // // NOTE: // The values in this property may not be accurate for pre-5.0 hosts when returned by vCenter 5.0 - Resource *HostLicensableResourceInfo `xml:"resource,omitempty" json:"resource,omitempty" vim:"5.0"` + Resource *HostLicensableResourceInfo `xml:"resource,omitempty" json:"resource,omitempty"` } func init() { t["HostLicenseConnectInfo"] = reflect.TypeOf((*HostLicenseConnectInfo)(nil)).Elem() - minAPIVersionForType["HostLicenseConnectInfo"] = "4.0" } // This event records an expired host license. @@ -39068,13 +38993,13 @@ type HostListSummary struct { // The customized field values. CustomValue []BaseCustomFieldValue `xml:"customValue,omitempty,typeattr" json:"customValue,omitempty"` // IP address of the VirtualCenter server managing this host, if any. - ManagementServerIp string `xml:"managementServerIp,omitempty" json:"managementServerIp,omitempty" vim:"2.5"` + ManagementServerIp string `xml:"managementServerIp,omitempty" json:"managementServerIp,omitempty"` // The most capable Enhanced VMotion Compatibility mode supported by the // host hardware and software; unset if this host cannot participate in // any EVC mode. // // See also `Capability.supportedEVCMode`. - MaxEVCModeKey string `xml:"maxEVCModeKey,omitempty" json:"maxEVCModeKey,omitempty" vim:"4.0"` + MaxEVCModeKey string `xml:"maxEVCModeKey,omitempty" json:"maxEVCModeKey,omitempty"` // The Enhanced VMotion Compatibility mode that is currently in effect // for this host. // @@ -39082,7 +39007,7 @@ type HostListSummary struct { // will match the cluster's EVC mode; otherwise this will be unset. // // See also `Capability.supportedEVCMode`. - CurrentEVCModeKey string `xml:"currentEVCModeKey,omitempty" json:"currentEVCModeKey,omitempty" vim:"4.0"` + CurrentEVCModeKey string `xml:"currentEVCModeKey,omitempty" json:"currentEVCModeKey,omitempty"` // The Enhanced VMotion Compatibility Graphics mode that is currently in // effect for this host. // @@ -39093,7 +39018,7 @@ type HostListSummary struct { // See also `Capability.supportedEVCGraphicsMode`. 
CurrentEVCGraphicsModeKey string `xml:"currentEVCGraphicsModeKey,omitempty" json:"currentEVCGraphicsModeKey,omitempty" vim:"7.0.1.0"` // Gateway configuration, if vCenter server manages the host via a gateway - Gateway *HostListSummaryGatewaySummary `xml:"gateway,omitempty" json:"gateway,omitempty" vim:"6.0"` + Gateway *HostListSummaryGatewaySummary `xml:"gateway,omitempty" json:"gateway,omitempty"` TpmAttestation *HostTpmAttestationInfo `xml:"tpmAttestation,omitempty" json:"tpmAttestation,omitempty"` // The attestation information for the host as retrieved from any Trust // Authority attestation services configured in the host's parent compute @@ -39134,7 +39059,6 @@ type HostListSummaryGatewaySummary struct { func init() { t["HostListSummaryGatewaySummary"] = reflect.TypeOf((*HostListSummaryGatewaySummary)(nil)).Elem() - minAPIVersionForType["HostListSummaryGatewaySummary"] = "6.0" } // Basic host statistics. @@ -39165,9 +39089,9 @@ type HostListSummaryQuickStats struct { // The fairness of distributed memory resource allocation on the host. DistributedMemoryFairness int32 `xml:"distributedMemoryFairness,omitempty" json:"distributedMemoryFairness,omitempty"` // The available capacity in MB. - AvailablePMemCapacity int32 `xml:"availablePMemCapacity,omitempty" json:"availablePMemCapacity,omitempty" vim:"6.7"` + AvailablePMemCapacity int32 `xml:"availablePMemCapacity,omitempty" json:"availablePMemCapacity,omitempty"` // The system uptime of the host in seconds. - Uptime int32 `xml:"uptime,omitempty" json:"uptime,omitempty" vim:"4.1"` + Uptime int32 `xml:"uptime,omitempty" json:"uptime,omitempty"` } func init() { @@ -39208,7 +39132,6 @@ type HostLocalAuthenticationInfo struct { func init() { t["HostLocalAuthenticationInfo"] = reflect.TypeOf((*HostLocalAuthenticationInfo)(nil)).Elem() - minAPIVersionForType["HostLocalAuthenticationInfo"] = "4.1" } // Local file system volume. @@ -39248,7 +39171,6 @@ type HostLocalPortCreatedEvent struct { func init() { t["HostLocalPortCreatedEvent"] = reflect.TypeOf((*HostLocalPortCreatedEvent)(nil)).Elem() - minAPIVersionForType["HostLocalPortCreatedEvent"] = "5.1" } // File layout spec of a virtual disk. @@ -39275,7 +39197,6 @@ type HostLowLevelProvisioningManagerDiskLayoutSpec struct { func init() { t["HostLowLevelProvisioningManagerDiskLayoutSpec"] = reflect.TypeOf((*HostLowLevelProvisioningManagerDiskLayoutSpec)(nil)).Elem() - minAPIVersionForType["HostLowLevelProvisioningManagerDiskLayoutSpec"] = "5.0" } type HostLowLevelProvisioningManagerFileDeleteResult struct { @@ -39316,7 +39237,6 @@ type HostLowLevelProvisioningManagerFileReserveResult struct { func init() { t["HostLowLevelProvisioningManagerFileReserveResult"] = reflect.TypeOf((*HostLowLevelProvisioningManagerFileReserveResult)(nil)).Elem() - minAPIVersionForType["HostLowLevelProvisioningManagerFileReserveResult"] = "6.0" } type HostLowLevelProvisioningManagerFileReserveSpec struct { @@ -39350,7 +39270,6 @@ type HostLowLevelProvisioningManagerSnapshotLayoutSpec struct { func init() { t["HostLowLevelProvisioningManagerSnapshotLayoutSpec"] = reflect.TypeOf((*HostLowLevelProvisioningManagerSnapshotLayoutSpec)(nil)).Elem() - minAPIVersionForType["HostLowLevelProvisioningManagerSnapshotLayoutSpec"] = "5.0" } // The status of a virtual machine migration operation. 
@@ -39391,7 +39310,6 @@ type HostLowLevelProvisioningManagerVmMigrationStatus struct { func init() { t["HostLowLevelProvisioningManagerVmMigrationStatus"] = reflect.TypeOf((*HostLowLevelProvisioningManagerVmMigrationStatus)(nil)).Elem() - minAPIVersionForType["HostLowLevelProvisioningManagerVmMigrationStatus"] = "5.1" } // Virtual machine information that can be used for recovery, for @@ -39433,7 +39351,6 @@ type HostLowLevelProvisioningManagerVmRecoveryInfo struct { func init() { t["HostLowLevelProvisioningManagerVmRecoveryInfo"] = reflect.TypeOf((*HostLowLevelProvisioningManagerVmRecoveryInfo)(nil)).Elem() - minAPIVersionForType["HostLowLevelProvisioningManagerVmRecoveryInfo"] = "5.1" } // The `HostMaintenanceSpec` data object may be used to specify @@ -39452,12 +39369,11 @@ type HostMaintenanceSpec struct { // Maintenance mode reason code. // // See `HostMaintenanceSpecPurpose_enum` for valid values. - Purpose string `xml:"purpose,omitempty" json:"purpose,omitempty" vim:"7.0"` + Purpose string `xml:"purpose,omitempty" json:"purpose,omitempty"` } func init() { t["HostMaintenanceSpec"] = reflect.TypeOf((*HostMaintenanceSpec)(nil)).Elem() - minAPIVersionForType["HostMaintenanceSpec"] = "5.5" } // This class defines healthcheck result of the vSphere Distributed Switch. @@ -39470,7 +39386,6 @@ type HostMemberHealthCheckResult struct { func init() { t["HostMemberHealthCheckResult"] = reflect.TypeOf((*HostMemberHealthCheckResult)(nil)).Elem() - minAPIVersionForType["HostMemberHealthCheckResult"] = "5.1" } // The `HostMemberRuntimeInfo` data object @@ -39496,16 +39411,20 @@ type HostMemberRuntimeInfo struct { // `DistributedVirtualSwitchHostMember*.*DistributedVirtualSwitchHostMember.statusDetail`. StatusDetail string `xml:"statusDetail,omitempty" json:"statusDetail,omitempty"` // NSX-T component status. - NsxtStatus string `xml:"nsxtStatus,omitempty" json:"nsxtStatus,omitempty" vim:"7.0"` + NsxtStatus string `xml:"nsxtStatus,omitempty" json:"nsxtStatus,omitempty"` // Additional information regarding the NSX-T component status. - NsxtStatusDetail string `xml:"nsxtStatusDetail,omitempty" json:"nsxtStatusDetail,omitempty" vim:"7.0"` + NsxtStatusDetail string `xml:"nsxtStatusDetail,omitempty" json:"nsxtStatusDetail,omitempty"` // Health check result for the host that joined the distributed virtual switch. HealthCheckResult []BaseHostMemberHealthCheckResult `xml:"healthCheckResult,omitempty,typeattr" json:"healthCheckResult,omitempty"` + // Indicate the runtime state of uplink on the host. + // + // It is only applicable when `DistributedVirtualSwitchHostMemberConfigInfo.networkOffloadingEnabled` + // is true. 
+ HostUplinkState []DistributedVirtualSwitchHostMemberHostUplinkState `xml:"hostUplinkState,omitempty" json:"hostUplinkState,omitempty" vim:"8.0.3.0"` } func init() { t["HostMemberRuntimeInfo"] = reflect.TypeOf((*HostMemberRuntimeInfo)(nil)).Elem() - minAPIVersionForType["HostMemberRuntimeInfo"] = "5.1" } // This class defines healthcheck result of a specified Uplink port @@ -39519,7 +39438,6 @@ type HostMemberUplinkHealthCheckResult struct { func init() { t["HostMemberUplinkHealthCheckResult"] = reflect.TypeOf((*HostMemberUplinkHealthCheckResult)(nil)).Elem() - minAPIVersionForType["HostMemberUplinkHealthCheckResult"] = "5.1" } // The `HostMemoryProfile` data object represents @@ -39536,7 +39454,6 @@ type HostMemoryProfile struct { func init() { t["HostMemoryProfile"] = reflect.TypeOf((*HostMemoryProfile)(nil)).Elem() - minAPIVersionForType["HostMemoryProfile"] = "4.0" } // DataObject used for configuring the memory setting @@ -39549,7 +39466,6 @@ type HostMemorySpec struct { func init() { t["HostMemorySpec"] = reflect.TypeOf((*HostMemorySpec)(nil)).Elem() - minAPIVersionForType["HostMemorySpec"] = "4.0" } // Information about a memory tier on this host. @@ -39567,6 +39483,11 @@ type HostMemoryTierInfo struct { // See `HostMemoryTierFlags_enum` for supported // values. Flags []string `xml:"flags,omitempty" json:"flags,omitempty"` + // System internal flags pertaining to the memory tier. + // + // See + // `HostMemoryTierInternalFlags_enum` for supported values. + InternalFlags []string `xml:"internalFlags,omitempty" json:"internalFlags,omitempty" vim:"8.0.3.0"` // Size of the memory tier in bytes. Size int64 `xml:"size" json:"size"` } @@ -39588,7 +39509,6 @@ type HostMissingNetworksEvent struct { func init() { t["HostMissingNetworksEvent"] = reflect.TypeOf((*HostMissingNetworksEvent)(nil)).Elem() - minAPIVersionForType["HostMissingNetworksEvent"] = "4.0" } // This event records when host monitoring state has changed. @@ -39600,12 +39520,11 @@ type HostMonitoringStateChangedEvent struct { State string `xml:"state" json:"state"` // The previous service state in // `ClusterDasConfigInfoServiceState_enum` - PrevState string `xml:"prevState,omitempty" json:"prevState,omitempty" vim:"6.5"` + PrevState string `xml:"prevState,omitempty" json:"prevState,omitempty"` } func init() { t["HostMonitoringStateChangedEvent"] = reflect.TypeOf((*HostMonitoringStateChangedEvent)(nil)).Elem() - minAPIVersionForType["HostMonitoringStateChangedEvent"] = "4.0" } // The `HostMountInfo` data object provides information related @@ -39628,7 +39547,7 @@ type HostMountInfo struct { // For a discovered // volume, which is mounted, this is true. When this value is // unset, the default value is true. - Mounted *bool `xml:"mounted" json:"mounted,omitempty" vim:"5.0"` + Mounted *bool `xml:"mounted" json:"mounted,omitempty"` // Flag that indicates if the datastore is currently accessible from // the host. // @@ -39637,7 +39556,7 @@ type HostMountInfo struct { // You can use the `DatastoreSummary` property if the `HostMountInfo` // property is not set. The VirtualCenter Server will always make // sure the `DatastoreSummary` property is set correctly. 
- Accessible *bool `xml:"accessible" json:"accessible,omitempty" vim:"2.5"` + Accessible *bool `xml:"accessible" json:"accessible,omitempty"` // This optional property for inaccessible reason is reported only if // a datastore becomes inaccessible as reported by // `HostMountInfo.accessible` and @@ -39648,7 +39567,7 @@ type HostMountInfo struct { // This helps to determine host specific reason for datastore inaccessibility. // If the datastore becomes accessible following an inaccessible condition, // the property `HostMountInfo.inaccessibleReason` will be unset. - InaccessibleReason string `xml:"inaccessibleReason,omitempty" json:"inaccessibleReason,omitempty" vim:"5.1"` + InaccessibleReason string `xml:"inaccessibleReason,omitempty" json:"inaccessibleReason,omitempty"` // The name of the vmknic used during mount. // // Populated by the vmk control layer if the NAS @@ -39762,7 +39681,6 @@ type HostMultipathInfoHppLogicalUnitPolicy struct { func init() { t["HostMultipathInfoHppLogicalUnitPolicy"] = reflect.TypeOf((*HostMultipathInfoHppLogicalUnitPolicy)(nil)).Elem() - minAPIVersionForType["HostMultipathInfoHppLogicalUnitPolicy"] = "7.0" } // The `HostMultipathInfoLogicalUnit` data object @@ -39786,7 +39704,7 @@ type HostMultipathInfoLogicalUnit struct { // // This policy // is currently immutable. - StorageArrayTypePolicy *HostMultipathInfoLogicalUnitStorageArrayTypePolicy `xml:"storageArrayTypePolicy,omitempty" json:"storageArrayTypePolicy,omitempty" vim:"4.0"` + StorageArrayTypePolicy *HostMultipathInfoLogicalUnitStorageArrayTypePolicy `xml:"storageArrayTypePolicy,omitempty" json:"storageArrayTypePolicy,omitempty"` } func init() { @@ -39806,16 +39724,16 @@ type HostMultipathInfoLogicalUnitPolicy struct { // Use one of the following // strings: // For NMP plugin - // - VMW\_PSP\_FIXED - Use a preferred path whenever possible. - // - VMW\_PSP\_RR - Load balance. - // - VMW\_PSP\_MRU - Use the most recently used path. + // - VMW\_PSP\_FIXED - Use a preferred path whenever possible. + // - VMW\_PSP\_RR - Load balance. + // - VMW\_PSP\_MRU - Use the most recently used path. // // For HPP plugin - // - FIXED - Use a preferred path whenever possible. - // - LB-RR - Load Balance - round robin. - // - LB-IOPS - Load Balance - iops. - // - LB-BYTES - Load Balance - bytes. - // - LB--Latency - Load balance - least latency. + // - FIXED - Use a preferred path whenever possible. + // - LB-RR - Load Balance - round robin. + // - LB-IOPS - Load Balance - iops. + // - LB-BYTES - Load Balance - bytes. + // - LB--Latency - Load balance - least latency. // // You can also use the // `HostStorageSystem.QueryPathSelectionPolicyOptions` method @@ -39844,7 +39762,6 @@ type HostMultipathInfoLogicalUnitStorageArrayTypePolicy struct { func init() { t["HostMultipathInfoLogicalUnitStorageArrayTypePolicy"] = reflect.TypeOf((*HostMultipathInfoLogicalUnitStorageArrayTypePolicy)(nil)).Elem() - minAPIVersionForType["HostMultipathInfoLogicalUnitStorageArrayTypePolicy"] = "4.0" } // The `HostMultipathInfoPath` data object @@ -39867,8 +39784,8 @@ type HostMultipathInfoPath struct { // Use this name to configure LogicalUnit multipathing policy using `HostStorageSystem.EnableMultipathPath` and `HostStorageSystem.DisableMultipathPath`. Name string `xml:"name" json:"name"` // Deprecated as of VI API 4.0: - // - System reported path states are available in `HostMultipathInfoPath.state`. - // - Paths slated for I/O can be found using `HostMultipathInfoPath.isWorkingPath`. 
+ // - System reported path states are available in `HostMultipathInfoPath.state`. + // - Paths slated for I/O can be found using `HostMultipathInfoPath.isWorkingPath`. // // State of the path. // @@ -39904,11 +39821,11 @@ type HostMultipathInfoPath struct { //
// unknown
//
//     Path is in unknown error state.
// - State string `xml:"state,omitempty" json:"state,omitempty" vim:"4.0"` + State string `xml:"state,omitempty" json:"state,omitempty"` // A path, managed by a given path selection policy(psp) plugin, is // denoted to be a Working Path if the psp plugin is likely to select the // path for performing I/O in the near future. - IsWorkingPath *bool `xml:"isWorkingPath" json:"isWorkingPath,omitempty" vim:"4.0"` + IsWorkingPath *bool `xml:"isWorkingPath" json:"isWorkingPath,omitempty"` // The host bus adapter at one endpoint of this path. Adapter string `xml:"adapter" json:"adapter"` // The logical unit at one endpoint of this path. @@ -39938,7 +39855,6 @@ type HostMultipathStateInfo struct { func init() { t["HostMultipathStateInfo"] = reflect.TypeOf((*HostMultipathStateInfo)(nil)).Elem() - minAPIVersionForType["HostMultipathStateInfo"] = "4.0" } // Data object indicating state of storage path for a named path. @@ -39964,7 +39880,6 @@ type HostMultipathStateInfoPath struct { func init() { t["HostMultipathStateInfoPath"] = reflect.TypeOf((*HostMultipathStateInfoPath)(nil)).Elem() - minAPIVersionForType["HostMultipathStateInfoPath"] = "4.0" } type HostNasVolume struct { @@ -39979,7 +39894,7 @@ type HostNasVolume struct { // The remote path of NFS/CIFS mount point. RemotePath string `xml:"remotePath" json:"remotePath"` // In case of CIFS, the user name used while connecting to the server. - UserName string `xml:"userName,omitempty" json:"userName,omitempty" vim:"2.5"` + UserName string `xml:"userName,omitempty" json:"userName,omitempty"` // This field will hold host names (or ip addresses) of all // remote hosts configured for the datastore. // @@ -39992,11 +39907,11 @@ type HostNasVolume struct { // Addition of hostnames to this list is limited to MDS server host names // or the IP addresses. In other words, the Data Server host names IP addresses // will not be appended to this list. - RemoteHostNames []string `xml:"remoteHostNames,omitempty" json:"remoteHostNames,omitempty" vim:"6.0"` + RemoteHostNames []string `xml:"remoteHostNames,omitempty" json:"remoteHostNames,omitempty"` // Security type the volume is currently using. // // See `HostNasVolumeSecurityType_enum` - SecurityType string `xml:"securityType,omitempty" json:"securityType,omitempty" vim:"6.0"` + SecurityType string `xml:"securityType,omitempty" json:"securityType,omitempty"` // Indicates that this NAS volume is protocol endpoint. // // This @@ -40004,7 +39919,7 @@ type HostNasVolume struct { // VirtualVolume based Datastore. Check the host capability // `HostCapability.virtualVolumeDatastoreSupported`. // See `HostProtocolEndpoint`. - ProtocolEndpoint *bool `xml:"protocolEndpoint" json:"protocolEndpoint,omitempty" vim:"6.0"` + ProtocolEndpoint *bool `xml:"protocolEndpoint" json:"protocolEndpoint,omitempty"` } func init() { @@ -40027,7 +39942,6 @@ type HostNasVolumeConfig struct { func init() { t["HostNasVolumeConfig"] = reflect.TypeOf((*HostNasVolumeConfig)(nil)).Elem() - minAPIVersionForType["HostNasVolumeConfig"] = "4.0" } // Specification for creating NAS volume. @@ -40091,17 +40005,17 @@ type HostNasVolumeSpec struct { // `NFS41` // If not specified, defaults to // `NFS` - Type string `xml:"type,omitempty" json:"type,omitempty" vim:"2.5"` + Type string `xml:"type,omitempty" json:"type,omitempty"` // If type is CIFS, the user name to use when connecting to the // CIFS server. // // If type is NFS, this field will be ignored. 
- UserName string `xml:"userName,omitempty" json:"userName,omitempty" vim:"2.5"` + UserName string `xml:"userName,omitempty" json:"userName,omitempty"` // If type is CIFS, the password to use when connecting to the // CIFS server. // // If type is NFS, this field will be ignored. - Password string `xml:"password,omitempty" json:"password,omitempty" vim:"2.5"` + Password string `xml:"password,omitempty" json:"password,omitempty"` // Hostnames or IP addresses of remote NFS server. // // In case @@ -40109,11 +40023,11 @@ type HostNasVolumeSpec struct { // input should be same in both remoteHost and remoteHostNames. // In case of NFS v4.1, if vmknic binding is enabled, // then input can be in format {hostip1:vmknic1, hostip2:vmknic2}. - RemoteHostNames []string `xml:"remoteHostNames,omitempty" json:"remoteHostNames,omitempty" vim:"6.0"` + RemoteHostNames []string `xml:"remoteHostNames,omitempty" json:"remoteHostNames,omitempty"` // Provided during mount indicating what security type, // if any, to use // See `HostNasVolumeSecurityType_enum` - SecurityType string `xml:"securityType,omitempty" json:"securityType,omitempty" vim:"6.0"` + SecurityType string `xml:"securityType,omitempty" json:"securityType,omitempty"` // Name of the vmknic to be used by this mount. // // This field will be updated by a client with vmknic that will be used @@ -40146,7 +40060,6 @@ type HostNasVolumeUserInfo struct { func init() { t["HostNasVolumeUserInfo"] = reflect.TypeOf((*HostNasVolumeUserInfo)(nil)).Elem() - minAPIVersionForType["HostNasVolumeUserInfo"] = "6.0" } // A network address translation (NAT) service instance provides @@ -40163,7 +40076,6 @@ type HostNatService struct { func init() { t["HostNatService"] = reflect.TypeOf((*HostNatService)(nil)).Elem() - minAPIVersionForType["HostNatService"] = "2.5" } // This data object type describes the network address @@ -40186,7 +40098,6 @@ type HostNatServiceConfig struct { func init() { t["HostNatServiceConfig"] = reflect.TypeOf((*HostNatServiceConfig)(nil)).Elem() - minAPIVersionForType["HostNatServiceConfig"] = "2.5" } // This data object type specifies the information for the @@ -40219,7 +40130,6 @@ type HostNatServiceNameServiceSpec struct { func init() { t["HostNatServiceNameServiceSpec"] = reflect.TypeOf((*HostNatServiceNameServiceSpec)(nil)).Elem() - minAPIVersionForType["HostNatServiceNameServiceSpec"] = "2.5" } // This data object type describes the @@ -40253,7 +40163,6 @@ type HostNatServicePortForwardSpec struct { func init() { t["HostNatServicePortForwardSpec"] = reflect.TypeOf((*HostNatServicePortForwardSpec)(nil)).Elem() - minAPIVersionForType["HostNatServicePortForwardSpec"] = "2.5" } // This data object type provides the details about the @@ -40292,7 +40201,6 @@ type HostNatServiceSpec struct { func init() { t["HostNatServiceSpec"] = reflect.TypeOf((*HostNatServiceSpec)(nil)).Elem() - minAPIVersionForType["HostNatServiceSpec"] = "2.5" } // Capability vector indicating the available product features. @@ -40341,30 +40249,30 @@ type HostNetCapabilities struct { // The maximum number of port groups supported per virtual switch. // // This property will not be set if this value is unlimited. - MaxPortGroupsPerVswitch int32 `xml:"maxPortGroupsPerVswitch,omitempty" json:"maxPortGroupsPerVswitch,omitempty" vim:"2.5"` + MaxPortGroupsPerVswitch int32 `xml:"maxPortGroupsPerVswitch,omitempty" json:"maxPortGroupsPerVswitch,omitempty"` // The flag to indicate whether virtual switch configuration is // supported. 
// // This means that operations to add, remove, update virtual // switches are supported. - VswitchConfigSupported bool `xml:"vswitchConfigSupported" json:"vswitchConfigSupported" vim:"2.5"` + VswitchConfigSupported bool `xml:"vswitchConfigSupported" json:"vswitchConfigSupported"` // The flag to indicate whether Virtual NIC configuration is supported. // // This means that operations to add, remove, update virtualNic are // supported. - VnicConfigSupported bool `xml:"vnicConfigSupported" json:"vnicConfigSupported" vim:"2.5"` + VnicConfigSupported bool `xml:"vnicConfigSupported" json:"vnicConfigSupported"` // The flag to indicate whether ip route configuration for the host // is supported. - IpRouteConfigSupported bool `xml:"ipRouteConfigSupported" json:"ipRouteConfigSupported" vim:"2.5"` + IpRouteConfigSupported bool `xml:"ipRouteConfigSupported" json:"ipRouteConfigSupported"` // The flag to indicate whether DNS configuration for the host is // supported. - DnsConfigSupported bool `xml:"dnsConfigSupported" json:"dnsConfigSupported" vim:"2.5"` + DnsConfigSupported bool `xml:"dnsConfigSupported" json:"dnsConfigSupported"` // This flag indicates whether or not the host is able to support // dhcp configuration for vnics. - DhcpOnVnicSupported bool `xml:"dhcpOnVnicSupported" json:"dhcpOnVnicSupported" vim:"2.5"` + DhcpOnVnicSupported bool `xml:"dhcpOnVnicSupported" json:"dhcpOnVnicSupported"` // The flag to indicate whether the host is capable of communicating // using ipv6 protocol - IpV6Supported *bool `xml:"ipV6Supported" json:"ipV6Supported,omitempty" vim:"4.0"` + IpV6Supported *bool `xml:"ipV6Supported" json:"ipV6Supported,omitempty"` // The flag to indicate whether the host supports Backup NFC NIOC system // traffic, Unset means Backup NFC NIOC system traffic is not supported. BackupNfcNiocSupported *bool `xml:"backupNfcNiocSupported" json:"backupNfcNiocSupported,omitempty" vim:"7.0.1.0"` @@ -40436,7 +40344,6 @@ type HostNetStackInstance struct { func init() { t["HostNetStackInstance"] = reflect.TypeOf((*HostNetStackInstance)(nil)).Elem() - minAPIVersionForType["HostNetStackInstance"] = "5.5" } // This data object type describes networking host configuration data objects. @@ -40452,7 +40359,7 @@ type HostNetworkConfig struct { // Virtual switches configured on the host. Vswitch []HostVirtualSwitchConfig `xml:"vswitch,omitempty" json:"vswitch,omitempty"` // Host proxy switches configured on the host. - ProxySwitch []HostProxySwitchConfig `xml:"proxySwitch,omitempty" json:"proxySwitch,omitempty" vim:"4.0"` + ProxySwitch []HostProxySwitchConfig `xml:"proxySwitch,omitempty" json:"proxySwitch,omitempty"` // Port groups configured on the host. Portgroup []HostPortGroupConfig `xml:"portgroup,omitempty" json:"portgroup,omitempty"` // Physical network adapters as seen by the primary operating system. @@ -40482,21 +40389,21 @@ type HostNetworkConfig struct { // the default NetStackInstance. // // IP routing table configuration of the host. - RouteTableConfig *HostIpRouteTableConfig `xml:"routeTableConfig,omitempty" json:"routeTableConfig,omitempty" vim:"4.0"` + RouteTableConfig *HostIpRouteTableConfig `xml:"routeTableConfig,omitempty" json:"routeTableConfig,omitempty"` // Dynamic Host Control Protocol (DHCP) Service instances configured // on the host. - Dhcp []HostDhcpServiceConfig `xml:"dhcp,omitempty" json:"dhcp,omitempty" vim:"2.5"` + Dhcp []HostDhcpServiceConfig `xml:"dhcp,omitempty" json:"dhcp,omitempty"` // Network address translation (NAT) Service instances configured // on the host. 
- Nat []HostNatServiceConfig `xml:"nat,omitempty" json:"nat,omitempty" vim:"2.5"` + Nat []HostNatServiceConfig `xml:"nat,omitempty" json:"nat,omitempty"` // Enable or disable IPv6 protocol on this system. // // This property must be set by itself, no other property can accompany // this change. Following the successful change, the system should be rebooted to // have the change take effect. - IpV6Enabled *bool `xml:"ipV6Enabled" json:"ipV6Enabled,omitempty" vim:"4.0"` + IpV6Enabled *bool `xml:"ipV6Enabled" json:"ipV6Enabled,omitempty"` // The list of network stack instance spec - NetStackSpec []HostNetworkConfigNetStackSpec `xml:"netStackSpec,omitempty" json:"netStackSpec,omitempty" vim:"5.5"` + NetStackSpec []HostNetworkConfigNetStackSpec `xml:"netStackSpec,omitempty" json:"netStackSpec,omitempty"` // Current status of NVDS to VDS migration. // // See `HostNetworkConfig*.*HostNetworkConfigMigrationStatus_enum` @@ -40523,7 +40430,6 @@ type HostNetworkConfigNetStackSpec struct { func init() { t["HostNetworkConfigNetStackSpec"] = reflect.TypeOf((*HostNetworkConfigNetStackSpec)(nil)).Elem() - minAPIVersionForType["HostNetworkConfigNetStackSpec"] = "5.5" } // The result returned by updateNetworkConfig call. @@ -40550,13 +40456,13 @@ type HostNetworkInfo struct { // Virtual switches configured on the host. Vswitch []HostVirtualSwitch `xml:"vswitch,omitempty" json:"vswitch,omitempty"` // Proxy switches configured on the host. - ProxySwitch []HostProxySwitch `xml:"proxySwitch,omitempty" json:"proxySwitch,omitempty" vim:"4.0"` + ProxySwitch []HostProxySwitch `xml:"proxySwitch,omitempty" json:"proxySwitch,omitempty"` // Port groups configured on the host. Portgroup []HostPortGroup `xml:"portgroup,omitempty" json:"portgroup,omitempty"` // Physical network adapters as seen by the primary operating system. Pnic []PhysicalNic `xml:"pnic,omitempty" json:"pnic,omitempty"` // Remote direct memory access devices, if any are present on the host. - RdmaDevice []HostRdmaDevice `xml:"rdmaDevice,omitempty" json:"rdmaDevice,omitempty" vim:"7.0"` + RdmaDevice []HostRdmaDevice `xml:"rdmaDevice,omitempty" json:"rdmaDevice,omitempty"` // Virtual network adapters configured on the host (hosted products) // or the vmkernel. // @@ -40599,23 +40505,23 @@ type HostNetworkInfo struct { // Get operation will only return its value of default NetStackInstance. // // IP routing table - RouteTableInfo *HostIpRouteTableInfo `xml:"routeTableInfo,omitempty" json:"routeTableInfo,omitempty" vim:"4.0"` + RouteTableInfo *HostIpRouteTableInfo `xml:"routeTableInfo,omitempty" json:"routeTableInfo,omitempty"` // DHCP Service instances configured on the host. - Dhcp []HostDhcpService `xml:"dhcp,omitempty" json:"dhcp,omitempty" vim:"2.5"` + Dhcp []HostDhcpService `xml:"dhcp,omitempty" json:"dhcp,omitempty"` // NAT service instances configured on the host. - Nat []HostNatService `xml:"nat,omitempty" json:"nat,omitempty" vim:"2.5"` + Nat []HostNatService `xml:"nat,omitempty" json:"nat,omitempty"` // Enable or disable IPv6 protocol on this system. - IpV6Enabled *bool `xml:"ipV6Enabled" json:"ipV6Enabled,omitempty" vim:"4.0"` + IpV6Enabled *bool `xml:"ipV6Enabled" json:"ipV6Enabled,omitempty"` // If true then dual IPv4/IPv6 stack enabled else IPv4 only. 
- AtBootIpV6Enabled *bool `xml:"atBootIpV6Enabled" json:"atBootIpV6Enabled,omitempty" vim:"4.1"` + AtBootIpV6Enabled *bool `xml:"atBootIpV6Enabled" json:"atBootIpV6Enabled,omitempty"` // List of NetStackInstances - NetStackInstance []HostNetStackInstance `xml:"netStackInstance,omitempty" json:"netStackInstance,omitempty" vim:"5.5"` + NetStackInstance []HostNetStackInstance `xml:"netStackInstance,omitempty" json:"netStackInstance,omitempty"` // List of opaque switches configured on the host. - OpaqueSwitch []HostOpaqueSwitch `xml:"opaqueSwitch,omitempty" json:"opaqueSwitch,omitempty" vim:"5.5"` + OpaqueSwitch []HostOpaqueSwitch `xml:"opaqueSwitch,omitempty" json:"opaqueSwitch,omitempty"` // List of opaque networks - OpaqueNetwork []HostOpaqueNetworkInfo `xml:"opaqueNetwork,omitempty" json:"opaqueNetwork,omitempty" vim:"5.5"` + OpaqueNetwork []HostOpaqueNetworkInfo `xml:"opaqueNetwork,omitempty" json:"opaqueNetwork,omitempty"` // The nsx transport node Id - NsxTransportNodeId string `xml:"nsxTransportNodeId,omitempty" json:"nsxTransportNodeId,omitempty" vim:"7.0"` + NsxTransportNodeId string `xml:"nsxTransportNodeId,omitempty" json:"nsxTransportNodeId,omitempty"` // Whether NSX N-VDS to VDS migration is required NvdsToVdsMigrationRequired *bool `xml:"nvdsToVdsMigrationRequired" json:"nvdsToVdsMigrationRequired,omitempty" vim:"7.0.2.0"` // Current status of NVDS to VDS migration. @@ -40694,7 +40600,6 @@ type HostNetworkResourceRuntime struct { func init() { t["HostNetworkResourceRuntime"] = reflect.TypeOf((*HostNetworkResourceRuntime)(nil)).Elem() - minAPIVersionForType["HostNetworkResourceRuntime"] = "6.0" } // This data object type describes security policy governing ports. @@ -40771,12 +40676,12 @@ type HostNicFailureCriteria struct { // // To use link speed as the criteria, _checkSpeed_ must be one of // the following values: - // - `*exact*`: Use exact speed to detect link failure. - // `*speed*` is the configured exact speed in megabits per second. - // - `*minimum*`: Use minimum speed to detect failure. - // `*speed*` is the configured minimum speed in megabits per second. - // - **empty string**: Do not use link speed to detect failure. - // `*speed*` is unused in this case. + // - `*exact*`: Use exact speed to detect link failure. + // `*speed*` is the configured exact speed in megabits per second. + // - `*minimum*`: Use minimum speed to detect failure. + // `*speed*` is the configured minimum speed in megabits per second. + // - **empty string**: Do not use link speed to detect failure. + // `*speed*` is unused in this case. CheckSpeed string `xml:"checkSpeed,omitempty" json:"checkSpeed,omitempty"` // Deprecated as of VI API 5.1, this property is not supported. // @@ -40872,10 +40777,10 @@ type HostNicTeamingPolicy struct { // Network adapter teaming policy includes failover and load balancing, // It can be one of the following: - // - `*loadbalance\_ip*`: route based on ip hash. - // - `*loadbalance\_srcmac*`: route based on source MAC hash. - // - `*loadbalance\_srcid*`: route based on the source of the port ID. - // - `*failover\_explicit*`: use explicit failover order. + // - `*loadbalance\_ip*`: route based on ip hash. + // - `*loadbalance\_srcmac*`: route based on source MAC hash. + // - `*loadbalance\_srcid*`: route based on the source of the port ID. + // - `*failover\_explicit*`: use explicit failover order. // // See also `HostNetCapabilities.nicTeamingPolicy`. 
Policy string `xml:"policy,omitempty" json:"policy,omitempty"` @@ -40932,7 +40837,6 @@ type HostNoAvailableNetworksEvent struct { func init() { t["HostNoAvailableNetworksEvent"] = reflect.TypeOf((*HostNoAvailableNetworksEvent)(nil)).Elem() - minAPIVersionForType["HostNoAvailableNetworksEvent"] = "4.0" } // This event records the fact that a host does not have any HA-enabled port @@ -40943,7 +40847,6 @@ type HostNoHAEnabledPortGroupsEvent struct { func init() { t["HostNoHAEnabledPortGroupsEvent"] = reflect.TypeOf((*HostNoHAEnabledPortGroupsEvent)(nil)).Elem() - minAPIVersionForType["HostNoHAEnabledPortGroupsEvent"] = "4.0" } // This event records the fact that a host does not have a redundant @@ -40957,7 +40860,6 @@ type HostNoRedundantManagementNetworkEvent struct { func init() { t["HostNoRedundantManagementNetworkEvent"] = reflect.TypeOf((*HostNoRedundantManagementNetworkEvent)(nil)).Elem() - minAPIVersionForType["HostNoRedundantManagementNetworkEvent"] = "2.5" } // This event records that host went out of compliance. @@ -40967,7 +40869,6 @@ type HostNonCompliantEvent struct { func init() { t["HostNonCompliantEvent"] = reflect.TypeOf((*HostNonCompliantEvent)(nil)).Elem() - minAPIVersionForType["HostNonCompliantEvent"] = "4.0" } // A HostNotConnected fault is thrown if a method needs @@ -40994,7 +40895,6 @@ type HostNotInClusterEvent struct { func init() { t["HostNotInClusterEvent"] = reflect.TypeOf((*HostNotInClusterEvent)(nil)).Elem() - minAPIVersionForType["HostNotInClusterEvent"] = "2.5" } // A HostNotReachable fault is thrown if the server was unable @@ -41033,12 +40933,11 @@ type HostNtpConfig struct { // When submitting a new ntp commands to this property via // `HostDateTimeSystem.UpdateDateTimeConfig` method, any 'restrict' // or 'drift' commands will be ignored as the those are set to fixed defaults. - ConfigFile []string `xml:"configFile,omitempty" json:"configFile,omitempty" vim:"6.0"` + ConfigFile []string `xml:"configFile,omitempty" json:"configFile,omitempty"` } func init() { t["HostNtpConfig"] = reflect.TypeOf((*HostNtpConfig)(nil)).Elem() - minAPIVersionForType["HostNtpConfig"] = "2.5" } // Information about NUMA (non-uniform memory access). @@ -41092,7 +40991,7 @@ type HostNumaNode struct { // Information about each of the pci devices associated with the node. // // The string is of SBDF format, "Segment:Bus:Device.Function". - PciId []string `xml:"pciId,omitempty" json:"pciId,omitempty" vim:"6.7"` + PciId []string `xml:"pciId,omitempty" json:"pciId,omitempty"` } func init() { @@ -41156,7 +41055,7 @@ type HostNumericSensorInfo struct { // BMC device.Entity ID.Instance.SensorNumber // Can be used to match a NumericSensorInfo object to // esxcli hardware ipmi sdr list - Id string `xml:"id,omitempty" json:"id,omitempty" vim:"6.5"` + Id string `xml:"id,omitempty" json:"id,omitempty"` // The IPMI Sensor/probe that is reporting this event. // // Use this value @@ -41167,14 +41066,13 @@ type HostNumericSensorInfo struct { // Reports the ISO 8601 Timestamp when this sensor was last updated by // management controller if the this sensor is capable of tracking // when it was last updated. - TimeStamp string `xml:"timeStamp,omitempty" json:"timeStamp,omitempty" vim:"6.5"` + TimeStamp string `xml:"timeStamp,omitempty" json:"timeStamp,omitempty"` // The FRU this sensor monitors if any. 
Fru *HostFru `xml:"fru,omitempty" json:"fru,omitempty" vim:"8.0.0.1"` } func init() { t["HostNumericSensorInfo"] = reflect.TypeOf((*HostNumericSensorInfo)(nil)).Elem() - minAPIVersionForType["HostNumericSensorInfo"] = "2.5" } // Specifies the parameters necessary to connect to a regular NVME over Fabrics @@ -41226,13 +41124,12 @@ type HostNvmeConnectSpec struct { // If unset, it defaults to a reasonable value which may vary between // releases (currently 30 seconds). // For further information, see: - // - "NVM Express 1.3", Section 5.21.1.15, "Keep Alive Timer" + // - "NVM Express 1.3", Section 5.21.1.15, "Keep Alive Timer" KeepAliveTimeout int32 `xml:"keepAliveTimeout,omitempty" json:"keepAliveTimeout,omitempty"` } func init() { t["HostNvmeConnectSpec"] = reflect.TypeOf((*HostNvmeConnectSpec)(nil)).Elem() - minAPIVersionForType["HostNvmeConnectSpec"] = "7.0" } // This data object represents an NVME controller. @@ -41259,8 +41156,8 @@ type HostNvmeController struct { // Each NVME controller is associated with an NVME subsystem // which can present a collection of controllers to the adapter. // For more details, refer to: - // - "NVM Express over Fabrics 1.0", Section 1.5.2, - // "NVM Subsystem". + // - "NVM Express over Fabrics 1.0", Section 1.5.2, + // "NVM Subsystem". Subnqn string `xml:"subnqn" json:"subnqn"` // Name of the controller. // @@ -41277,15 +41174,15 @@ type HostNvmeController struct { // // The set of possible values is described in `HostNvmeTransportType_enum`. // For details, see: - // - "NVM Express over Fabrics 1.0", Section 1.5.1, - // "Fabrics and Transports". + // - "NVM Express over Fabrics 1.0", Section 1.5.1, + // "Fabrics and Transports". TransportType string `xml:"transportType" json:"transportType"` // Indicates whether fused operations are supported by the controller. // // An NVME controller may support fused operations. This is required // to support shared storage, otherwise data corruption may occur. // For more details, see: - // - "NVM Express 1.3", Section 6.2, "Fused Operations". + // - "NVM Express 1.3", Section 6.2, "Fused Operations". FusedOperationSupported bool `xml:"fusedOperationSupported" json:"fusedOperationSupported"` // The number of I/O queues allocated for the controller. NumberOfQueues int32 `xml:"numberOfQueues" json:"numberOfQueues"` @@ -41293,15 +41190,15 @@ type HostNvmeController struct { // // This will not be greater than the Maximum Queue Entries Supported // (mqes) value for the controller. For more information, see: - // - "NVM Express 1.3", section 3.1, "Register definition". + // - "NVM Express 1.3", section 3.1, "Register definition". QueueSize int32 `xml:"queueSize" json:"queueSize"` // List of NVME namespaces attached to the controller. // // Namespaces provide access to a non-volatile storage medium // which is part of the NVM subsystem. For an overview, see: - // - "NVM Express over Fabrics 1.0", Section 1.5.2, - // "NVM Subsystem". - // - "NVM Express 1.3", section 6.1, "Namespaces". + // - "NVM Express over Fabrics 1.0", Section 1.5.2, + // "NVM Subsystem". + // - "NVM Express 1.3", section 6.1, "Namespaces". AttachedNamespace []HostNvmeNamespace `xml:"attachedNamespace,omitempty" json:"attachedNamespace,omitempty"` // The vendor ID of the controller, if available. 
VendorId string `xml:"vendorId,omitempty" json:"vendorId,omitempty"` @@ -41315,7 +41212,6 @@ type HostNvmeController struct { func init() { t["HostNvmeController"] = reflect.TypeOf((*HostNvmeController)(nil)).Elem() - minAPIVersionForType["HostNvmeController"] = "7.0" } // Specifies the parameters necessary to disconnect an NVME controller @@ -41345,7 +41241,6 @@ type HostNvmeDisconnectSpec struct { func init() { t["HostNvmeDisconnectSpec"] = reflect.TypeOf((*HostNvmeDisconnectSpec)(nil)).Elem() - minAPIVersionForType["HostNvmeDisconnectSpec"] = "7.0" } // Specifies the parameters necessary to connect to a Discovery Service and @@ -41375,7 +41270,6 @@ type HostNvmeDiscoverSpec struct { func init() { t["HostNvmeDiscoverSpec"] = reflect.TypeOf((*HostNvmeDiscoverSpec)(nil)).Elem() - minAPIVersionForType["HostNvmeDiscoverSpec"] = "7.0" } // This data object represents the Discovery Log returned by @@ -41405,7 +41299,6 @@ type HostNvmeDiscoveryLog struct { func init() { t["HostNvmeDiscoveryLog"] = reflect.TypeOf((*HostNvmeDiscoveryLog)(nil)).Elem() - minAPIVersionForType["HostNvmeDiscoveryLog"] = "7.0" } // This data object represents a single entry in the Discovery @@ -41430,8 +41323,8 @@ type HostNvmeDiscoveryLogEntry struct { // Corresponds to the PORTID field in the Discovery Log // Page Entry as specified by the NVME over Fabrics spec. // For an overview, see: - // - "NVM Express over Fabrics 1.0", Section 1.5.2, - // NVM Subsystem + // - "NVM Express over Fabrics 1.0", Section 1.5.2, + // NVM Subsystem SubsystemPortId int32 `xml:"subsystemPortId" json:"subsystemPortId"` // NVME Controller ID within the NVM subsystem. // @@ -41490,7 +41383,6 @@ type HostNvmeDiscoveryLogEntry struct { func init() { t["HostNvmeDiscoveryLogEntry"] = reflect.TypeOf((*HostNvmeDiscoveryLogEntry)(nil)).Elem() - minAPIVersionForType["HostNvmeDiscoveryLogEntry"] = "7.0" } // This data object represents an NVM Express Namespace. @@ -41520,7 +41412,7 @@ type HostNvmeNamespace struct { // // The namespace ID is only unique among the namespaces // attached to the same controller. For details, see: - // - "NVM Express 1.3", section 6.1, "Namespaces". + // - "NVM Express 1.3", section 6.1, "Namespaces". Id int32 `xml:"id" json:"id"` // Block size of the namespace in bytes. 
// @@ -41533,14 +41425,13 @@ type HostNvmeNamespace struct { // // Corresponds to the NCAP field in the Identify Namespace data // structure: - // - "NVM Express 1.3", Section 5.15, Figure 114, - // "Identify Namespace Data Structure" + // - "NVM Express 1.3", Section 5.15, Figure 114, + // "Identify Namespace Data Structure" CapacityInBlocks int64 `xml:"capacityInBlocks" json:"capacityInBlocks"` } func init() { t["HostNvmeNamespace"] = reflect.TypeOf((*HostNvmeNamespace)(nil)).Elem() - minAPIVersionForType["HostNvmeNamespace"] = "7.0" } // This data object represents the raw transport specific parameters @@ -41587,7 +41478,6 @@ type HostNvmeOpaqueTransportParameters struct { func init() { t["HostNvmeOpaqueTransportParameters"] = reflect.TypeOf((*HostNvmeOpaqueTransportParameters)(nil)).Elem() - minAPIVersionForType["HostNvmeOpaqueTransportParameters"] = "7.0" } // This data object represents the transport specific parameters @@ -41603,7 +41493,6 @@ type HostNvmeOverFibreChannelParameters struct { func init() { t["HostNvmeOverFibreChannelParameters"] = reflect.TypeOf((*HostNvmeOverFibreChannelParameters)(nil)).Elem() - minAPIVersionForType["HostNvmeOverFibreChannelParameters"] = "7.0" } // This data object represents the transport specific parameters @@ -41632,7 +41521,6 @@ type HostNvmeOverRdmaParameters struct { func init() { t["HostNvmeOverRdmaParameters"] = reflect.TypeOf((*HostNvmeOverRdmaParameters)(nil)).Elem() - minAPIVersionForType["HostNvmeOverRdmaParameters"] = "7.0" } // This data object represents the transport specific parameters @@ -41657,8 +41545,8 @@ type HostNvmeOverTcpParameters struct { // described in `HostDigestVerificationSetting_enum`. If unset, // a default value of disabled is assumed. // For details, see: - // - NVM Express Technical Proposal 8000 - NVMe/TCP Transport, - // Section 7.4.10.2, "Initialize Connection Request PDU (ICReq)" - DGST field. + // - NVM Express Technical Proposal 8000 - NVMe/TCP Transport, + // Section 7.4.10.2, "Initialize Connection Request PDU (ICReq)" - DGST field. // // When part of `HostNvmeDiscoveryLogEntry`, this value is unset. DigestVerification string `xml:"digestVerification,omitempty" json:"digestVerification,omitempty"` @@ -41682,7 +41570,6 @@ type HostNvmeSpec struct { func init() { t["HostNvmeSpec"] = reflect.TypeOf((*HostNvmeSpec)(nil)).Elem() - minAPIVersionForType["HostNvmeSpec"] = "7.0" } // This data object type describes the NVME topology information. @@ -41709,7 +41596,6 @@ type HostNvmeTopology struct { func init() { t["HostNvmeTopology"] = reflect.TypeOf((*HostNvmeTopology)(nil)).Elem() - minAPIVersionForType["HostNvmeTopology"] = "7.0" } // This data object describes the NVME interface that is @@ -41732,7 +41618,6 @@ type HostNvmeTopologyInterface struct { func init() { t["HostNvmeTopologyInterface"] = reflect.TypeOf((*HostNvmeTopologyInterface)(nil)).Elem() - minAPIVersionForType["HostNvmeTopologyInterface"] = "7.0" } // This data object represents the transport specific parameters @@ -41746,7 +41631,6 @@ type HostNvmeTransportParameters struct { func init() { t["HostNvmeTransportParameters"] = reflect.TypeOf((*HostNvmeTransportParameters)(nil)).Elem() - minAPIVersionForType["HostNvmeTransportParameters"] = "7.0" } // Information on opaque networks that are available on the host. @@ -41760,18 +41644,17 @@ type HostOpaqueNetworkInfo struct { // The type of the opaque network. OpaqueNetworkType string `xml:"opaqueNetworkType" json:"opaqueNetworkType"` // IDs of networking zones that back the opaque network. 
- PnicZone []string `xml:"pnicZone,omitempty" json:"pnicZone,omitempty" vim:"6.0"` + PnicZone []string `xml:"pnicZone,omitempty" json:"pnicZone,omitempty"` // The capability of the opaque network. // // Refer `OpaqueNetworkCapability` - Capability *OpaqueNetworkCapability `xml:"capability,omitempty" json:"capability,omitempty" vim:"6.5"` + Capability *OpaqueNetworkCapability `xml:"capability,omitempty" json:"capability,omitempty"` // Extra NSX specific properties for opaque networks. - ExtraConfig []BaseOptionValue `xml:"extraConfig,omitempty,typeattr" json:"extraConfig,omitempty" vim:"6.5"` + ExtraConfig []BaseOptionValue `xml:"extraConfig,omitempty,typeattr" json:"extraConfig,omitempty"` } func init() { t["HostOpaqueNetworkInfo"] = reflect.TypeOf((*HostOpaqueNetworkInfo)(nil)).Elem() - minAPIVersionForType["HostOpaqueNetworkInfo"] = "5.5" } // The OpaqueSwitch contains basic information about virtual switches that are @@ -41786,23 +41669,22 @@ type HostOpaqueSwitch struct { // The set of physical network adapters associated with this switch. Pnic []string `xml:"pnic,omitempty" json:"pnic,omitempty"` // The IDs of networking zones associated with this switch. - PnicZone []HostOpaqueSwitchPhysicalNicZone `xml:"pnicZone,omitempty" json:"pnicZone,omitempty" vim:"6.0"` + PnicZone []HostOpaqueSwitchPhysicalNicZone `xml:"pnicZone,omitempty" json:"pnicZone,omitempty"` // Opaque switch status. // // See // `OpaqueSwitchState` for valid values. - Status string `xml:"status,omitempty" json:"status,omitempty" vim:"6.0"` + Status string `xml:"status,omitempty" json:"status,omitempty"` // List of VTEPs associated with this switch. - Vtep []HostVirtualNic `xml:"vtep,omitempty" json:"vtep,omitempty" vim:"6.0"` + Vtep []HostVirtualNic `xml:"vtep,omitempty" json:"vtep,omitempty"` // Extra NSX specific properties for opaque switch. - ExtraConfig []BaseOptionValue `xml:"extraConfig,omitempty,typeattr" json:"extraConfig,omitempty" vim:"6.5"` + ExtraConfig []BaseOptionValue `xml:"extraConfig,omitempty,typeattr" json:"extraConfig,omitempty"` // Array of host specific feature capabilities that the switch has. - FeatureCapability []HostFeatureCapability `xml:"featureCapability,omitempty" json:"featureCapability,omitempty" vim:"6.7"` + FeatureCapability []HostFeatureCapability `xml:"featureCapability,omitempty" json:"featureCapability,omitempty"` } func init() { t["HostOpaqueSwitch"] = reflect.TypeOf((*HostOpaqueSwitch)(nil)).Elem() - minAPIVersionForType["HostOpaqueSwitch"] = "5.5" } type HostOpaqueSwitchPhysicalNicZone struct { @@ -41830,7 +41712,6 @@ type HostOvercommittedEvent struct { func init() { t["HostOvercommittedEvent"] = reflect.TypeOf((*HostOvercommittedEvent)(nil)).Elem() - minAPIVersionForType["HostOvercommittedEvent"] = "4.0" } // The VMFS file system. @@ -41845,7 +41726,6 @@ type HostPMemVolume struct { func init() { t["HostPMemVolume"] = reflect.TypeOf((*HostPMemVolume)(nil)).Elem() - minAPIVersionForType["HostPMemVolume"] = "6.7" } // The ParallelScsiHba data object type describes a @@ -41867,6 +41747,28 @@ func init() { t["HostParallelScsiTargetTransport"] = reflect.TypeOf((*HostParallelScsiTargetTransport)(nil)).Elem() } +// This data object contains information about the runtime status of +// a partial maintenance mode. +type HostPartialMaintenanceModeRuntimeInfo struct { + DynamicData + + // The unique identifier of the partial maintenance mode. 
+ // + // The values of the identifiers for the most common kinds of partial + // maintenance modes are enumerated in `HostPartialMaintenanceModeId_enum`. + Key string `xml:"key" json:"key"` + // The current runtime status for the particular partial maintenance mode. + // + // The list of supported values is specified in + // `HostPartialMaintenanceModeStatus_enum`. + HostStatus string `xml:"hostStatus" json:"hostStatus"` +} + +func init() { + t["HostPartialMaintenanceModeRuntimeInfo"] = reflect.TypeOf((*HostPartialMaintenanceModeRuntimeInfo)(nil)).Elem() + minAPIVersionForType["HostPartialMaintenanceModeRuntimeInfo"] = "8.0.3.0" +} + type HostPatchManagerLocator struct { DynamicData @@ -41907,7 +41809,6 @@ type HostPatchManagerPatchManagerOperationSpec struct { func init() { t["HostPatchManagerPatchManagerOperationSpec"] = reflect.TypeOf((*HostPatchManagerPatchManagerOperationSpec)(nil)).Elem() - minAPIVersionForType["HostPatchManagerPatchManagerOperationSpec"] = "4.0" } // The result of the operation. @@ -41927,7 +41828,6 @@ type HostPatchManagerResult struct { func init() { t["HostPatchManagerResult"] = reflect.TypeOf((*HostPatchManagerResult)(nil)).Elem() - minAPIVersionForType["HostPatchManagerResult"] = "4.0" } type HostPatchManagerStatus struct { @@ -42034,7 +41934,6 @@ type HostPathSelectionPolicyOption struct { func init() { t["HostPathSelectionPolicyOption"] = reflect.TypeOf((*HostPathSelectionPolicyOption)(nil)).Elem() - minAPIVersionForType["HostPathSelectionPolicyOption"] = "4.0" } // This data object type describes information about @@ -42090,9 +41989,13 @@ type HostPciDevice struct { // will convert the ID to its two's complement for the WSDL representation. SubDeviceId int16 `xml:"subDeviceId" json:"subDeviceId"` // The parent bridge of this PCI. - ParentBridge string `xml:"parentBridge,omitempty" json:"parentBridge,omitempty" vim:"4.0"` + ParentBridge string `xml:"parentBridge,omitempty" json:"parentBridge,omitempty"` // The device name of this PCI. DeviceName string `xml:"deviceName" json:"deviceName"` + // The name for the PCI device class representing this PCI. + // + // For example: "Host bridge", "iSCSI device", "Fibre channel HBA". + DeviceClassName string `xml:"deviceClassName,omitempty" json:"deviceClassName,omitempty" vim:"8.0.3.0"` } func init() { @@ -42114,14 +42017,13 @@ type HostPciPassthruConfig struct { // based on `HostCapability.deviceRebindWithoutRebootSupported`. // If the configuration can be applied immediately, it // will be, otherwise the changes will take effect after reboot. - ApplyNow *bool `xml:"applyNow" json:"applyNow,omitempty" vim:"7.0"` + ApplyNow *bool `xml:"applyNow" json:"applyNow,omitempty"` // The hardware label of the this PCI device. 
HardwareLabel string `xml:"hardwareLabel,omitempty" json:"hardwareLabel,omitempty" vim:"7.0.2.0"` } func init() { t["HostPciPassthruConfig"] = reflect.TypeOf((*HostPciPassthruConfig)(nil)).Elem() - minAPIVersionForType["HostPciPassthruConfig"] = "4.0" } // This data object provides information about the state of PciPassthru @@ -42145,7 +42047,6 @@ type HostPciPassthruInfo struct { func init() { t["HostPciPassthruInfo"] = reflect.TypeOf((*HostPciPassthruInfo)(nil)).Elem() - minAPIVersionForType["HostPciPassthruInfo"] = "4.0" } // This data object describes the Peripheral Component Interconnect Express @@ -42156,7 +42057,6 @@ type HostPcieHba struct { func init() { t["HostPcieHba"] = reflect.TypeOf((*HostPcieHba)(nil)).Elem() - minAPIVersionForType["HostPcieHba"] = "7.0" } // Peripheral Component Interconnect Express (PCIe) @@ -42167,7 +42067,6 @@ type HostPcieTargetTransport struct { func init() { t["HostPcieTargetTransport"] = reflect.TypeOf((*HostPcieTargetTransport)(nil)).Elem() - minAPIVersionForType["HostPcieTargetTransport"] = "7.0" } // Host Hardware information about configured and available @@ -42183,7 +42082,6 @@ type HostPersistentMemoryInfo struct { func init() { t["HostPersistentMemoryInfo"] = reflect.TypeOf((*HostPersistentMemoryInfo)(nil)).Elem() - minAPIVersionForType["HostPersistentMemoryInfo"] = "6.7" } // This data type describes the Virtual Machine and @@ -42204,7 +42102,6 @@ type HostPlacedVirtualNicIdentifier struct { func init() { t["HostPlacedVirtualNicIdentifier"] = reflect.TypeOf((*HostPlacedVirtualNicIdentifier)(nil)).Elem() - minAPIVersionForType["HostPlacedVirtualNicIdentifier"] = "6.0" } // This data object represents the plug-store topology on a host @@ -42285,7 +42182,6 @@ type HostPlugStoreTopology struct { func init() { t["HostPlugStoreTopology"] = reflect.TypeOf((*HostPlugStoreTopology)(nil)).Elem() - minAPIVersionForType["HostPlugStoreTopology"] = "4.0" } // This data object type is an association class that describes a host bus @@ -42306,7 +42202,6 @@ type HostPlugStoreTopologyAdapter struct { func init() { t["HostPlugStoreTopologyAdapter"] = reflect.TypeOf((*HostPlugStoreTopologyAdapter)(nil)).Elem() - minAPIVersionForType["HostPlugStoreTopologyAdapter"] = "4.0" } // This data object type is an association class that describes a ScsiLun @@ -42327,7 +42222,6 @@ type HostPlugStoreTopologyDevice struct { func init() { t["HostPlugStoreTopologyDevice"] = reflect.TypeOf((*HostPlugStoreTopologyDevice)(nil)).Elem() - minAPIVersionForType["HostPlugStoreTopologyDevice"] = "4.0" } // This data object type is an association class that describes a Path and @@ -42368,7 +42262,6 @@ type HostPlugStoreTopologyPath struct { func init() { t["HostPlugStoreTopologyPath"] = reflect.TypeOf((*HostPlugStoreTopologyPath)(nil)).Elem() - minAPIVersionForType["HostPlugStoreTopologyPath"] = "4.0" } // This data object type represents a Plugin in the plug store architecture. @@ -42394,7 +42287,6 @@ type HostPlugStoreTopologyPlugin struct { func init() { t["HostPlugStoreTopologyPlugin"] = reflect.TypeOf((*HostPlugStoreTopologyPlugin)(nil)).Elem() - minAPIVersionForType["HostPlugStoreTopologyPlugin"] = "4.0" } // This data object represents target information. 
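Editor's note: the plug-store topology objects above are association classes linked by string keys. A small sketch of how they are typically navigated once retrieved (for example from a host's config.storageDevice.plugStoreTopology property); field names follow the structs in this diff, everything else is assumed.

package hostops // illustrative sketch, not part of the vendored code

import "github.com/vmware/govmomi/vim25/types"

// listAdapterPaths maps each host bus adapter key in a plug-store
// topology to the path keys claimed through that adapter.
func listAdapterPaths(topology *types.HostPlugStoreTopology) map[string][]string {
	paths := make(map[string][]string)
	if topology == nil {
		return paths
	}
	for _, adapter := range topology.Adapter {
		// Adapter is the key of the HBA; Path lists the keys of the
		// HostPlugStoreTopologyPath entries associated with it.
		paths[adapter.Adapter] = append(paths[adapter.Adapter], adapter.Path...)
	}
	return paths
}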
@@ -42412,7 +42304,6 @@ type HostPlugStoreTopologyTarget struct { func init() { t["HostPlugStoreTopologyTarget"] = reflect.TypeOf((*HostPlugStoreTopologyTarget)(nil)).Elem() - minAPIVersionForType["HostPlugStoreTopologyTarget"] = "4.0" } // This data type describes the avaialable capacity @@ -42433,7 +42324,6 @@ type HostPnicNetworkResourceInfo struct { func init() { t["HostPnicNetworkResourceInfo"] = reflect.TypeOf((*HostPnicNetworkResourceInfo)(nil)).Elem() - minAPIVersionForType["HostPnicNetworkResourceInfo"] = "6.0" } // This data object type is used to describe port groups. @@ -42530,7 +42420,6 @@ type HostPortGroupProfile struct { func init() { t["HostPortGroupProfile"] = reflect.TypeOf((*HostPortGroupProfile)(nil)).Elem() - minAPIVersionForType["HostPortGroupProfile"] = "4.0" } // This data object type describes the PortGroup specification @@ -42544,11 +42433,11 @@ type HostPortGroupSpec struct { // The VLAN ID for ports using this port group. // // Possible values: - // - A value of 0 specifies that you do not want the port group associated - // with a VLAN. - // - A value from 1 to 4094 specifies a VLAN ID for the port group. - // - A value of 4095 specifies that the port group should use trunk mode, - // which allows the guest operating system to manage its own VLAN tags. + // - A value of 0 specifies that you do not want the port group associated + // with a VLAN. + // - A value from 1 to 4094 specifies a VLAN ID for the port group. + // - A value of 4095 specifies that the port group should use trunk mode, + // which allows the guest operating system to manage its own VLAN tags. VlanId int32 `xml:"vlanId" json:"vlanId"` // The identifier of the virtual switch on which // this port group is located. @@ -42608,7 +42497,6 @@ type HostPowerOpFailed struct { func init() { t["HostPowerOpFailed"] = reflect.TypeOf((*HostPowerOpFailed)(nil)).Elem() - minAPIVersionForType["HostPowerOpFailed"] = "2.5" } type HostPowerOpFailedFault BaseHostPowerOpFailed @@ -42642,7 +42530,6 @@ type HostPowerPolicy struct { func init() { t["HostPowerPolicy"] = reflect.TypeOf((*HostPowerPolicy)(nil)).Elem() - minAPIVersionForType["HostPowerPolicy"] = "4.1" } // Deprecated as of vSphere API 5.0, the event is no longer relevant. @@ -42660,7 +42547,6 @@ type HostPrimaryAgentNotShortNameEvent struct { func init() { t["HostPrimaryAgentNotShortNameEvent"] = reflect.TypeOf((*HostPrimaryAgentNotShortNameEvent)(nil)).Elem() - minAPIVersionForType["HostPrimaryAgentNotShortNameEvent"] = "2.5" } // This event records that a Profile application was done @@ -42674,7 +42560,6 @@ type HostProfileAppliedEvent struct { func init() { t["HostProfileAppliedEvent"] = reflect.TypeOf((*HostProfileAppliedEvent)(nil)).Elem() - minAPIVersionForType["HostProfileAppliedEvent"] = "4.0" } // The `HostProfileCompleteConfigSpec` data object @@ -42716,13 +42601,13 @@ type HostProfileCompleteConfigSpec struct { // to validate the profile. // // Refers instance of `HostSystem`. - ValidatorHost *ManagedObjectReference `xml:"validatorHost,omitempty" json:"validatorHost,omitempty" vim:"5.0"` + ValidatorHost *ManagedObjectReference `xml:"validatorHost,omitempty" json:"validatorHost,omitempty"` // If "false", then the host profile will be saved without being validated. // // The default if not specified is "true". // This option should be used with caution, since the resulting host profile // will not be checked for errors. 
- Validating *bool `xml:"validating" json:"validating,omitempty" vim:"6.0"` + Validating *bool `xml:"validating" json:"validating,omitempty"` // Host profile configuration data and compliance information. // // If `HostProfileCompleteConfigSpec.hostConfig` is set, @@ -42731,12 +42616,11 @@ type HostProfileCompleteConfigSpec struct { // ComplianceProfile // `HostProfileCompleteConfigSpec.customComplyProfile` // should not be set in CompleteConfigSpec. - HostConfig *HostProfileConfigInfo `xml:"hostConfig,omitempty" json:"hostConfig,omitempty" vim:"6.5"` + HostConfig *HostProfileConfigInfo `xml:"hostConfig,omitempty" json:"hostConfig,omitempty"` } func init() { t["HostProfileCompleteConfigSpec"] = reflect.TypeOf((*HostProfileCompleteConfigSpec)(nil)).Elem() - minAPIVersionForType["HostProfileCompleteConfigSpec"] = "4.0" } // The `HostProfileConfigInfo` data object @@ -42773,12 +42657,11 @@ type HostProfileConfigInfo struct { // All expressions are enabled by default. DisabledExpressionList []string `xml:"disabledExpressionList,omitempty" json:"disabledExpressionList,omitempty"` // Localized description of the profile. - Description *ProfileDescription `xml:"description,omitempty" json:"description,omitempty" vim:"6.5"` + Description *ProfileDescription `xml:"description,omitempty" json:"description,omitempty"` } func init() { t["HostProfileConfigInfo"] = reflect.TypeOf((*HostProfileConfigInfo)(nil)).Elem() - minAPIVersionForType["HostProfileConfigInfo"] = "4.0" } // `HostProfileConfigSpec` is the base data object @@ -42789,7 +42672,6 @@ type HostProfileConfigSpec struct { func init() { t["HostProfileConfigSpec"] = reflect.TypeOf((*HostProfileConfigSpec)(nil)).Elem() - minAPIVersionForType["HostProfileConfigSpec"] = "4.0" } // The `HostProfileHostBasedConfigSpec` data object @@ -42809,12 +42691,11 @@ type HostProfileHostBasedConfigSpec struct { // (or later) profile plug-ins. The resulting profile is not compatible // with legacy hosts (pre 5.0). If false or not specified, // the Profile Engine creates a legacy host profile. - UseHostProfileEngine *bool `xml:"useHostProfileEngine" json:"useHostProfileEngine,omitempty" vim:"5.0"` + UseHostProfileEngine *bool `xml:"useHostProfileEngine" json:"useHostProfileEngine,omitempty"` } func init() { t["HostProfileHostBasedConfigSpec"] = reflect.TypeOf((*HostProfileHostBasedConfigSpec)(nil)).Elem() - minAPIVersionForType["HostProfileHostBasedConfigSpec"] = "4.0" } // The data class for host profile composition result. @@ -42832,7 +42713,6 @@ type HostProfileManagerCompositionResult struct { func init() { t["HostProfileManagerCompositionResult"] = reflect.TypeOf((*HostProfileManagerCompositionResult)(nil)).Elem() - minAPIVersionForType["HostProfileManagerCompositionResult"] = "6.5" } // Composition result for a specific target host profile. 
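Editor's note: Validating above follows the usual optional-field pattern in these generated types: a nil *bool means "unset" and the server applies its documented default of true, so skipping validation requires an explicit pointer. A hedged sketch using govmomi's types.NewBool helper:

package hostops // illustrative sketch, not part of the vendored code

import "github.com/vmware/govmomi/vim25/types"

// uncheckedProfileSpec builds an update spec that saves the host profile
// without server-side validation. Leaving Validating nil keeps the
// documented default of true.
func uncheckedProfileSpec() types.HostProfileCompleteConfigSpec {
	return types.HostProfileCompleteConfigSpec{
		// Use with caution: the resulting profile is not checked for errors.
		Validating: types.NewBool(false),
	}
}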
@@ -42854,7 +42734,6 @@ type HostProfileManagerCompositionResultResultElement struct { func init() { t["HostProfileManagerCompositionResultResultElement"] = reflect.TypeOf((*HostProfileManagerCompositionResultResultElement)(nil)).Elem() - minAPIVersionForType["HostProfileManagerCompositionResultResultElement"] = "6.5" } // The data class for the host profile composition validation @@ -42872,7 +42751,6 @@ type HostProfileManagerCompositionValidationResult struct { func init() { t["HostProfileManagerCompositionValidationResult"] = reflect.TypeOf((*HostProfileManagerCompositionValidationResult)(nil)).Elem() - minAPIVersionForType["HostProfileManagerCompositionValidationResult"] = "6.5" } // The host profile composition validation result for a specific target @@ -42920,7 +42798,6 @@ type HostProfileManagerCompositionValidationResultResultElement struct { func init() { t["HostProfileManagerCompositionValidationResultResultElement"] = reflect.TypeOf((*HostProfileManagerCompositionValidationResultResultElement)(nil)).Elem() - minAPIVersionForType["HostProfileManagerCompositionValidationResultResultElement"] = "6.5" } // The `HostProfileManagerConfigTaskList` data object @@ -42941,12 +42818,11 @@ type HostProfileManagerConfigTaskList struct { // or whether the host will need to be rebooted after applying the configSpec. // See `HostProfileManagerTaskListRequirement_enum` for // details of supported values. - TaskListRequirement []string `xml:"taskListRequirement,omitempty" json:"taskListRequirement,omitempty" vim:"6.0"` + TaskListRequirement []string `xml:"taskListRequirement,omitempty" json:"taskListRequirement,omitempty"` } func init() { t["HostProfileManagerConfigTaskList"] = reflect.TypeOf((*HostProfileManagerConfigTaskList)(nil)).Elem() - minAPIVersionForType["HostProfileManagerConfigTaskList"] = "4.0" } // Data class for HostSystem-AnswerFileCreateSpec @@ -42964,7 +42840,6 @@ type HostProfileManagerHostToConfigSpecMap struct { func init() { t["HostProfileManagerHostToConfigSpecMap"] = reflect.TypeOf((*HostProfileManagerHostToConfigSpecMap)(nil)).Elem() - minAPIVersionForType["HostProfileManagerHostToConfigSpecMap"] = "6.5" } type HostProfileResetValidationState HostProfileResetValidationStateRequestType @@ -43004,12 +42879,11 @@ type HostProfileSerializedHostProfileSpec struct { // The default if not specified is "true". // This option should be used with caution, since the resulting host profile // will not be checked for errors. - Validating *bool `xml:"validating" json:"validating,omitempty" vim:"6.0"` + Validating *bool `xml:"validating" json:"validating,omitempty"` } func init() { t["HostProfileSerializedHostProfileSpec"] = reflect.TypeOf((*HostProfileSerializedHostProfileSpec)(nil)).Elem() - minAPIVersionForType["HostProfileSerializedHostProfileSpec"] = "5.0" } // This defines the validation result for the host profile. 
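Editor's note: HostProfileManagerConfigTaskList above reports its pre-conditions as plain strings from HostProfileManagerTaskListRequirement_enum, so callers usually scan that slice before applying the task list. A rough sketch; the rebootRequired constant name assumes the enum constants generated alongside these types.

package hostops // illustrative sketch, not part of the vendored code

import "github.com/vmware/govmomi/vim25/types"

// needsReboot reports whether applying the generated task list will
// require a host reboot, per taskListRequirement.
func needsReboot(list types.HostProfileManagerConfigTaskList) bool {
	for _, req := range list.TaskListRequirement {
		if req == string(types.HostProfileManagerTaskListRequirementRebootRequired) {
			return true
		}
	}
	return false
}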
@@ -43039,7 +42913,6 @@ type HostProfileValidationFailureInfo struct { func init() { t["HostProfileValidationFailureInfo"] = reflect.TypeOf((*HostProfileValidationFailureInfo)(nil)).Elem() - minAPIVersionForType["HostProfileValidationFailureInfo"] = "6.7" } // Data type used to contain a representation of host or cluster customization @@ -43053,7 +42926,6 @@ type HostProfilesEntityCustomizations struct { func init() { t["HostProfilesEntityCustomizations"] = reflect.TypeOf((*HostProfilesEntityCustomizations)(nil)).Elem() - minAPIVersionForType["HostProfilesEntityCustomizations"] = "6.5" } // ProtocolEndpoint is configured LUN or NFS directory @@ -43069,7 +42941,7 @@ type HostProtocolEndpoint struct { PeType string `xml:"peType" json:"peType"` // Type of ProtocolEndpoint // See `HostProtocolEndpointProtocolEndpointType_enum` - Type string `xml:"type,omitempty" json:"type,omitempty" vim:"6.5"` + Type string `xml:"type,omitempty" json:"type,omitempty"` // Identifier for PE assigned by VASA Provider Uuid string `xml:"uuid" json:"uuid"` // Set of ESX hosts which can see the same PE @@ -43085,20 +42957,21 @@ type HostProtocolEndpoint struct { // NFSv3 and NFSv4x PE will contain information about NFS directory NfsDir string `xml:"nfsDir,omitempty" json:"nfsDir,omitempty"` // NFSv4x PE will contain information about NFSv4x Server Scope - NfsServerScope string `xml:"nfsServerScope,omitempty" json:"nfsServerScope,omitempty" vim:"6.5"` + NfsServerScope string `xml:"nfsServerScope,omitempty" json:"nfsServerScope,omitempty"` // NFSv4x PE will contain information about NFSv4x Server Major - NfsServerMajor string `xml:"nfsServerMajor,omitempty" json:"nfsServerMajor,omitempty" vim:"6.5"` + NfsServerMajor string `xml:"nfsServerMajor,omitempty" json:"nfsServerMajor,omitempty"` // NFSv4x PE will contain information about NFSv4x Server Auth-type - NfsServerAuthType string `xml:"nfsServerAuthType,omitempty" json:"nfsServerAuthType,omitempty" vim:"6.5"` + NfsServerAuthType string `xml:"nfsServerAuthType,omitempty" json:"nfsServerAuthType,omitempty"` // NFSv4x PE will contain information about NFSv4x Server User - NfsServerUser string `xml:"nfsServerUser,omitempty" json:"nfsServerUser,omitempty" vim:"6.5"` + NfsServerUser string `xml:"nfsServerUser,omitempty" json:"nfsServerUser,omitempty"` // SCSI PE will contain information about SCSI device ID DeviceId string `xml:"deviceId,omitempty" json:"deviceId,omitempty"` + // Indicates whether the PE is being used to access a stretch-capable container + UsedByStretchedContainer *bool `xml:"usedByStretchedContainer" json:"usedByStretchedContainer,omitempty" vim:"8.0.3.0"` } func init() { t["HostProtocolEndpoint"] = reflect.TypeOf((*HostProtocolEndpoint)(nil)).Elem() - minAPIVersionForType["HostProtocolEndpoint"] = "6.0" } // The HostProxySwitch is a software entity which represents the component @@ -43120,7 +42993,7 @@ type HostProxySwitch struct { // // If configured number of ports is changed, // a host reboot is required for the new value to take effect. - ConfigNumPorts int32 `xml:"configNumPorts,omitempty" json:"configNumPorts,omitempty" vim:"5.0"` + ConfigNumPorts int32 `xml:"configNumPorts,omitempty" json:"configNumPorts,omitempty"` // The number of ports that are available on this virtual switch. NumPortsAvailable int32 `xml:"numPortsAvailable" json:"numPortsAvailable"` // The list of ports that can be potentially used by physical nics. 
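Editor's note: HostProtocolEndpoint above distinguishes block and NAS endpoints via peType. A small sketch that collects the SCSI device IDs of block protocol endpoints from whatever list the host reports; the enum constant name is assumed from the generated HostProtocolEndpointPEType_enum, the PeType and DeviceId fields are as shown in this diff.

package hostops // illustrative sketch, not part of the vendored code

import "github.com/vmware/govmomi/vim25/types"

// blockPEDeviceIDs returns the SCSI device IDs of all block-backed
// protocol endpoints in the given list.
func blockPEDeviceIDs(endpoints []types.HostProtocolEndpoint) []string {
	var ids []string
	for _, pe := range endpoints {
		if pe.PeType == string(types.HostProtocolEndpointPETypeBlock) {
			ids = append(ids, pe.DeviceId)
		}
	}
	return ids
}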
@@ -43136,23 +43009,23 @@ type HostProxySwitch struct { Spec HostProxySwitchSpec `xml:"spec" json:"spec"` // The Link Aggregation Control Protocol group and // Uplink ports in the group. - HostLag []HostProxySwitchHostLagConfig `xml:"hostLag,omitempty" json:"hostLag,omitempty" vim:"5.5"` + HostLag []HostProxySwitchHostLagConfig `xml:"hostLag,omitempty" json:"hostLag,omitempty"` // Indicates whether network reservation is supported on this switch - NetworkReservationSupported *bool `xml:"networkReservationSupported" json:"networkReservationSupported,omitempty" vim:"5.5"` + NetworkReservationSupported *bool `xml:"networkReservationSupported" json:"networkReservationSupported,omitempty"` // Indicate whether NSX-T is enabled on this switch - NsxtEnabled *bool `xml:"nsxtEnabled" json:"nsxtEnabled,omitempty" vim:"7.0"` + NsxtEnabled *bool `xml:"nsxtEnabled" json:"nsxtEnabled,omitempty"` // Is ENS enabled on this switch - EnsEnabled *bool `xml:"ensEnabled" json:"ensEnabled,omitempty" vim:"7.0"` + EnsEnabled *bool `xml:"ensEnabled" json:"ensEnabled,omitempty"` // Is ENS interrupt mode enabled on this switch - EnsInterruptEnabled *bool `xml:"ensInterruptEnabled" json:"ensInterruptEnabled,omitempty" vim:"7.0"` + EnsInterruptEnabled *bool `xml:"ensInterruptEnabled" json:"ensInterruptEnabled,omitempty"` // Transport Zones this switch joined - TransportZones []DistributedVirtualSwitchHostMemberTransportZoneInfo `xml:"transportZones,omitempty" json:"transportZones,omitempty" vim:"7.0"` + TransportZones []DistributedVirtualSwitchHostMemberTransportZoneInfo `xml:"transportZones,omitempty" json:"transportZones,omitempty"` // Uplink port names used by NSX-T - NsxUsedUplinkPort []string `xml:"nsxUsedUplinkPort,omitempty" json:"nsxUsedUplinkPort,omitempty" vim:"7.0"` + NsxUsedUplinkPort []string `xml:"nsxUsedUplinkPort,omitempty" json:"nsxUsedUplinkPort,omitempty"` // NSX-T proxy switch status - NsxtStatus string `xml:"nsxtStatus,omitempty" json:"nsxtStatus,omitempty" vim:"7.0"` + NsxtStatus string `xml:"nsxtStatus,omitempty" json:"nsxtStatus,omitempty"` // Additional information regarding the NSX-T proxy switch status - NsxtStatusDetail string `xml:"nsxtStatusDetail,omitempty" json:"nsxtStatusDetail,omitempty" vim:"7.0"` + NsxtStatusDetail string `xml:"nsxtStatusDetail,omitempty" json:"nsxtStatusDetail,omitempty"` // ENS Status From VmKernal. EnsInfo *HostProxySwitchEnsInfo `xml:"ensInfo,omitempty" json:"ensInfo,omitempty" vim:"8.0.0.1"` // Indicate if network offloading is enabled on the proxy switch of @@ -43160,11 +43033,15 @@ type HostProxySwitch struct { // // Unset implies that network offloading is disabled. NetworkOffloadingEnabled *bool `xml:"networkOffloadingEnabled" json:"networkOffloadingEnabled,omitempty" vim:"8.0.0.1"` + // Indicates the runtime state of uplinks on the host. + // + // Only set when `HostProxySwitch.networkOffloadingEnabled` + // is true. + HostUplinkState []DistributedVirtualSwitchHostMemberHostUplinkState `xml:"hostUplinkState,omitempty" json:"hostUplinkState,omitempty" vim:"8.0.3.0"` } func init() { t["HostProxySwitch"] = reflect.TypeOf((*HostProxySwitch)(nil)).Elem() - minAPIVersionForType["HostProxySwitch"] = "4.0" } // This data object type describes the HostProxySwitch configuration @@ -43177,8 +43054,8 @@ type HostProxySwitchConfig struct { // this configuration specification. // // Valid values are: - // - `edit` - // - `remove` + // - `edit` + // - `remove` // // See also `HostConfigChangeOperation_enum`. 
ChangeOperation string `xml:"changeOperation,omitempty" json:"changeOperation,omitempty"` @@ -43191,7 +43068,6 @@ type HostProxySwitchConfig struct { func init() { t["HostProxySwitchConfig"] = reflect.TypeOf((*HostProxySwitchConfig)(nil)).Elem() - minAPIVersionForType["HostProxySwitchConfig"] = "4.0" } // This data object type describes @@ -43231,7 +43107,6 @@ type HostProxySwitchHostLagConfig struct { func init() { t["HostProxySwitchHostLagConfig"] = reflect.TypeOf((*HostProxySwitchHostLagConfig)(nil)).Elem() - minAPIVersionForType["HostProxySwitchHostLagConfig"] = "5.5" } // This data object type describes the HostProxySwitch specification @@ -43247,7 +43122,6 @@ type HostProxySwitchSpec struct { func init() { t["HostProxySwitchSpec"] = reflect.TypeOf((*HostProxySwitchSpec)(nil)).Elem() - minAPIVersionForType["HostProxySwitchSpec"] = "4.0" } // Configuration information for the host PTP (Precision Time @@ -43332,6 +43206,47 @@ func init() { minAPIVersionForType["HostQualifiedName"] = "7.0.3.0" } +type HostQueryVirtualDiskUuid HostQueryVirtualDiskUuidRequestType + +func init() { + t["HostQueryVirtualDiskUuid"] = reflect.TypeOf((*HostQueryVirtualDiskUuid)(nil)).Elem() +} + +// The parameters of `HostVStorageObjectManager.HostQueryVirtualDiskUuid`. +type HostQueryVirtualDiskUuidRequestType struct { + This ManagedObjectReference `xml:"_this" json:"-"` + // The name of the disk, either a datastore path or a URL + // referring to the virtual disk whose uuid for the DDB entry needs to be queried. + // A URL has the form + // > _scheme_://_authority_/folder/_path_?dsName=_dsName_ + // + // where + // - _scheme_ is http or https. + // - _authority_ specifies the hostname or IP address of the VirtualCenter or + // ESX server and optionally the port. + // - _dsName_ is the name of the Datastore. + // - _path_ is a slash-delimited path from the root of the datastore. + // + // A datastore path has the form + // > \[_datastore_\] _path_ + // + // where + // - _datastore_ is the datastore name. + // - _path_ is a slash-delimited path from the root of the datastore. + // + // An example datastore path is "\[storage\] path/to/file.extension". + Name string `xml:"name" json:"name"` +} + +func init() { + t["HostQueryVirtualDiskUuidRequestType"] = reflect.TypeOf((*HostQueryVirtualDiskUuidRequestType)(nil)).Elem() + minAPIVersionForType["HostQueryVirtualDiskUuidRequestType"] = "8.0.3.0" +} + +type HostQueryVirtualDiskUuidResponse struct { + Returnval string `xml:"returnval" json:"returnval"` +} + // This data object represents a Remote Direct Memory Access // device as seen by the primary operating system. type HostRdmaDevice struct { @@ -43357,7 +43272,6 @@ type HostRdmaDevice struct { func init() { t["HostRdmaDevice"] = reflect.TypeOf((*HostRdmaDevice)(nil)).Elem() - minAPIVersionForType["HostRdmaDevice"] = "7.0" } // This data object represents the physical @@ -43368,7 +43282,6 @@ type HostRdmaDeviceBacking struct { func init() { t["HostRdmaDeviceBacking"] = reflect.TypeOf((*HostRdmaDeviceBacking)(nil)).Elem() - minAPIVersionForType["HostRdmaDeviceBacking"] = "7.0" } // Represents device capabilies, e.g. @@ -43387,7 +43300,6 @@ type HostRdmaDeviceCapability struct { func init() { t["HostRdmaDeviceCapability"] = reflect.TypeOf((*HostRdmaDeviceCapability)(nil)).Elem() - minAPIVersionForType["HostRdmaDeviceCapability"] = "7.0" } // Represents connection information for the RDMA device. 
@@ -43407,7 +43319,6 @@ type HostRdmaDeviceConnectionInfo struct { func init() { t["HostRdmaDeviceConnectionInfo"] = reflect.TypeOf((*HostRdmaDeviceConnectionInfo)(nil)).Elem() - minAPIVersionForType["HostRdmaDeviceConnectionInfo"] = "7.0" } // This data object represents a physical NIC backing @@ -43428,7 +43339,6 @@ type HostRdmaDevicePnicBacking struct { func init() { t["HostRdmaDevicePnicBacking"] = reflect.TypeOf((*HostRdmaDevicePnicBacking)(nil)).Elem() - minAPIVersionForType["HostRdmaDevicePnicBacking"] = "7.0" } // This data object describes the Remote Direct Memory Access @@ -43445,7 +43355,6 @@ type HostRdmaHba struct { func init() { t["HostRdmaHba"] = reflect.TypeOf((*HostRdmaHba)(nil)).Elem() - minAPIVersionForType["HostRdmaHba"] = "7.0" } // Remote Direct Memory Access (RDMA) transport @@ -43456,7 +43365,6 @@ type HostRdmaTargetTransport struct { func init() { t["HostRdmaTargetTransport"] = reflect.TypeOf((*HostRdmaTargetTransport)(nil)).Elem() - minAPIVersionForType["HostRdmaTargetTransport"] = "7.0" } // The parameters of `HostVStorageObjectManager.HostReconcileDatastoreInventory_Task`. @@ -43527,7 +43435,6 @@ type HostReliableMemoryInfo struct { func init() { t["HostReliableMemoryInfo"] = reflect.TypeOf((*HostReliableMemoryInfo)(nil)).Elem() - minAPIVersionForType["HostReliableMemoryInfo"] = "5.5" } // The parameters of `HostVStorageObjectManager.HostRelocateVStorageObject_Task`. @@ -43643,7 +43550,6 @@ type HostResignatureRescanResult struct { func init() { t["HostResignatureRescanResult"] = reflect.TypeOf((*HostResignatureRescanResult)(nil)).Elem() - minAPIVersionForType["HostResignatureRescanResult"] = "4.0" } type HostRetrieveVStorageInfrastructureObjectPolicy HostRetrieveVStorageInfrastructureObjectPolicyRequestType @@ -43798,14 +43704,14 @@ type HostRuntimeInfo struct { // // See the description in the enums for the // `PowerState` data object type. - PowerState HostSystemPowerState `xml:"powerState" json:"powerState" vim:"2.5"` + PowerState HostSystemPowerState `xml:"powerState" json:"powerState"` // The host's standby mode. // // For valid values see // `HostStandbyMode_enum`. The property is only populated by // vCenter server. If queried directly from a ESX host, the property is // is unset. - StandbyMode string `xml:"standbyMode,omitempty" json:"standbyMode,omitempty" vim:"4.1"` + StandbyMode string `xml:"standbyMode,omitempty" json:"standbyMode,omitempty"` // The flag to indicate whether or not the host is in maintenance mode. // // This @@ -43826,11 +43732,11 @@ type HostRuntimeInfo struct { // affect VM performance. // // See also `HealthUpdateManager`, `ClusterInfraUpdateHaConfigInfo`, `ClusterHostInfraUpdateHaModeAction`. - InQuarantineMode *bool `xml:"inQuarantineMode" json:"inQuarantineMode,omitempty" vim:"6.5"` + InQuarantineMode *bool `xml:"inQuarantineMode" json:"inQuarantineMode,omitempty"` // The time when the host was booted. BootTime *time.Time `xml:"bootTime" json:"bootTime,omitempty"` // Available system health status - HealthSystemRuntime *HealthSystemRuntime `xml:"healthSystemRuntime,omitempty" json:"healthSystemRuntime,omitempty" vim:"2.5"` + HealthSystemRuntime *HealthSystemRuntime `xml:"healthSystemRuntime,omitempty" json:"healthSystemRuntime,omitempty"` // The availability state of an active host in a vSphere HA enabled // cluster. // @@ -43841,34 +43747,37 @@ type HostRuntimeInfo struct { // The property is unset if vSphere HA is disabled, the host is // in maintenance or standby mode, or the host is disconnected from // vCenter Server. 
The property is set to hostDown if the host has crashed. - DasHostState *ClusterDasFdmHostState `xml:"dasHostState,omitempty" json:"dasHostState,omitempty" vim:"5.0"` + DasHostState *ClusterDasFdmHostState `xml:"dasHostState,omitempty" json:"dasHostState,omitempty"` // Deprecated as of @released("5.1") this information should be // considered to be neither complete nor reliable. // // The array of PCR digest values stored in the TPM device since the last // host boot time. - TpmPcrValues []HostTpmDigestInfo `xml:"tpmPcrValues,omitempty" json:"tpmPcrValues,omitempty" vim:"4.0"` + TpmPcrValues []HostTpmDigestInfo `xml:"tpmPcrValues,omitempty" json:"tpmPcrValues,omitempty"` // Host Runtime information related to the VSAN service. // // See also `VsanHostRuntimeInfo`. - VsanRuntimeInfo *VsanHostRuntimeInfo `xml:"vsanRuntimeInfo,omitempty" json:"vsanRuntimeInfo,omitempty" vim:"5.5"` + VsanRuntimeInfo *VsanHostRuntimeInfo `xml:"vsanRuntimeInfo,omitempty" json:"vsanRuntimeInfo,omitempty"` // This property is for getting network related runtime info - NetworkRuntimeInfo *HostRuntimeInfoNetworkRuntimeInfo `xml:"networkRuntimeInfo,omitempty" json:"networkRuntimeInfo,omitempty" vim:"5.5"` + NetworkRuntimeInfo *HostRuntimeInfoNetworkRuntimeInfo `xml:"networkRuntimeInfo,omitempty" json:"networkRuntimeInfo,omitempty"` // Runtime information of vFlash resource of the host. - VFlashResourceRuntimeInfo *HostVFlashManagerVFlashResourceRunTimeInfo `xml:"vFlashResourceRuntimeInfo,omitempty" json:"vFlashResourceRuntimeInfo,omitempty" vim:"5.5"` + VFlashResourceRuntimeInfo *HostVFlashManagerVFlashResourceRunTimeInfo `xml:"vFlashResourceRuntimeInfo,omitempty" json:"vFlashResourceRuntimeInfo,omitempty"` // The maximum theoretical virtual disk capacity supported by this host - HostMaxVirtualDiskCapacity int64 `xml:"hostMaxVirtualDiskCapacity,omitempty" json:"hostMaxVirtualDiskCapacity,omitempty" vim:"5.5"` + HostMaxVirtualDiskCapacity int64 `xml:"hostMaxVirtualDiskCapacity,omitempty" json:"hostMaxVirtualDiskCapacity,omitempty"` // Encryption state of the host. // // Valid values are enumerated by the // `CryptoState` type. - CryptoState string `xml:"cryptoState,omitempty" json:"cryptoState,omitempty" vim:"6.5"` + CryptoState string `xml:"cryptoState,omitempty" json:"cryptoState,omitempty"` // Crypto Key used for coredump encryption - CryptoKeyId *CryptoKeyId `xml:"cryptoKeyId,omitempty" json:"cryptoKeyId,omitempty" vim:"6.5"` + CryptoKeyId *CryptoKeyId `xml:"cryptoKeyId,omitempty" json:"cryptoKeyId,omitempty"` // Indicating the host is ready for NVDS to VDS migration. // // See `HostRuntimeInfoStatelessNvdsMigrationState_enum` for supported values. StatelessNvdsMigrationReady string `xml:"statelessNvdsMigrationReady,omitempty" json:"statelessNvdsMigrationReady,omitempty" vim:"7.0.2.0"` + // The following list contains the runtime status for all the partial + // maintenance modes currently supported on the host. + PartialMaintenanceMode []HostPartialMaintenanceModeRuntimeInfo `xml:"partialMaintenanceMode,omitempty" json:"partialMaintenanceMode,omitempty" vim:"8.0.3.0"` // Host persistent state encryption information. 
StateEncryption *HostRuntimeInfoStateEncryptionInfo `xml:"stateEncryption,omitempty" json:"stateEncryption,omitempty" vim:"7.0.3.0"` } @@ -43897,7 +43806,6 @@ type HostRuntimeInfoNetStackInstanceRuntimeInfo struct { func init() { t["HostRuntimeInfoNetStackInstanceRuntimeInfo"] = reflect.TypeOf((*HostRuntimeInfoNetStackInstanceRuntimeInfo)(nil)).Elem() - minAPIVersionForType["HostRuntimeInfoNetStackInstanceRuntimeInfo"] = "5.5" } // This data type describes network related runtime info @@ -43907,12 +43815,11 @@ type HostRuntimeInfoNetworkRuntimeInfo struct { // The list of network stack runtime info NetStackInstanceRuntimeInfo []HostRuntimeInfoNetStackInstanceRuntimeInfo `xml:"netStackInstanceRuntimeInfo,omitempty" json:"netStackInstanceRuntimeInfo,omitempty"` // The network resource runtime information - NetworkResourceRuntime *HostNetworkResourceRuntime `xml:"networkResourceRuntime,omitempty" json:"networkResourceRuntime,omitempty" vim:"6.0"` + NetworkResourceRuntime *HostNetworkResourceRuntime `xml:"networkResourceRuntime,omitempty" json:"networkResourceRuntime,omitempty"` } func init() { t["HostRuntimeInfoNetworkRuntimeInfo"] = reflect.TypeOf((*HostRuntimeInfoNetworkRuntimeInfo)(nil)).Elem() - minAPIVersionForType["HostRuntimeInfoNetworkRuntimeInfo"] = "5.5" } // This data type describes the host's persistent state encryption. @@ -43982,31 +43889,36 @@ type HostScsiDisk struct { // // If unset, the information whether the ScsiDisk is SSD backed // is unknown. - Ssd *bool `xml:"ssd" json:"ssd,omitempty" vim:"5.0"` + Ssd *bool `xml:"ssd" json:"ssd,omitempty"` // Indicates whether the ScsiDisk is local. // // If unset, the information whether the ScsiDisk is local is unknown. - LocalDisk *bool `xml:"localDisk" json:"localDisk,omitempty" vim:"6.0"` + LocalDisk *bool `xml:"localDisk" json:"localDisk,omitempty"` // The physical location of the ScsiDisk if can be determined, otherwise // unset. // // If the ScsiDisk is a logical drive, it should be the // location of all constituent physical drives of the logical drive. // If the ScsiDisk is a physical drive, it's an array of one element. - PhysicalLocation []string `xml:"physicalLocation,omitempty" json:"physicalLocation,omitempty" vim:"6.0"` + PhysicalLocation []string `xml:"physicalLocation,omitempty" json:"physicalLocation,omitempty"` // Indicates whether the ScsiDisk has emulated Data Integrity Extension // (DIX) / Data Integrity Field (DIF) enabled. // // If unset, the default value is false. - EmulatedDIXDIFEnabled *bool `xml:"emulatedDIXDIFEnabled" json:"emulatedDIXDIFEnabled,omitempty" vim:"6.0"` + EmulatedDIXDIFEnabled *bool `xml:"emulatedDIXDIFEnabled" json:"emulatedDIXDIFEnabled,omitempty"` // Indicates the additional VSAN information // if this disk is used by VSAN. - VsanDiskInfo *VsanHostVsanDiskInfo `xml:"vsanDiskInfo,omitempty" json:"vsanDiskInfo,omitempty" vim:"6.0"` + VsanDiskInfo *VsanHostVsanDiskInfo `xml:"vsanDiskInfo,omitempty" json:"vsanDiskInfo,omitempty"` // The type of disk drives. // // See `ScsiDiskType_enum` // for definitions of supported types. - ScsiDiskType string `xml:"scsiDiskType,omitempty" json:"scsiDiskType,omitempty" vim:"6.5"` + ScsiDiskType string `xml:"scsiDiskType,omitempty" json:"scsiDiskType,omitempty"` + // Indicate whether the disk is used for + // memory tiering or not. + // + // If unset, the default value is false. 
+ UsedByMemoryTiering *bool `xml:"usedByMemoryTiering" json:"usedByMemoryTiering,omitempty" vim:"8.0.3.0"` } func init() { @@ -44128,14 +44040,13 @@ type HostSecuritySpec struct { // Administrator password to configure AdminPassword string `xml:"adminPassword,omitempty" json:"adminPassword,omitempty"` // Permissions to remove - RemovePermission []Permission `xml:"removePermission,omitempty" json:"removePermission,omitempty" vim:"4.1"` + RemovePermission []Permission `xml:"removePermission,omitempty" json:"removePermission,omitempty"` // Permissions to add - AddPermission []Permission `xml:"addPermission,omitempty" json:"addPermission,omitempty" vim:"4.1"` + AddPermission []Permission `xml:"addPermission,omitempty" json:"addPermission,omitempty"` } func init() { t["HostSecuritySpec"] = reflect.TypeOf((*HostSecuritySpec)(nil)).Elem() - minAPIVersionForType["HostSecuritySpec"] = "4.0" } // The data object type describes the @@ -44149,7 +44060,6 @@ type HostSerialAttachedHba struct { func init() { t["HostSerialAttachedHba"] = reflect.TypeOf((*HostSerialAttachedHba)(nil)).Elem() - minAPIVersionForType["HostSerialAttachedHba"] = "6.5" } // Serial attached adapter transport information about a SCSI target. @@ -44159,7 +44069,6 @@ type HostSerialAttachedTargetTransport struct { func init() { t["HostSerialAttachedTargetTransport"] = reflect.TypeOf((*HostSerialAttachedTargetTransport)(nil)).Elem() - minAPIVersionForType["HostSerialAttachedTargetTransport"] = "6.5" } // Data object that describes a single service that runs on the host. @@ -44188,7 +44097,7 @@ type HostService struct { // See also `HostServicePolicy_enum`. Policy string `xml:"policy" json:"policy"` // The source package associated with the service - SourcePackage *HostServiceSourcePackage `xml:"sourcePackage,omitempty" json:"sourcePackage,omitempty" vim:"5.0"` + SourcePackage *HostServiceSourcePackage `xml:"sourcePackage,omitempty" json:"sourcePackage,omitempty"` } func init() { @@ -44210,7 +44119,6 @@ type HostServiceConfig struct { func init() { t["HostServiceConfig"] = reflect.TypeOf((*HostServiceConfig)(nil)).Elem() - minAPIVersionForType["HostServiceConfig"] = "4.0" } // Data object describing the host service configuration. @@ -44277,7 +44185,7 @@ type HostServiceTicket struct { Port int32 `xml:"port,omitempty" json:"port,omitempty"` // The expected thumbprint of the SSL cert of the host to which // we are connecting. - SslThumbprint string `xml:"sslThumbprint,omitempty" json:"sslThumbprint,omitempty" vim:"2.5"` + SslThumbprint string `xml:"sslThumbprint,omitempty" json:"sslThumbprint,omitempty"` // The name of the service to which to connect. Service string `xml:"service" json:"service"` // A dot-separated string identifying the service protocol version. @@ -44329,6 +44237,50 @@ func init() { type HostSetVStorageObjectControlFlagsResponse struct { } +// The parameters of `HostVStorageObjectManager.HostSetVirtualDiskUuid_Task`. +type HostSetVirtualDiskUuidRequestType struct { + This ManagedObjectReference `xml:"_this" json:"-"` + // The name of the disk, either a datastore path or a URL + // referring to the virtual disk whose uuid for the DDB entry needs to be set. + // A URL has the form + // > _scheme_://_authority_/folder/_path_?dsName=_dsName_ + // + // where + // - _scheme_ is http or https. + // - _authority_ specifies the hostname or IP address of the VirtualCenter or + // ESX server and optionally the port. + // - _dsName_ is the name of the Datastore. 
+ // - _path_ is a slash-delimited path from the root of the datastore. + // + // A datastore path has the form + // > \[_datastore_\] _path_ + // + // where + // - _datastore_ is the datastore name. + // - _path_ is a slash-delimited path from the root of the datastore. + // + // An example datastore path is "\[storage\] path/to/file.extension". + Name string `xml:"name" json:"name"` + // The hex representation of the unique ID for this virtual disk. If uuid is not set or missing, + // a random UUID is generated and assigned. + Uuid string `xml:"uuid,omitempty" json:"uuid,omitempty"` +} + +func init() { + t["HostSetVirtualDiskUuidRequestType"] = reflect.TypeOf((*HostSetVirtualDiskUuidRequestType)(nil)).Elem() + minAPIVersionForType["HostSetVirtualDiskUuidRequestType"] = "8.0.3.0" +} + +type HostSetVirtualDiskUuid_Task HostSetVirtualDiskUuidRequestType + +func init() { + t["HostSetVirtualDiskUuid_Task"] = reflect.TypeOf((*HostSetVirtualDiskUuid_Task)(nil)).Elem() +} + +type HostSetVirtualDiskUuid_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval" json:"returnval"` +} + type HostSevInfo struct { DynamicData @@ -44343,6 +44295,7 @@ type HostSevInfo struct { func init() { t["HostSevInfo"] = reflect.TypeOf((*HostSevInfo)(nil)).Elem() + minAPIVersionForType["HostSevInfo"] = "7.0.1.0" } // Data object describing the Software Guard Extension (SGX) @@ -44369,12 +44322,11 @@ type HostSgxInfo struct { // enclave. This attribute is set only if attribute flcMode is // locked. LePubKeyHash string `xml:"lePubKeyHash,omitempty" json:"lePubKeyHash,omitempty"` - RegistrationInfo *HostSgxRegistrationInfo `xml:"registrationInfo,omitempty" json:"registrationInfo,omitempty"` + RegistrationInfo *HostSgxRegistrationInfo `xml:"registrationInfo,omitempty" json:"registrationInfo,omitempty" vim:"8.0.0.1"` } func init() { t["HostSgxInfo"] = reflect.TypeOf((*HostSgxInfo)(nil)).Elem() - minAPIVersionForType["HostSgxInfo"] = "7.0" } // Data object describing SGX host registration information. @@ -44446,7 +44398,6 @@ type HostSharedGpuCapabilities struct { func init() { t["HostSharedGpuCapabilities"] = reflect.TypeOf((*HostSharedGpuCapabilities)(nil)).Elem() - minAPIVersionForType["HostSharedGpuCapabilities"] = "6.7" } // Deprecated as of vSphere API 5.0, the event is no longer relevant. @@ -44465,7 +44416,6 @@ type HostShortNameInconsistentEvent struct { func init() { t["HostShortNameInconsistentEvent"] = reflect.TypeOf((*HostShortNameInconsistentEvent)(nil)).Elem() - minAPIVersionForType["HostShortNameInconsistentEvent"] = "2.5" } // Deprecated as of vSphere API 5.0, the event is no longer relevant. @@ -44479,7 +44429,6 @@ type HostShortNameToIpFailedEvent struct { func init() { t["HostShortNameToIpFailedEvent"] = reflect.TypeOf((*HostShortNameToIpFailedEvent)(nil)).Elem() - minAPIVersionForType["HostShortNameToIpFailedEvent"] = "2.5" } // This event records the shutdown of a host. 
@@ -44540,7 +44489,7 @@ type HostSnmpSystemAgentLimits struct { // SNMP input buffer size MaxBufferSize int32 `xml:"maxBufferSize" json:"maxBufferSize"` // Supported Capability for this agent - Capability HostSnmpAgentCapability `xml:"capability,omitempty" json:"capability,omitempty" vim:"4.0"` + Capability HostSnmpAgentCapability `xml:"capability,omitempty" json:"capability,omitempty"` } func init() { @@ -44608,7 +44557,6 @@ type HostSpecification struct { func init() { t["HostSpecification"] = reflect.TypeOf((*HostSpecification)(nil)).Elem() - minAPIVersionForType["HostSpecification"] = "6.5" } // This event records that the host specification was changed. @@ -44618,7 +44566,6 @@ type HostSpecificationChangedEvent struct { func init() { t["HostSpecificationChangedEvent"] = reflect.TypeOf((*HostSpecificationChangedEvent)(nil)).Elem() - minAPIVersionForType["HostSpecificationChangedEvent"] = "6.5" } // Fault thrown when an operation, on host specification or host sub @@ -44634,7 +44581,6 @@ type HostSpecificationOperationFailed struct { func init() { t["HostSpecificationOperationFailed"] = reflect.TypeOf((*HostSpecificationOperationFailed)(nil)).Elem() - minAPIVersionForType["HostSpecificationOperationFailed"] = "6.5" } type HostSpecificationOperationFailedFault HostSpecificationOperationFailed @@ -44650,7 +44596,6 @@ type HostSpecificationRequireEvent struct { func init() { t["HostSpecificationRequireEvent"] = reflect.TypeOf((*HostSpecificationRequireEvent)(nil)).Elem() - minAPIVersionForType["HostSpecificationRequireEvent"] = "6.5" } // This event suggests that update the host specification with the @@ -44663,7 +44608,6 @@ type HostSpecificationUpdateEvent struct { func init() { t["HostSpecificationUpdateEvent"] = reflect.TypeOf((*HostSpecificationUpdateEvent)(nil)).Elem() - minAPIVersionForType["HostSpecificationUpdateEvent"] = "6.5" } // This data object allows configuration of SR-IOV device. @@ -44678,7 +44622,6 @@ type HostSriovConfig struct { func init() { t["HostSriovConfig"] = reflect.TypeOf((*HostSriovConfig)(nil)).Elem() - minAPIVersionForType["HostSriovConfig"] = "5.5" } type HostSriovDevicePoolInfo struct { @@ -44711,7 +44654,6 @@ type HostSriovInfo struct { func init() { t["HostSriovInfo"] = reflect.TypeOf((*HostSriovInfo)(nil)).Elem() - minAPIVersionForType["HostSriovInfo"] = "5.5" } // Information on networking specific SR-IOV device pools @@ -44729,7 +44671,6 @@ type HostSriovNetworkDevicePoolInfo struct { func init() { t["HostSriovNetworkDevicePoolInfo"] = reflect.TypeOf((*HostSriovNetworkDevicePoolInfo)(nil)).Elem() - minAPIVersionForType["HostSriovNetworkDevicePoolInfo"] = "6.5" } // The SSL thumbprint information for a host managed by a vCenter Server @@ -44747,14 +44688,13 @@ type HostSslThumbprintInfo struct { // interfering with each other on the life cycle of the thumbprint with // their unique tags. // Each solution should use a unique tag to identify itself. - OwnerTag string `xml:"ownerTag,omitempty" json:"ownerTag,omitempty" vim:"5.0"` + OwnerTag string `xml:"ownerTag,omitempty" json:"ownerTag,omitempty"` // Specify the SSL thumbprints to register on the host. SslThumbprints []string `xml:"sslThumbprints,omitempty" json:"sslThumbprints,omitempty"` } func init() { t["HostSslThumbprintInfo"] = reflect.TypeOf((*HostSslThumbprintInfo)(nil)).Elem() - minAPIVersionForType["HostSslThumbprintInfo"] = "4.0" } // This event records when a host's overall status changed. 
@@ -44764,7 +44704,6 @@ type HostStatusChangedEvent struct { func init() { t["HostStatusChangedEvent"] = reflect.TypeOf((*HostStatusChangedEvent)(nil)).Elem() - minAPIVersionForType["HostStatusChangedEvent"] = "4.0" } // Description of options associated with a native multipathing @@ -44781,7 +44720,6 @@ type HostStorageArrayTypePolicyOption struct { func init() { t["HostStorageArrayTypePolicyOption"] = reflect.TypeOf((*HostStorageArrayTypePolicyOption)(nil)).Elem() - minAPIVersionForType["HostStorageArrayTypePolicyOption"] = "4.0" } // This data object type describes the storage subsystem configuration. @@ -44804,7 +44742,7 @@ type HostStorageDeviceInfo struct { // This data object exists // only if storage topology information is available. See the // `HostNvmeTopology` data object type for more information. - NvmeTopology *HostNvmeTopology `xml:"nvmeTopology,omitempty" json:"nvmeTopology,omitempty" vim:"7.0"` + NvmeTopology *HostNvmeTopology `xml:"nvmeTopology,omitempty" json:"nvmeTopology,omitempty"` // The multipath configuration that controls multipath policy for ScsiLuns. // // This data object exists only if path information is available and is @@ -44814,7 +44752,7 @@ type HostStorageDeviceInfo struct { // // This data object exists only if // the plug-store system is available and configurable. - PlugStoreTopology *HostPlugStoreTopology `xml:"plugStoreTopology,omitempty" json:"plugStoreTopology,omitempty" vim:"4.0"` + PlugStoreTopology *HostPlugStoreTopology `xml:"plugStoreTopology,omitempty" json:"plugStoreTopology,omitempty"` // Indicates if the software iSCSI initiator is enabled on this system SoftwareInternetScsiEnabled bool `xml:"softwareInternetScsiEnabled" json:"softwareInternetScsiEnabled"` } @@ -44835,7 +44773,6 @@ type HostStorageElementInfo struct { func init() { t["HostStorageElementInfo"] = reflect.TypeOf((*HostStorageElementInfo)(nil)).Elem() - minAPIVersionForType["HostStorageElementInfo"] = "2.5" } // Data class describing operational information of a storage element @@ -44850,7 +44787,6 @@ type HostStorageOperationalInfo struct { func init() { t["HostStorageOperationalInfo"] = reflect.TypeOf((*HostStorageOperationalInfo)(nil)).Elem() - minAPIVersionForType["HostStorageOperationalInfo"] = "2.5" } // Contains the result of turn Disk Locator Led On/Off request. @@ -44869,7 +44805,6 @@ type HostStorageSystemDiskLocatorLedResult struct { func init() { t["HostStorageSystemDiskLocatorLedResult"] = reflect.TypeOf((*HostStorageSystemDiskLocatorLedResult)(nil)).Elem() - minAPIVersionForType["HostStorageSystemDiskLocatorLedResult"] = "6.0" } // Contains the result of SCSI LUN operation requests. @@ -44889,7 +44824,6 @@ type HostStorageSystemScsiLunResult struct { func init() { t["HostStorageSystemScsiLunResult"] = reflect.TypeOf((*HostStorageSystemScsiLunResult)(nil)).Elem() - minAPIVersionForType["HostStorageSystemScsiLunResult"] = "6.0" } // Contains the result of the operation performed on a VMFS volume. @@ -44904,7 +44838,6 @@ type HostStorageSystemVmfsVolumeResult struct { func init() { t["HostStorageSystemVmfsVolumeResult"] = reflect.TypeOf((*HostStorageSystemVmfsVolumeResult)(nil)).Elem() - minAPIVersionForType["HostStorageSystemVmfsVolumeResult"] = "6.0" } // Host sub specification data are the data used when create a virtual @@ -44944,14 +44877,13 @@ type HostSubSpecification struct { // Time at which the host sub specification was created. 
CreatedTime time.Time `xml:"createdTime" json:"createdTime"` // The host sub specification data - Data []byte `xml:"data,omitempty" json:"data,omitempty"` + Data ByteSlice `xml:"data,omitempty" json:"data,omitempty"` // The host sub specification data in Binary for wire efficiency. - BinaryData []byte `xml:"binaryData,omitempty" json:"binaryData,omitempty" vim:"6.7"` + BinaryData []byte `xml:"binaryData,omitempty" json:"binaryData,omitempty"` } func init() { t["HostSubSpecification"] = reflect.TypeOf((*HostSubSpecification)(nil)).Elem() - minAPIVersionForType["HostSubSpecification"] = "6.5" } // This event suggests that delete the host sub specification specified by @@ -44964,7 +44896,6 @@ type HostSubSpecificationDeleteEvent struct { func init() { t["HostSubSpecificationDeleteEvent"] = reflect.TypeOf((*HostSubSpecificationDeleteEvent)(nil)).Elem() - minAPIVersionForType["HostSubSpecificationDeleteEvent"] = "6.5" } // This event suggests that update the host sub specification with the @@ -44977,7 +44908,6 @@ type HostSubSpecificationUpdateEvent struct { func init() { t["HostSubSpecificationUpdateEvent"] = reflect.TypeOf((*HostSubSpecificationUpdateEvent)(nil)).Elem() - minAPIVersionForType["HostSubSpecificationUpdateEvent"] = "6.5" } // This event records a failure to sync up with the VirtualCenter agent on the host @@ -44990,7 +44920,6 @@ type HostSyncFailedEvent struct { func init() { t["HostSyncFailedEvent"] = reflect.TypeOf((*HostSyncFailedEvent)(nil)).Elem() - minAPIVersionForType["HostSyncFailedEvent"] = "4.0" } // The host profile compliance check state. @@ -45009,7 +44938,6 @@ type HostSystemComplianceCheckState struct { func init() { t["HostSystemComplianceCheckState"] = reflect.TypeOf((*HostSystemComplianceCheckState)(nil)).Elem() - minAPIVersionForType["HostSystemComplianceCheckState"] = "6.7" } // This data object provides information about the health of the phyical @@ -45025,7 +44953,6 @@ type HostSystemHealthInfo struct { func init() { t["HostSystemHealthInfo"] = reflect.TypeOf((*HostSystemHealthInfo)(nil)).Elem() - minAPIVersionForType["HostSystemHealthInfo"] = "2.5" } // This data object describes system identifying information of the host. @@ -45045,7 +44972,6 @@ type HostSystemIdentificationInfo struct { func init() { t["HostSystemIdentificationInfo"] = reflect.TypeOf((*HostSystemIdentificationInfo)(nil)).Elem() - minAPIVersionForType["HostSystemIdentificationInfo"] = "2.5" } // Information about the system as a whole. @@ -45062,7 +44988,7 @@ type HostSystemInfo struct { // // This information may be vendor // specific - OtherIdentifyingInfo []HostSystemIdentificationInfo `xml:"otherIdentifyingInfo,omitempty" json:"otherIdentifyingInfo,omitempty" vim:"2.5"` + OtherIdentifyingInfo []HostSystemIdentificationInfo `xml:"otherIdentifyingInfo,omitempty" json:"otherIdentifyingInfo,omitempty"` SerialNumber string `xml:"serialNumber,omitempty" json:"serialNumber,omitempty"` // List of qualified names used to identify the host in a specific context. // @@ -45079,6 +45005,12 @@ type HostSystemInfo struct { // The hostd id, obtained through vmkctl storage control path while // fetching the NVMe info. VvolHostId string `xml:"vvolHostId,omitempty" json:"vvolHostId,omitempty" vim:"8.0.0.0"` + // Command line string to identify different boot options used for host. 
+ // + // Example of different boot options are: + // - "runweasel": "System is booted for weasel installation" + // - "ks": "System is booted for kickstart installation" + BootCommandLine string `xml:"bootCommandLine,omitempty" json:"bootCommandLine,omitempty" vim:"8.0.3.0"` } func init() { @@ -45105,7 +45037,6 @@ type HostSystemReconnectSpec struct { func init() { t["HostSystemReconnectSpec"] = reflect.TypeOf((*HostSystemReconnectSpec)(nil)).Elem() - minAPIVersionForType["HostSystemReconnectSpec"] = "5.0" } // The valid remediation states. @@ -45131,7 +45062,6 @@ type HostSystemRemediationState struct { func init() { t["HostSystemRemediationState"] = reflect.TypeOf((*HostSystemRemediationState)(nil)).Elem() - minAPIVersionForType["HostSystemRemediationState"] = "6.7" } // The SystemResourceInfo data object describes the configuration of @@ -45175,7 +45105,6 @@ type HostSystemSwapConfiguration struct { func init() { t["HostSystemSwapConfiguration"] = reflect.TypeOf((*HostSystemSwapConfiguration)(nil)).Elem() - minAPIVersionForType["HostSystemSwapConfiguration"] = "5.1" } // Use option to indicate that a user specified datastore may be used for @@ -45193,7 +45122,6 @@ type HostSystemSwapConfigurationDatastoreOption struct { func init() { t["HostSystemSwapConfigurationDatastoreOption"] = reflect.TypeOf((*HostSystemSwapConfigurationDatastoreOption)(nil)).Elem() - minAPIVersionForType["HostSystemSwapConfigurationDatastoreOption"] = "5.1" } // Indicates that the system swap on the host is currently disabled. @@ -45208,7 +45136,6 @@ type HostSystemSwapConfigurationDisabledOption struct { func init() { t["HostSystemSwapConfigurationDisabledOption"] = reflect.TypeOf((*HostSystemSwapConfigurationDisabledOption)(nil)).Elem() - minAPIVersionForType["HostSystemSwapConfigurationDisabledOption"] = "5.1" } // Use option to indicate that the host cache may be used for system @@ -45221,7 +45148,6 @@ type HostSystemSwapConfigurationHostCacheOption struct { func init() { t["HostSystemSwapConfigurationHostCacheOption"] = reflect.TypeOf((*HostSystemSwapConfigurationHostCacheOption)(nil)).Elem() - minAPIVersionForType["HostSystemSwapConfigurationHostCacheOption"] = "5.1" } // Use option to indicate that the datastore configured for host local swap @@ -45232,7 +45158,6 @@ type HostSystemSwapConfigurationHostLocalSwapOption struct { func init() { t["HostSystemSwapConfigurationHostLocalSwapOption"] = reflect.TypeOf((*HostSystemSwapConfigurationHostLocalSwapOption)(nil)).Elem() - minAPIVersionForType["HostSystemSwapConfigurationHostLocalSwapOption"] = "5.1" } // Base class for all system swap options. @@ -45251,7 +45176,6 @@ type HostSystemSwapConfigurationSystemSwapOption struct { func init() { t["HostSystemSwapConfigurationSystemSwapOption"] = reflect.TypeOf((*HostSystemSwapConfigurationSystemSwapOption)(nil)).Elem() - minAPIVersionForType["HostSystemSwapConfigurationSystemSwapOption"] = "5.1" } // Transport information about a SCSI target. 
@@ -45325,7 +45249,6 @@ type HostTpmAttestationInfo struct { func init() { t["HostTpmAttestationInfo"] = reflect.TypeOf((*HostTpmAttestationInfo)(nil)).Elem() - minAPIVersionForType["HostTpmAttestationInfo"] = "6.7" } // This class is used to report Trusted Platform Module (TPM) attestation @@ -45376,7 +45299,6 @@ type HostTpmAttestationReport struct { func init() { t["HostTpmAttestationReport"] = reflect.TypeOf((*HostTpmAttestationReport)(nil)).Elem() - minAPIVersionForType["HostTpmAttestationReport"] = "5.1" } // Details of a Trusted Platform Module (TPM) event recording the @@ -45415,7 +45337,6 @@ type HostTpmBootSecurityOptionEventDetails struct { func init() { t["HostTpmBootSecurityOptionEventDetails"] = reflect.TypeOf((*HostTpmBootSecurityOptionEventDetails)(nil)).Elem() - minAPIVersionForType["HostTpmBootSecurityOptionEventDetails"] = "5.1" } // Details of an Trusted Platform Module (TPM) event recording options entered @@ -45429,7 +45350,6 @@ type HostTpmCommandEventDetails struct { func init() { t["HostTpmCommandEventDetails"] = reflect.TypeOf((*HostTpmCommandEventDetails)(nil)).Elem() - minAPIVersionForType["HostTpmCommandEventDetails"] = "5.1" } // This data object type describes the digest values in the Platform @@ -45443,7 +45363,6 @@ type HostTpmDigestInfo struct { func init() { t["HostTpmDigestInfo"] = reflect.TypeOf((*HostTpmDigestInfo)(nil)).Elem() - minAPIVersionForType["HostTpmDigestInfo"] = "4.0" } // This is a base data object for describing an event generated by @@ -45455,17 +45374,16 @@ type HostTpmEventDetails struct { DynamicData // Value of the Platform Configuration Register (PCR) for this event. - DataHash []byte `xml:"dataHash" json:"dataHash"` + DataHash ByteSlice `xml:"dataHash" json:"dataHash"` // Method in which the digest hash is calculated. // // The set of possible // values is described in `HostDigestInfoDigestMethodType_enum`. - DataHashMethod string `xml:"dataHashMethod,omitempty" json:"dataHashMethod,omitempty" vim:"6.7"` + DataHashMethod string `xml:"dataHashMethod,omitempty" json:"dataHashMethod,omitempty"` } func init() { t["HostTpmEventDetails"] = reflect.TypeOf((*HostTpmEventDetails)(nil)).Elem() - minAPIVersionForType["HostTpmEventDetails"] = "5.1" } // This data object represents a single entry of an event log created by @@ -45489,7 +45407,6 @@ type HostTpmEventLogEntry struct { func init() { t["HostTpmEventLogEntry"] = reflect.TypeOf((*HostTpmEventLogEntry)(nil)).Elem() - minAPIVersionForType["HostTpmEventLogEntry"] = "5.1" } // Details of an Trusted Platform Module (TPM) event recording TPM NVRAM tag. @@ -45517,12 +45434,11 @@ type HostTpmOptionEventDetails struct { // This array exposes the raw contents of the settings file (or files) that were // passed to kernel during the boot up process, and, therefore, should be treated // accordingly. 
- BootOptions []byte `xml:"bootOptions,omitempty" json:"bootOptions,omitempty"` + BootOptions ByteSlice `xml:"bootOptions,omitempty" json:"bootOptions,omitempty"` } func init() { t["HostTpmOptionEventDetails"] = reflect.TypeOf((*HostTpmOptionEventDetails)(nil)).Elem() - minAPIVersionForType["HostTpmOptionEventDetails"] = "5.1" } // Details of a Trusted Platform Module (TPM) event recording the measurement @@ -45561,7 +45477,6 @@ type HostTpmSoftwareComponentEventDetails struct { func init() { t["HostTpmSoftwareComponentEventDetails"] = reflect.TypeOf((*HostTpmSoftwareComponentEventDetails)(nil)).Elem() - minAPIVersionForType["HostTpmSoftwareComponentEventDetails"] = "5.1" } // Details of a Trusted Platform Module (TPM) event recording the @@ -45669,7 +45584,6 @@ type HostUnresolvedVmfsExtent struct { func init() { t["HostUnresolvedVmfsExtent"] = reflect.TypeOf((*HostUnresolvedVmfsExtent)(nil)).Elem() - minAPIVersionForType["HostUnresolvedVmfsExtent"] = "4.0" } // Specification to resignature an Unresolved VMFS volume. @@ -45682,7 +45596,6 @@ type HostUnresolvedVmfsResignatureSpec struct { func init() { t["HostUnresolvedVmfsResignatureSpec"] = reflect.TypeOf((*HostUnresolvedVmfsResignatureSpec)(nil)).Elem() - minAPIVersionForType["HostUnresolvedVmfsResignatureSpec"] = "4.0" } // When an UnresolvedVmfsVolume has been resignatured or forceMounted, we want to @@ -45700,7 +45613,6 @@ type HostUnresolvedVmfsResolutionResult struct { func init() { t["HostUnresolvedVmfsResolutionResult"] = reflect.TypeOf((*HostUnresolvedVmfsResolutionResult)(nil)).Elem() - minAPIVersionForType["HostUnresolvedVmfsResolutionResult"] = "4.0" } // An unresolved VMFS volume is reported when one or more device partitions @@ -45733,7 +45645,6 @@ type HostUnresolvedVmfsResolutionSpec struct { func init() { t["HostUnresolvedVmfsResolutionSpec"] = reflect.TypeOf((*HostUnresolvedVmfsResolutionSpec)(nil)).Elem() - minAPIVersionForType["HostUnresolvedVmfsResolutionSpec"] = "4.0" } // Information about detected unbound, unresolved VMFS volume. @@ -45783,7 +45694,6 @@ type HostUnresolvedVmfsVolume struct { func init() { t["HostUnresolvedVmfsVolume"] = reflect.TypeOf((*HostUnresolvedVmfsVolume)(nil)).Elem() - minAPIVersionForType["HostUnresolvedVmfsVolume"] = "4.0" } // Data object that describes the resolvability of a volume. @@ -45815,7 +45725,6 @@ type HostUnresolvedVmfsVolumeResolveStatus struct { func init() { t["HostUnresolvedVmfsVolumeResolveStatus"] = reflect.TypeOf((*HostUnresolvedVmfsVolumeResolveStatus)(nil)).Elem() - minAPIVersionForType["HostUnresolvedVmfsVolumeResolveStatus"] = "4.0" } // The parameters of `HostVStorageObjectManager.HostUpdateVStorageObjectMetadataEx_Task`. @@ -45836,6 +45745,7 @@ type HostUpdateVStorageObjectMetadataExRequestType struct { func init() { t["HostUpdateVStorageObjectMetadataExRequestType"] = reflect.TypeOf((*HostUpdateVStorageObjectMetadataExRequestType)(nil)).Elem() + minAPIVersionForType["HostUpdateVStorageObjectMetadataExRequestType"] = "7.0.2.0" } type HostUpdateVStorageObjectMetadataEx_Task HostUpdateVStorageObjectMetadataExRequestType @@ -45900,7 +45810,6 @@ type HostUserWorldSwapNotEnabledEvent struct { func init() { t["HostUserWorldSwapNotEnabledEvent"] = reflect.TypeOf((*HostUserWorldSwapNotEnabledEvent)(nil)).Elem() - minAPIVersionForType["HostUserWorldSwapNotEnabledEvent"] = "4.0" } // Data object describes host vFlash cache configuration information. 
@@ -45926,7 +45835,6 @@ type HostVFlashManagerVFlashCacheConfigInfo struct { func init() { t["HostVFlashManagerVFlashCacheConfigInfo"] = reflect.TypeOf((*HostVFlashManagerVFlashCacheConfigInfo)(nil)).Elem() - minAPIVersionForType["HostVFlashManagerVFlashCacheConfigInfo"] = "5.5" } type HostVFlashManagerVFlashCacheConfigInfoVFlashModuleConfigOption struct { @@ -45979,7 +45887,6 @@ type HostVFlashManagerVFlashCacheConfigSpec struct { func init() { t["HostVFlashManagerVFlashCacheConfigSpec"] = reflect.TypeOf((*HostVFlashManagerVFlashCacheConfigSpec)(nil)).Elem() - minAPIVersionForType["HostVFlashManagerVFlashCacheConfigSpec"] = "5.5" } // vFlash configuration Information. @@ -45994,7 +45901,6 @@ type HostVFlashManagerVFlashConfigInfo struct { func init() { t["HostVFlashManagerVFlashConfigInfo"] = reflect.TypeOf((*HostVFlashManagerVFlashConfigInfo)(nil)).Elem() - minAPIVersionForType["HostVFlashManagerVFlashConfigInfo"] = "5.5" } // vFlash resource configuration Information. @@ -46012,7 +45918,6 @@ type HostVFlashManagerVFlashResourceConfigInfo struct { func init() { t["HostVFlashManagerVFlashResourceConfigInfo"] = reflect.TypeOf((*HostVFlashManagerVFlashResourceConfigInfo)(nil)).Elem() - minAPIVersionForType["HostVFlashManagerVFlashResourceConfigInfo"] = "5.5" } // vFlash resource configuration specification. @@ -46025,7 +45930,6 @@ type HostVFlashManagerVFlashResourceConfigSpec struct { func init() { t["HostVFlashManagerVFlashResourceConfigSpec"] = reflect.TypeOf((*HostVFlashManagerVFlashResourceConfigSpec)(nil)).Elem() - minAPIVersionForType["HostVFlashManagerVFlashResourceConfigSpec"] = "5.5" } // Data object provides vFlash resource runtime usage. @@ -46049,7 +45953,6 @@ type HostVFlashManagerVFlashResourceRunTimeInfo struct { func init() { t["HostVFlashManagerVFlashResourceRunTimeInfo"] = reflect.TypeOf((*HostVFlashManagerVFlashResourceRunTimeInfo)(nil)).Elem() - minAPIVersionForType["HostVFlashManagerVFlashResourceRunTimeInfo"] = "5.5" } // vFlash resource configuration result returns the newly-configured backend @@ -46067,7 +45970,6 @@ type HostVFlashResourceConfigurationResult struct { func init() { t["HostVFlashResourceConfigurationResult"] = reflect.TypeOf((*HostVFlashResourceConfigurationResult)(nil)).Elem() - minAPIVersionForType["HostVFlashResourceConfigurationResult"] = "5.5" } // The object type for the array returned by queryVMotionCompatibility; @@ -46158,7 +46060,6 @@ type HostVMotionManagerDstInstantCloneResult struct { func init() { t["HostVMotionManagerDstInstantCloneResult"] = reflect.TypeOf((*HostVMotionManagerDstInstantCloneResult)(nil)).Elem() - minAPIVersionForType["HostVMotionManagerDstInstantCloneResult"] = "7.0" } // The result of an InstantClone InitiateSource task. @@ -46183,7 +46084,6 @@ type HostVMotionManagerSrcInstantCloneResult struct { func init() { t["HostVMotionManagerSrcInstantCloneResult"] = reflect.TypeOf((*HostVMotionManagerSrcInstantCloneResult)(nil)).Elem() - minAPIVersionForType["HostVMotionManagerSrcInstantCloneResult"] = "7.0" } // The NetConfig data object type contains the networking @@ -46226,7 +46126,7 @@ type HostVStorageObjectCreateDiskFromSnapshotRequestType struct { Crypto BaseCryptoSpec `xml:"crypto,omitempty,typeattr" json:"crypto,omitempty"` // Relative location in the specified datastore where disk needs // to be created. If not specified disk gets created at defualt - // VStorageObject location on the specified datastore + // VStorageObject location on the specified datastore. 
Path string `xml:"path,omitempty" json:"path,omitempty"` // Provisioining type of the disk as specified in above // mentioned profile. The list of supported values can be found in @@ -46365,7 +46265,6 @@ type HostVfatVolume struct { func init() { t["HostVfatVolume"] = reflect.TypeOf((*HostVfatVolume)(nil)).Elem() - minAPIVersionForType["HostVfatVolume"] = "5.0" } // This data object type describes the VFFS @@ -46394,7 +46293,6 @@ type HostVffsSpec struct { func init() { t["HostVffsSpec"] = reflect.TypeOf((*HostVffsSpec)(nil)).Elem() - minAPIVersionForType["HostVffsSpec"] = "5.5" } // vFlash File System Volume. @@ -46416,7 +46314,6 @@ type HostVffsVolume struct { func init() { t["HostVffsVolume"] = reflect.TypeOf((*HostVffsVolume)(nil)).Elem() - minAPIVersionForType["HostVffsVolume"] = "5.5" } // The `HostVirtualNic` data object describes a virtual network adapter @@ -46511,12 +46408,11 @@ type HostVirtualNicConnection struct { // // If the virtual nic is to be connected to a logicSwitch, // \#opNetwork will be set instead of #portgroup and #dvPort - OpNetwork *HostVirtualNicOpaqueNetworkSpec `xml:"opNetwork,omitempty" json:"opNetwork,omitempty" vim:"6.7"` + OpNetwork *HostVirtualNicOpaqueNetworkSpec `xml:"opNetwork,omitempty" json:"opNetwork,omitempty"` } func init() { t["HostVirtualNicConnection"] = reflect.TypeOf((*HostVirtualNicConnection)(nil)).Elem() - minAPIVersionForType["HostVirtualNicConnection"] = "4.0" } // The `HostVirtualNicIpRouteSpec` data object describes the @@ -46538,7 +46434,6 @@ type HostVirtualNicIpRouteSpec struct { func init() { t["HostVirtualNicIpRouteSpec"] = reflect.TypeOf((*HostVirtualNicIpRouteSpec)(nil)).Elem() - minAPIVersionForType["HostVirtualNicIpRouteSpec"] = "6.5" } // This data object type describes VirtualNic host @@ -46555,7 +46450,6 @@ type HostVirtualNicManagerInfo struct { func init() { t["HostVirtualNicManagerInfo"] = reflect.TypeOf((*HostVirtualNicManagerInfo)(nil)).Elem() - minAPIVersionForType["HostVirtualNicManagerInfo"] = "4.0" } // DataObject which lets a VirtualNic be marked for @@ -46570,7 +46464,6 @@ type HostVirtualNicManagerNicTypeSelection struct { func init() { t["HostVirtualNicManagerNicTypeSelection"] = reflect.TypeOf((*HostVirtualNicManagerNicTypeSelection)(nil)).Elem() - minAPIVersionForType["HostVirtualNicManagerNicTypeSelection"] = "4.0" } // The `HostVirtualNicOpaqueNetworkSpec` data object @@ -46587,7 +46480,6 @@ type HostVirtualNicOpaqueNetworkSpec struct { func init() { t["HostVirtualNicOpaqueNetworkSpec"] = reflect.TypeOf((*HostVirtualNicOpaqueNetworkSpec)(nil)).Elem() - minAPIVersionForType["HostVirtualNicOpaqueNetworkSpec"] = "6.0" } // The `HostVirtualNicSpec` data object describes the @@ -46613,26 +46505,26 @@ type HostVirtualNicSpec struct { // to which the virtual NIC should connect. You can specify this property // only if you do not specify `HostVirtualNicSpec.distributedVirtualPort` and // `HostVirtualNicSpec.opaqueNetwork` - DistributedVirtualPort *DistributedVirtualSwitchPortConnection `xml:"distributedVirtualPort,omitempty" json:"distributedVirtualPort,omitempty" vim:"4.0"` + DistributedVirtualPort *DistributedVirtualSwitchPortConnection `xml:"distributedVirtualPort,omitempty" json:"distributedVirtualPort,omitempty"` // Portgroup (`HostPortGroup`) to which the virtual NIC is connected. // // When reconfiguring a virtual NIC, this property indicates the new portgroup // to which the virtual NIC should connect. 
You can specify this property // only if you do not specify `HostVirtualNicSpec.distributedVirtualPort` and // `HostVirtualNicSpec.opaqueNetwork` - Portgroup string `xml:"portgroup,omitempty" json:"portgroup,omitempty" vim:"4.0"` + Portgroup string `xml:"portgroup,omitempty" json:"portgroup,omitempty"` // Maximum transmission unit for packets size in bytes for the virtual // NIC. // // If not specified, the Server will use the system default value. - Mtu int32 `xml:"mtu,omitempty" json:"mtu,omitempty" vim:"4.0"` + Mtu int32 `xml:"mtu,omitempty" json:"mtu,omitempty"` // Flag enabling or disabling TCP segmentation offset for a virtual NIC. // // If not specified, a default value of true will be used. - TsoEnabled *bool `xml:"tsoEnabled" json:"tsoEnabled,omitempty" vim:"4.0"` + TsoEnabled *bool `xml:"tsoEnabled" json:"tsoEnabled,omitempty"` // The NetStackInstance that the virtual NIC uses, the value of this property // is default to be `defaultTcpipStack` - NetStackInstanceKey string `xml:"netStackInstanceKey,omitempty" json:"netStackInstanceKey,omitempty" vim:"5.5"` + NetStackInstanceKey string `xml:"netStackInstanceKey,omitempty" json:"netStackInstanceKey,omitempty"` // Opaque network (`HostOpaqueNetworkInfo`) to which the // virtual NIC is connected. // @@ -46640,7 +46532,7 @@ type HostVirtualNicSpec struct { // of opaque network to which the virtual NIC should connect. You can specify // this property only if you do not specify `HostVirtualNicSpec.distributedVirtualPort` // and `HostVirtualNicSpec.portgroup`. - OpaqueNetwork *HostVirtualNicOpaqueNetworkSpec `xml:"opaqueNetwork,omitempty" json:"opaqueNetwork,omitempty" vim:"6.0"` + OpaqueNetwork *HostVirtualNicOpaqueNetworkSpec `xml:"opaqueNetwork,omitempty" json:"opaqueNetwork,omitempty"` // An ID assigned to the vmkernel adapter by external management plane. // // The value and format of this property is determined by external management @@ -46649,7 +46541,7 @@ type HostVirtualNicSpec struct { // // This property is applicable only when `HostVirtualNicSpec.opaqueNetwork` property is set, // otherwise it's value is ignored. - ExternalId string `xml:"externalId,omitempty" json:"externalId,omitempty" vim:"6.0"` + ExternalId string `xml:"externalId,omitempty" json:"externalId,omitempty"` // The physical nic to which the vmkernel adapter is pinned. // // Setting this value @@ -46660,15 +46552,15 @@ type HostVirtualNicSpec struct { // If the vmkernel adapter is connected to a portgroup or dvPort, then such // pinning can be achieved by configuring correct teaming policy on the portgroup // or dvPort or dvPortgroup that is connected to virtual NIC. - PinnedPnic string `xml:"pinnedPnic,omitempty" json:"pinnedPnic,omitempty" vim:"6.0"` + PinnedPnic string `xml:"pinnedPnic,omitempty" json:"pinnedPnic,omitempty"` // The ip route configuration used by the vmkernel adapter. // // This attribute // allows the vmkernel adapter to specify its own default gateway. - IpRouteSpec *HostVirtualNicIpRouteSpec `xml:"ipRouteSpec,omitempty" json:"ipRouteSpec,omitempty" vim:"6.5"` + IpRouteSpec *HostVirtualNicIpRouteSpec `xml:"ipRouteSpec,omitempty" json:"ipRouteSpec,omitempty"` // Set to true when the vmkernel adapter is configured by // other system indirectly other than by the user directly. - SystemOwned *bool `xml:"systemOwned" json:"systemOwned,omitempty" vim:"7.0"` + SystemOwned *bool `xml:"systemOwned" json:"systemOwned,omitempty"` // The identifier of the DPU hosting the vmknic. // // If vmknic is on ESX host, dpuId will be unset. 
@@ -46706,7 +46598,7 @@ type HostVirtualSwitch struct { NumPortsAvailable int32 `xml:"numPortsAvailable" json:"numPortsAvailable"` // The maximum transmission unit (MTU) associated with this virtual switch // in bytes. - Mtu int32 `xml:"mtu,omitempty" json:"mtu,omitempty" vim:"2.5"` + Mtu int32 `xml:"mtu,omitempty" json:"mtu,omitempty"` // The list of port groups configured for this virtual switch. Portgroup []string `xml:"portgroup,omitempty" json:"portgroup,omitempty"` // The set of physical network adapters associated with this bridge. @@ -46730,7 +46622,7 @@ type HostVirtualSwitchAutoBridge struct { // List of physical network adapters that have been excluded from // participating in the AutoBridge - ExcludedNicDevice []string `xml:"excludedNicDevice,omitempty" json:"excludedNicDevice,omitempty" vim:"2.5"` + ExcludedNicDevice []string `xml:"excludedNicDevice,omitempty" json:"excludedNicDevice,omitempty"` } func init() { @@ -46777,7 +46669,7 @@ type HostVirtualSwitchBondBridge struct { // The link discovery protocol configuration for the virtual switch. // // See also `LinkDiscoveryProtocolConfig`. - LinkDiscoveryProtocolConfig *LinkDiscoveryProtocolConfig `xml:"linkDiscoveryProtocolConfig,omitempty" json:"linkDiscoveryProtocolConfig,omitempty" vim:"4.0"` + LinkDiscoveryProtocolConfig *LinkDiscoveryProtocolConfig `xml:"linkDiscoveryProtocolConfig,omitempty" json:"linkDiscoveryProtocolConfig,omitempty"` } func init() { @@ -46855,7 +46747,7 @@ type HostVirtualSwitchSpec struct { // be unchanged. Policy *HostNetworkPolicy `xml:"policy,omitempty" json:"policy,omitempty"` // The maximum transmission unit (MTU) of the virtual switch in bytes. - Mtu int32 `xml:"mtu,omitempty" json:"mtu,omitempty" vim:"2.5"` + Mtu int32 `xml:"mtu,omitempty" json:"mtu,omitempty"` } func init() { @@ -46873,6 +46765,7 @@ func init() { type HostVmciAccessManagerAccessSpec struct { DynamicData + // Refers instance of `VirtualMachine`. Vm ManagedObjectReference `xml:"vm" json:"vm"` Services []string `xml:"services,omitempty" json:"services,omitempty"` Mode string `xml:"mode" json:"mode"` @@ -46880,7 +46773,6 @@ type HostVmciAccessManagerAccessSpec struct { func init() { t["HostVmciAccessManagerAccessSpec"] = reflect.TypeOf((*HostVmciAccessManagerAccessSpec)(nil)).Elem() - minAPIVersionForType["HostVmciAccessManagerAccessSpec"] = "5.0" } // When a user resignatures an UnresolvedVmfsVolume through DatastoreSystem API, @@ -46902,7 +46794,6 @@ type HostVmfsRescanResult struct { func init() { t["HostVmfsRescanResult"] = reflect.TypeOf((*HostVmfsRescanResult)(nil)).Elem() - minAPIVersionForType["HostVmfsRescanResult"] = "4.0" } // This data object type describes the VMware File System (VMFS) @@ -46953,7 +46844,7 @@ type HostVmfsSpec struct { // In VMFS3, the valid block sizes are 1MB, 2MB, 4MB, and 8MB. // In VMFS5, the only valid block size is 1MB. // In VMFS6, the valid block sizes are 64KB and 1MB. - BlockSize int32 `xml:"blockSize,omitempty" json:"blockSize,omitempty" vim:"6.5"` + BlockSize int32 `xml:"blockSize,omitempty" json:"blockSize,omitempty"` // The granularity of VMFS unmap operations. // // VMFS unmap reclaims @@ -46961,18 +46852,18 @@ type HostVmfsSpec struct { // The unit is KB. The minimum unmap granularity is 8KB. The maximum // unmap granularity is determined by the block size of VMFS // `HostVmfsVolume.blockSize`. 
- UnmapGranularity int32 `xml:"unmapGranularity,omitempty" json:"unmapGranularity,omitempty" vim:"6.5"` + UnmapGranularity int32 `xml:"unmapGranularity,omitempty" json:"unmapGranularity,omitempty"` // VMFS unmap priority. // // VMFS unmap reclaims unused storage space. This // determines the processing rate of unmaps. // See `HostVmfsVolumeUnmapPriority_enum` for supported values. - UnmapPriority string `xml:"unmapPriority,omitempty" json:"unmapPriority,omitempty" vim:"6.5"` + UnmapPriority string `xml:"unmapPriority,omitempty" json:"unmapPriority,omitempty"` // VMFS unmap bandwidth related specification. // // See // `VmfsUnmapBandwidthSpec` for detail. - UnmapBandwidthSpec *VmfsUnmapBandwidthSpec `xml:"unmapBandwidthSpec,omitempty" json:"unmapBandwidthSpec,omitempty" vim:"6.7"` + UnmapBandwidthSpec *VmfsUnmapBandwidthSpec `xml:"unmapBandwidthSpec,omitempty" json:"unmapBandwidthSpec,omitempty"` } func init() { @@ -47001,7 +46892,7 @@ type HostVmfsVolume struct { // To increase the maximum size of a VMFS file, increase the block size. // // The minimum block size is 1MB. - BlockSize int32 `xml:"blockSize,omitempty" json:"blockSize,omitempty" vim:"6.5"` + BlockSize int32 `xml:"blockSize,omitempty" json:"blockSize,omitempty"` // VMFS unmap reclaims unused storage space. // // This property @@ -47009,7 +46900,7 @@ type HostVmfsVolume struct { // The unit is KB. If not specified, the default value is the same as // the block size of VMFS `HostVmfsVolume.blockSize`. // This property cannot be changed after a VMFS volume is created. - UnmapGranularity int32 `xml:"unmapGranularity,omitempty" json:"unmapGranularity,omitempty" vim:"6.5"` + UnmapGranularity int32 `xml:"unmapGranularity,omitempty" json:"unmapGranularity,omitempty"` // VMFS unmap reclaims unused storage space. // // This property @@ -47019,12 +46910,12 @@ type HostVmfsVolume struct { // `low`, which means // unmap is processed at low rate. This property can be updated by // calling `HostStorageSystem.UpdateVmfsUnmapPriority`. - UnmapPriority string `xml:"unmapPriority,omitempty" json:"unmapPriority,omitempty" vim:"6.5"` + UnmapPriority string `xml:"unmapPriority,omitempty" json:"unmapPriority,omitempty"` // VMFS unmap bandwidth related specification. // // See // `VmfsUnmapBandwidthSpec` for detail. - UnmapBandwidthSpec *VmfsUnmapBandwidthSpec `xml:"unmapBandwidthSpec,omitempty" json:"unmapBandwidthSpec,omitempty" vim:"6.7"` + UnmapBandwidthSpec *VmfsUnmapBandwidthSpec `xml:"unmapBandwidthSpec,omitempty" json:"unmapBandwidthSpec,omitempty"` // Maximum number of blocks. // // Determines maximum file size along @@ -47073,22 +46964,22 @@ type HostVmfsVolume struct { // 'UnresolvedVmfsVolume'. If user decides to 'forceMount' the // VmfsVolume on the host, forceMountedInfo will be populated. // It will not be set for automounted VMFS volumes. - ForceMountedInfo *HostForceMountedInfo `xml:"forceMountedInfo,omitempty" json:"forceMountedInfo,omitempty" vim:"4.0"` + ForceMountedInfo *HostForceMountedInfo `xml:"forceMountedInfo,omitempty" json:"forceMountedInfo,omitempty"` // Indicates whether the volume is SSD backed. // // If unset, the information whether the volume is SSD backed is unknown. - Ssd *bool `xml:"ssd" json:"ssd,omitempty" vim:"5.0"` + Ssd *bool `xml:"ssd" json:"ssd,omitempty"` // Indicates whether the volume is backed by local disk. // // If unset, the information of the volume is local-disk backed is unknown. 
- Local *bool `xml:"local" json:"local,omitempty" vim:"5.5"` + Local *bool `xml:"local" json:"local,omitempty"` // The type of disk drives. // // See `ScsiDiskType_enum` // for supported types. // If unset, the default disk drive type is // `native512`. - ScsiDiskType string `xml:"scsiDiskType,omitempty" json:"scsiDiskType,omitempty" vim:"6.5"` + ScsiDiskType string `xml:"scsiDiskType,omitempty" json:"scsiDiskType,omitempty"` } func init() { @@ -47104,12 +46995,11 @@ type HostVnicConnectedToCustomizedDVPortEvent struct { // Information about the Virtual NIC that is using the DVport. Vnic VnicPortArgument `xml:"vnic" json:"vnic"` // Information about the previous Virtual NIC that is using the DVport. - PrevPortKey string `xml:"prevPortKey,omitempty" json:"prevPortKey,omitempty" vim:"6.5"` + PrevPortKey string `xml:"prevPortKey,omitempty" json:"prevPortKey,omitempty"` } func init() { t["HostVnicConnectedToCustomizedDVPortEvent"] = reflect.TypeOf((*HostVnicConnectedToCustomizedDVPortEvent)(nil)).Elem() - minAPIVersionForType["HostVnicConnectedToCustomizedDVPortEvent"] = "4.0" } // All fields in the CMMDS Query spec are optional, but at least one needs @@ -47129,7 +47019,6 @@ type HostVsanInternalSystemCmmdsQuery struct { func init() { t["HostVsanInternalSystemCmmdsQuery"] = reflect.TypeOf((*HostVsanInternalSystemCmmdsQuery)(nil)).Elem() - minAPIVersionForType["HostVsanInternalSystemCmmdsQuery"] = "5.5" } // Result of DeleteVsanObjects. @@ -47148,7 +47037,6 @@ type HostVsanInternalSystemDeleteVsanObjectsResult struct { func init() { t["HostVsanInternalSystemDeleteVsanObjectsResult"] = reflect.TypeOf((*HostVsanInternalSystemDeleteVsanObjectsResult)(nil)).Elem() - minAPIVersionForType["HostVsanInternalSystemDeleteVsanObjectsResult"] = "5.5" } // Operation result for a VSAN object upon failure. @@ -47163,7 +47051,6 @@ type HostVsanInternalSystemVsanObjectOperationResult struct { func init() { t["HostVsanInternalSystemVsanObjectOperationResult"] = reflect.TypeOf((*HostVsanInternalSystemVsanObjectOperationResult)(nil)).Elem() - minAPIVersionForType["HostVsanInternalSystemVsanObjectOperationResult"] = "6.0" } // Result structure for a VSAN Physical Disk Diagnostics run. 
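The `HostVmfsSpec` and `HostVmfsVolume` comments above describe the VMFS unmap settings: a granularity expressed in KB (minimum 8 KB, bounded by the volume block size) and a processing priority taken from `HostVmfsVolumeUnmapPriority_enum`, where `low` is the default. Below is a minimal sketch that fills only those fields of the vendored spec type; the granularity value is an illustrative assumption, not a recommendation, and a real create spec needs the remaining fields as well.

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	// Only the unmap-related fields discussed above are set. 1024 KB (1 MB)
	// is an assumed example granularity; "low" is the default priority named
	// in the HostVmfsVolume comment. This is not a complete HostVmfsSpec.
	spec := types.HostVmfsSpec{
		UnmapGranularity: 1024,
		UnmapPriority:    "low",
	}
	fmt.Printf("%+v\n", spec)
}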
@@ -47183,7 +47070,6 @@ type HostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult struct { func init() { t["HostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult"] = reflect.TypeOf((*HostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult)(nil)).Elem() - minAPIVersionForType["HostVsanInternalSystemVsanPhysicalDiskDiagnosticsResult"] = "5.5" } type HostVvolNQN struct { @@ -47196,6 +47082,7 @@ type HostVvolNQN struct { func init() { t["HostVvolNQN"] = reflect.TypeOf((*HostVvolNQN)(nil)).Elem() + minAPIVersionForType["HostVvolNQN"] = "8.0.2.0" } type HostVvolVolume struct { @@ -47214,6 +47101,8 @@ type HostVvolVolume struct { ProtocolEndpointType string `xml:"protocolEndpointType,omitempty" json:"protocolEndpointType,omitempty" vim:"8.0.0.0"` // vVol NQN field availability VvolNQNFieldsAvailable *bool `xml:"vvolNQNFieldsAvailable" json:"vvolNQNFieldsAvailable,omitempty" vim:"8.0.2.0"` + // if set to true, indicates a stretched container + Stretched *bool `xml:"stretched" json:"stretched,omitempty" vim:"8.0.3.0"` } func init() { @@ -47233,6 +47122,7 @@ type HostVvolVolumeHostVvolNQN struct { func init() { t["HostVvolVolumeHostVvolNQN"] = reflect.TypeOf((*HostVvolVolumeHostVvolNQN)(nil)).Elem() + minAPIVersionForType["HostVvolVolumeHostVvolNQN"] = "8.0.2.0" } type HostVvolVolumeSpecification struct { @@ -47248,6 +47138,8 @@ type HostVvolVolumeSpecification struct { StorageArray []VASAStorageArray `xml:"storageArray,omitempty" json:"storageArray,omitempty"` // Vendor specified storage-container ID Uuid string `xml:"uuid" json:"uuid"` + // if set to true, indicates a stretched container + Stretched *bool `xml:"stretched" json:"stretched,omitempty" vim:"8.0.3.0"` } func init() { @@ -47270,7 +47162,6 @@ type HostWwnChangedEvent struct { func init() { t["HostWwnChangedEvent"] = reflect.TypeOf((*HostWwnChangedEvent)(nil)).Elem() - minAPIVersionForType["HostWwnChangedEvent"] = "2.5" } // This event records a conflict of host WWNs (World Wide Name). @@ -47289,7 +47180,6 @@ type HostWwnConflictEvent struct { func init() { t["HostWwnConflictEvent"] = reflect.TypeOf((*HostWwnConflictEvent)(nil)).Elem() - minAPIVersionForType["HostWwnConflictEvent"] = "2.5" } // An attempt is being made to move a virtual machine's disk that has @@ -47301,7 +47191,6 @@ type HotSnapshotMoveNotSupported struct { func init() { t["HotSnapshotMoveNotSupported"] = reflect.TypeOf((*HotSnapshotMoveNotSupported)(nil)).Elem() - minAPIVersionForType["HotSnapshotMoveNotSupported"] = "2.5" } type HotSnapshotMoveNotSupportedFault HotSnapshotMoveNotSupported @@ -47350,7 +47239,6 @@ type HttpFault struct { func init() { t["HttpFault"] = reflect.TypeOf((*HttpFault)(nil)).Elem() - minAPIVersionForType["HttpFault"] = "4.0" } type HttpFaultFault HttpFault @@ -47394,7 +47282,6 @@ type HttpNfcLeaseCapabilities struct { func init() { t["HttpNfcLeaseCapabilities"] = reflect.TypeOf((*HttpNfcLeaseCapabilities)(nil)).Elem() - minAPIVersionForType["HttpNfcLeaseCapabilities"] = "6.7" } type HttpNfcLeaseComplete HttpNfcLeaseCompleteRequestType @@ -47431,7 +47318,6 @@ type HttpNfcLeaseDatastoreLeaseInfo struct { func init() { t["HttpNfcLeaseDatastoreLeaseInfo"] = reflect.TypeOf((*HttpNfcLeaseDatastoreLeaseInfo)(nil)).Elem() - minAPIVersionForType["HttpNfcLeaseDatastoreLeaseInfo"] = "4.1" } // Provides a mapping from logical device IDs to upload/download @@ -47457,7 +47343,24 @@ type HttpNfcLeaseDeviceUrl struct { // This is only // set for import leases. 
ImportKey string `xml:"importKey" json:"importKey"` - Url string `xml:"url" json:"url"` + // The URL to use to upload/download the device content. + // + // The returned url contains either an IP address, a hostname or a "\*". If a + // "\*" is returned the client must substitutes the "\*" with the hostname + // or IP address used when connecting to the server. + // For example if the client connected to "someHost" and the device + // url returned is: + // + // http:// *:somePort/somePath + // + // the client must substitute the "\*" with "someHost" before use. The resulting + // url would be: + // + // http://someHost:somePort/somePath + // + // The server cannot return a valid hostname or IP address when the client + // connects via a NAT, a proxy, or when the server is multihomed. + Url string `xml:"url" json:"url"` // SSL thumbprint for the host the URL refers to. // // Empty if no SSL thumbprint @@ -47465,28 +47368,27 @@ type HttpNfcLeaseDeviceUrl struct { SslThumbprint string `xml:"sslThumbprint" json:"sslThumbprint"` // Optional value to specify if the attached file is a disk in // vmdk format. - Disk *bool `xml:"disk" json:"disk,omitempty" vim:"4.1"` + Disk *bool `xml:"disk" json:"disk,omitempty"` // Id for this target. // // This only used for multi-POSTing, where a single HTTP // POST is applied to multiple targets. - TargetId string `xml:"targetId,omitempty" json:"targetId,omitempty" vim:"4.1"` + TargetId string `xml:"targetId,omitempty" json:"targetId,omitempty"` // Key for the datastore this disk is on. // // This is used to look up hosts // which can be used to multi-POST disk contents, in the host map of the // lease. - DatastoreKey string `xml:"datastoreKey,omitempty" json:"datastoreKey,omitempty" vim:"4.1"` + DatastoreKey string `xml:"datastoreKey,omitempty" json:"datastoreKey,omitempty"` // Specifies the size of the file backing for this device. // // This property // is only set for non-disk file backings. - FileSize int64 `xml:"fileSize,omitempty" json:"fileSize,omitempty" vim:"4.1"` + FileSize int64 `xml:"fileSize,omitempty" json:"fileSize,omitempty"` } func init() { t["HttpNfcLeaseDeviceUrl"] = reflect.TypeOf((*HttpNfcLeaseDeviceUrl)(nil)).Elem() - minAPIVersionForType["HttpNfcLeaseDeviceUrl"] = "4.0" } type HttpNfcLeaseGetManifest HttpNfcLeaseGetManifestRequestType @@ -47513,15 +47415,17 @@ type HttpNfcLeaseHostInfo struct { // The host url will be of the form // - // https://hostname/nfc/ticket id/ + // https://hostname/nfc/ticket id/ + // // The url can be used for both POST requests to a single device and for // multi-POST requests to multiple devices. A single-POST URL is formed // by adding the target id to the hostUrl: // - // https://hostname/nfc/ticket id/target id + // https://hostname/nfc/ticket id/target id + // // a multi-POST URL looks like // - // https://hostname/nfc/ticket id/multi?targets=id1,id2,id3,... + // https://hostname/nfc/ticket id/multi?targets=id1,id2,id3,... Url string `xml:"url" json:"url"` // SSL thumbprint for the host the URL refers to. // @@ -47532,7 +47436,6 @@ type HttpNfcLeaseHostInfo struct { func init() { t["HttpNfcLeaseHostInfo"] = reflect.TypeOf((*HttpNfcLeaseHostInfo)(nil)).Elem() - minAPIVersionForType["HttpNfcLeaseHostInfo"] = "4.1" } // This class holds information about the lease, such as the entity covered by the @@ -47568,12 +47471,11 @@ type HttpNfcLeaseInfo struct { // // This is used to // look up multi-POST-capable hosts for a datastore. 
- HostMap []HttpNfcLeaseDatastoreLeaseInfo `xml:"hostMap,omitempty" json:"hostMap,omitempty" vim:"4.1"` + HostMap []HttpNfcLeaseDatastoreLeaseInfo `xml:"hostMap,omitempty" json:"hostMap,omitempty"` } func init() { t["HttpNfcLeaseInfo"] = reflect.TypeOf((*HttpNfcLeaseInfo)(nil)).Elem() - minAPIVersionForType["HttpNfcLeaseInfo"] = "4.0" } // Provides a manifest for downloaded (exported) files and disks. @@ -47592,11 +47494,11 @@ type HttpNfcLeaseManifestEntry struct { // Checksum of the data stream sent/recieved by host. // // See `HttpNfcLeaseManifestEntryChecksumType_enum` for used algoritm. - Checksum string `xml:"checksum,omitempty" json:"checksum,omitempty" vim:"6.7"` + Checksum string `xml:"checksum,omitempty" json:"checksum,omitempty"` // Algorithm used to produce checksum in respective property. // // See `HttpNfcLeaseManifestEntryChecksumType_enum` for supported algorithms. - ChecksumType string `xml:"checksumType,omitempty" json:"checksumType,omitempty" vim:"6.7"` + ChecksumType string `xml:"checksumType,omitempty" json:"checksumType,omitempty"` // Size of the downloaded file. Size int64 `xml:"size" json:"size"` // True if the downloaded file is a virtual disk backing. @@ -47609,7 +47511,6 @@ type HttpNfcLeaseManifestEntry struct { func init() { t["HttpNfcLeaseManifestEntry"] = reflect.TypeOf((*HttpNfcLeaseManifestEntry)(nil)).Elem() - minAPIVersionForType["HttpNfcLeaseManifestEntry"] = "4.1" } // Descriptor of ProbeResult @@ -47645,6 +47546,7 @@ type HttpNfcLeaseProbeUrlsRequestType struct { func init() { t["HttpNfcLeaseProbeUrlsRequestType"] = reflect.TypeOf((*HttpNfcLeaseProbeUrlsRequestType)(nil)).Elem() + minAPIVersionForType["HttpNfcLeaseProbeUrlsRequestType"] = "7.0.2.0" } type HttpNfcLeaseProbeUrlsResponse struct { @@ -47756,7 +47658,6 @@ type HttpNfcLeaseSourceFile struct { func init() { t["HttpNfcLeaseSourceFile"] = reflect.TypeOf((*HttpNfcLeaseSourceFile)(nil)).Elem() - minAPIVersionForType["HttpNfcLeaseSourceFile"] = "6.7" } // This data object type describes an identifier class which @@ -47771,7 +47672,6 @@ type ID struct { func init() { t["ID"] = reflect.TypeOf((*ID)(nil)).Elem() - minAPIVersionForType["ID"] = "6.5" } // Deprecated as of VI API 2.5, use `DeviceControllerNotSupported`. 
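The doc comment added to `HttpNfcLeaseDeviceUrl.Url` above spells out how a client must handle a device URL whose host comes back as "*": substitute the host name or IP it used when connecting to the server. A minimal sketch of that substitution follows; the helper name and values are illustrative only and not part of the vendored package.

package main

import (
	"fmt"
	"strings"
)

// resolveDeviceURL replaces the "*" host placeholder in a lease device URL
// with the host the client actually connected to, as described for
// HttpNfcLeaseDeviceUrl.Url. Hypothetical helper, shown for illustration.
func resolveDeviceURL(deviceURL, connectedHost string) string {
	return strings.Replace(deviceURL, "*", connectedHost, 1)
}

func main() {
	// From the comment's example: http://*:somePort/somePath becomes
	// http://someHost:somePort/somePath when the client connected to "someHost".
	fmt.Println(resolveDeviceURL("http://*:443/somePath", "someHost"))
}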
@@ -47812,7 +47712,6 @@ type IORMNotSupportedHostOnDatastore struct { func init() { t["IORMNotSupportedHostOnDatastore"] = reflect.TypeOf((*IORMNotSupportedHostOnDatastore)(nil)).Elem() - minAPIVersionForType["IORMNotSupportedHostOnDatastore"] = "4.1" } type IORMNotSupportedHostOnDatastoreFault IORMNotSupportedHostOnDatastore @@ -47828,7 +47727,6 @@ type IScsiBootFailureEvent struct { func init() { t["IScsiBootFailureEvent"] = reflect.TypeOf((*IScsiBootFailureEvent)(nil)).Elem() - minAPIVersionForType["IScsiBootFailureEvent"] = "4.1" } type ImpersonateUser ImpersonateUserRequestType @@ -47895,7 +47793,6 @@ type ImportHostAddFailure struct { func init() { t["ImportHostAddFailure"] = reflect.TypeOf((*ImportHostAddFailure)(nil)).Elem() - minAPIVersionForType["ImportHostAddFailure"] = "5.1" } type ImportHostAddFailureFault ImportHostAddFailure @@ -47914,7 +47811,6 @@ type ImportOperationBulkFault struct { func init() { t["ImportOperationBulkFault"] = reflect.TypeOf((*ImportOperationBulkFault)(nil)).Elem() - minAPIVersionForType["ImportOperationBulkFault"] = "5.1" } type ImportOperationBulkFaultFault ImportOperationBulkFault @@ -47940,7 +47836,6 @@ type ImportOperationBulkFaultFaultOnImport struct { func init() { t["ImportOperationBulkFaultFaultOnImport"] = reflect.TypeOf((*ImportOperationBulkFaultFaultOnImport)(nil)).Elem() - minAPIVersionForType["ImportOperationBulkFaultFaultOnImport"] = "5.1" } // An ImportSpec is used when importing VMs or vApps. @@ -47961,12 +47856,11 @@ type ImportSpec struct { EntityConfig *VAppEntityConfigInfo `xml:"entityConfig,omitempty" json:"entityConfig,omitempty"` // The instantiation OST (see `OvfConsumer` ) to be consumed by OVF // consumers. - InstantiationOst *OvfConsumerOstNode `xml:"instantiationOst,omitempty" json:"instantiationOst,omitempty" vim:"5.0"` + InstantiationOst *OvfConsumerOstNode `xml:"instantiationOst,omitempty" json:"instantiationOst,omitempty"` } func init() { t["ImportSpec"] = reflect.TypeOf((*ImportSpec)(nil)).Elem() - minAPIVersionForType["ImportSpec"] = "4.0" } type ImportUnmanagedSnapshot ImportUnmanagedSnapshotRequestType @@ -48040,7 +47934,6 @@ type InUseFeatureManipulationDisallowed struct { func init() { t["InUseFeatureManipulationDisallowed"] = reflect.TypeOf((*InUseFeatureManipulationDisallowed)(nil)).Elem() - minAPIVersionForType["InUseFeatureManipulationDisallowed"] = "4.0" } type InUseFeatureManipulationDisallowedFault InUseFeatureManipulationDisallowed @@ -48076,7 +47969,6 @@ type InaccessibleFTMetadataDatastore struct { func init() { t["InaccessibleFTMetadataDatastore"] = reflect.TypeOf((*InaccessibleFTMetadataDatastore)(nil)).Elem() - minAPIVersionForType["InaccessibleFTMetadataDatastore"] = "6.0" } type InaccessibleFTMetadataDatastoreFault InaccessibleFTMetadataDatastore @@ -48098,7 +47990,6 @@ type InaccessibleVFlashSource struct { func init() { t["InaccessibleVFlashSource"] = reflect.TypeOf((*InaccessibleVFlashSource)(nil)).Elem() - minAPIVersionForType["InaccessibleVFlashSource"] = "5.5" } type InaccessibleVFlashSourceFault InaccessibleVFlashSource @@ -48132,7 +48023,6 @@ type IncompatibleDefaultDevice struct { func init() { t["IncompatibleDefaultDevice"] = reflect.TypeOf((*IncompatibleDefaultDevice)(nil)).Elem() - minAPIVersionForType["IncompatibleDefaultDevice"] = "2.5" } type IncompatibleDefaultDeviceFault IncompatibleDefaultDevice @@ -48158,7 +48048,6 @@ type IncompatibleHostForFtSecondary struct { func init() { t["IncompatibleHostForFtSecondary"] = reflect.TypeOf((*IncompatibleHostForFtSecondary)(nil)).Elem() - 
minAPIVersionForType["IncompatibleHostForFtSecondary"] = "4.0" } type IncompatibleHostForFtSecondaryFault IncompatibleHostForFtSecondary @@ -48183,7 +48072,6 @@ type IncompatibleHostForVmReplication struct { func init() { t["IncompatibleHostForVmReplication"] = reflect.TypeOf((*IncompatibleHostForVmReplication)(nil)).Elem() - minAPIVersionForType["IncompatibleHostForVmReplication"] = "6.0" } type IncompatibleHostForVmReplicationFault IncompatibleHostForVmReplication @@ -48236,7 +48124,6 @@ type IncorrectHostInformation struct { func init() { t["IncorrectHostInformation"] = reflect.TypeOf((*IncorrectHostInformation)(nil)).Elem() - minAPIVersionForType["IncorrectHostInformation"] = "2.5" } // This event records if the host did not provide the information needed @@ -48247,7 +48134,6 @@ type IncorrectHostInformationEvent struct { func init() { t["IncorrectHostInformationEvent"] = reflect.TypeOf((*IncorrectHostInformationEvent)(nil)).Elem() - minAPIVersionForType["IncorrectHostInformationEvent"] = "2.5" } type IncorrectHostInformationFault IncorrectHostInformation @@ -48282,6 +48168,7 @@ type IncreaseDirectorySizeRequestType struct { func init() { t["IncreaseDirectorySizeRequestType"] = reflect.TypeOf((*IncreaseDirectorySizeRequestType)(nil)).Elem() + minAPIVersionForType["IncreaseDirectorySizeRequestType"] = "8.0.1.0" } type IncreaseDirectorySizeResponse struct { @@ -48296,7 +48183,6 @@ type IndependentDiskVMotionNotSupported struct { func init() { t["IndependentDiskVMotionNotSupported"] = reflect.TypeOf((*IndependentDiskVMotionNotSupported)(nil)).Elem() - minAPIVersionForType["IndependentDiskVMotionNotSupported"] = "2.5" } type IndependentDiskVMotionNotSupportedFault IndependentDiskVMotionNotSupported @@ -48392,7 +48278,6 @@ type InheritablePolicy struct { func init() { t["InheritablePolicy"] = reflect.TypeOf((*InheritablePolicy)(nil)).Elem() - minAPIVersionForType["InheritablePolicy"] = "4.0" } // The parameters of `HostVsanSystem.InitializeDisks_Task`. @@ -48557,6 +48442,10 @@ type InstallIoFilterRequestType struct { // // Refers instance of `ComputeResource`. CompRes ManagedObjectReference `xml:"compRes" json:"compRes"` + // This specifies SSL trust policy `IoFilterManagerSslTrust` + // for the given VIB URL. If unset, the server certificate is + // validated against the trusted root certificates. + VibSslTrust BaseIoFilterManagerSslTrust `xml:"vibSslTrust,omitempty,typeattr" json:"vibSslTrust,omitempty" vim:"8.0.3.0"` } func init() { @@ -48650,7 +48539,6 @@ type InsufficientAgentVmsDeployed struct { func init() { t["InsufficientAgentVmsDeployed"] = reflect.TypeOf((*InsufficientAgentVmsDeployed)(nil)).Elem() - minAPIVersionForType["InsufficientAgentVmsDeployed"] = "5.0" } type InsufficientAgentVmsDeployedFault InsufficientAgentVmsDeployed @@ -48688,7 +48576,6 @@ type InsufficientDisks struct { func init() { t["InsufficientDisks"] = reflect.TypeOf((*InsufficientDisks)(nil)).Elem() - minAPIVersionForType["InsufficientDisks"] = "5.5" } type InsufficientDisksFault InsufficientDisks @@ -48741,7 +48628,6 @@ type InsufficientGraphicsResourcesFault struct { func init() { t["InsufficientGraphicsResourcesFault"] = reflect.TypeOf((*InsufficientGraphicsResourcesFault)(nil)).Elem() - minAPIVersionForType["InsufficientGraphicsResourcesFault"] = "6.0" } type InsufficientGraphicsResourcesFaultFault InsufficientGraphicsResourcesFault @@ -48757,7 +48643,7 @@ type InsufficientHostCapacityFault struct { // The host which does not have the enough capacity. // // Refers instance of `HostSystem`. 
- Host *ManagedObjectReference `xml:"host,omitempty" json:"host,omitempty" vim:"2.5"` + Host *ManagedObjectReference `xml:"host,omitempty" json:"host,omitempty"` } func init() { @@ -48782,7 +48668,6 @@ type InsufficientHostCpuCapacityFault struct { func init() { t["InsufficientHostCpuCapacityFault"] = reflect.TypeOf((*InsufficientHostCpuCapacityFault)(nil)).Elem() - minAPIVersionForType["InsufficientHostCpuCapacityFault"] = "4.0" } type InsufficientHostCpuCapacityFaultFault InsufficientHostCpuCapacityFault @@ -48803,7 +48688,6 @@ type InsufficientHostMemoryCapacityFault struct { func init() { t["InsufficientHostMemoryCapacityFault"] = reflect.TypeOf((*InsufficientHostMemoryCapacityFault)(nil)).Elem() - minAPIVersionForType["InsufficientHostMemoryCapacityFault"] = "4.0" } type InsufficientHostMemoryCapacityFaultFault InsufficientHostMemoryCapacityFault @@ -48839,7 +48723,6 @@ type InsufficientNetworkCapacity struct { func init() { t["InsufficientNetworkCapacity"] = reflect.TypeOf((*InsufficientNetworkCapacity)(nil)).Elem() - minAPIVersionForType["InsufficientNetworkCapacity"] = "6.0" } type InsufficientNetworkCapacityFault InsufficientNetworkCapacity @@ -48871,7 +48754,6 @@ type InsufficientNetworkResourcePoolCapacity struct { func init() { t["InsufficientNetworkResourcePoolCapacity"] = reflect.TypeOf((*InsufficientNetworkResourcePoolCapacity)(nil)).Elem() - minAPIVersionForType["InsufficientNetworkResourcePoolCapacity"] = "6.0" } type InsufficientNetworkResourcePoolCapacityFault InsufficientNetworkResourcePoolCapacity @@ -48887,7 +48769,6 @@ type InsufficientPerCpuCapacity struct { func init() { t["InsufficientPerCpuCapacity"] = reflect.TypeOf((*InsufficientPerCpuCapacity)(nil)).Elem() - minAPIVersionForType["InsufficientPerCpuCapacity"] = "2.5" } type InsufficientPerCpuCapacityFault InsufficientPerCpuCapacity @@ -48931,7 +48812,6 @@ type InsufficientStandbyCpuResource struct { func init() { t["InsufficientStandbyCpuResource"] = reflect.TypeOf((*InsufficientStandbyCpuResource)(nil)).Elem() - minAPIVersionForType["InsufficientStandbyCpuResource"] = "4.0" } type InsufficientStandbyCpuResourceFault InsufficientStandbyCpuResource @@ -48958,7 +48838,6 @@ type InsufficientStandbyMemoryResource struct { func init() { t["InsufficientStandbyMemoryResource"] = reflect.TypeOf((*InsufficientStandbyMemoryResource)(nil)).Elem() - minAPIVersionForType["InsufficientStandbyMemoryResource"] = "4.0" } type InsufficientStandbyMemoryResourceFault InsufficientStandbyMemoryResource @@ -48976,7 +48855,6 @@ type InsufficientStandbyResource struct { func init() { t["InsufficientStandbyResource"] = reflect.TypeOf((*InsufficientStandbyResource)(nil)).Elem() - minAPIVersionForType["InsufficientStandbyResource"] = "4.0" } type InsufficientStandbyResourceFault BaseInsufficientStandbyResource @@ -48999,7 +48877,6 @@ type InsufficientStorageIops struct { func init() { t["InsufficientStorageIops"] = reflect.TypeOf((*InsufficientStorageIops)(nil)).Elem() - minAPIVersionForType["InsufficientStorageIops"] = "6.0" } type InsufficientStorageIopsFault InsufficientStorageIops @@ -49017,7 +48894,6 @@ type InsufficientStorageSpace struct { func init() { t["InsufficientStorageSpace"] = reflect.TypeOf((*InsufficientStorageSpace)(nil)).Elem() - minAPIVersionForType["InsufficientStorageSpace"] = "5.0" } type InsufficientStorageSpaceFault InsufficientStorageSpace @@ -49031,18 +48907,17 @@ type InsufficientVFlashResourcesFault struct { InsufficientResourcesFault // The vFlash resource available capacity in MB. 
- FreeSpaceInMB int64 `xml:"freeSpaceInMB,omitempty" json:"freeSpaceInMB,omitempty" vim:"6.0"` + FreeSpaceInMB int64 `xml:"freeSpaceInMB,omitempty" json:"freeSpaceInMB,omitempty"` // The vFlash resource available capacity in bytes. FreeSpace int64 `xml:"freeSpace" json:"freeSpace"` // The vFlash resource amount requested in MB. - RequestedSpaceInMB int64 `xml:"requestedSpaceInMB,omitempty" json:"requestedSpaceInMB,omitempty" vim:"6.0"` + RequestedSpaceInMB int64 `xml:"requestedSpaceInMB,omitempty" json:"requestedSpaceInMB,omitempty"` // The vFlash resource amount requested in bytes. RequestedSpace int64 `xml:"requestedSpace" json:"requestedSpace"` } func init() { t["InsufficientVFlashResourcesFault"] = reflect.TypeOf((*InsufficientVFlashResourcesFault)(nil)).Elem() - minAPIVersionForType["InsufficientVFlashResourcesFault"] = "5.5" } type InsufficientVFlashResourcesFaultFault InsufficientVFlashResourcesFault @@ -49062,7 +48937,6 @@ type IntExpression struct { func init() { t["IntExpression"] = reflect.TypeOf((*IntExpression)(nil)).Elem() - minAPIVersionForType["IntExpression"] = "5.5" } // The IntOption data object type is used to define the minimum, maximum, @@ -49093,7 +48967,6 @@ type IntPolicy struct { func init() { t["IntPolicy"] = reflect.TypeOf((*IntPolicy)(nil)).Elem() - minAPIVersionForType["IntPolicy"] = "4.0" } // An InvalidAffinitySettingsFault is thrown if an invalid affinity setting is @@ -49104,7 +48977,6 @@ type InvalidAffinitySettingFault struct { func init() { t["InvalidAffinitySettingFault"] = reflect.TypeOf((*InvalidAffinitySettingFault)(nil)).Elem() - minAPIVersionForType["InvalidAffinitySettingFault"] = "2.5" } type InvalidAffinitySettingFaultFault InvalidAffinitySettingFault @@ -49144,7 +49016,6 @@ type InvalidBmcRole struct { func init() { t["InvalidBmcRole"] = reflect.TypeOf((*InvalidBmcRole)(nil)).Elem() - minAPIVersionForType["InvalidBmcRole"] = "4.0" } type InvalidBmcRoleFault InvalidBmcRole @@ -49161,7 +49032,6 @@ type InvalidBundle struct { func init() { t["InvalidBundle"] = reflect.TypeOf((*InvalidBundle)(nil)).Elem() - minAPIVersionForType["InvalidBundle"] = "2.5" } type InvalidBundleFault InvalidBundle @@ -49178,7 +49048,6 @@ type InvalidCAMCertificate struct { func init() { t["InvalidCAMCertificate"] = reflect.TypeOf((*InvalidCAMCertificate)(nil)).Elem() - minAPIVersionForType["InvalidCAMCertificate"] = "5.0" } type InvalidCAMCertificateFault InvalidCAMCertificate @@ -49199,7 +49068,6 @@ type InvalidCAMServer struct { func init() { t["InvalidCAMServer"] = reflect.TypeOf((*InvalidCAMServer)(nil)).Elem() - minAPIVersionForType["InvalidCAMServer"] = "5.0" } type InvalidCAMServerFault BaseInvalidCAMServer @@ -49216,7 +49084,6 @@ type InvalidClientCertificate struct { func init() { t["InvalidClientCertificate"] = reflect.TypeOf((*InvalidClientCertificate)(nil)).Elem() - minAPIVersionForType["InvalidClientCertificate"] = "2.5u2" } type InvalidClientCertificateFault InvalidClientCertificate @@ -49277,7 +49144,6 @@ type InvalidDasConfigArgument struct { func init() { t["InvalidDasConfigArgument"] = reflect.TypeOf((*InvalidDasConfigArgument)(nil)).Elem() - minAPIVersionForType["InvalidDasConfigArgument"] = "5.1" } type InvalidDasConfigArgumentFault InvalidDasConfigArgument @@ -49301,7 +49167,6 @@ type InvalidDasRestartPriorityForFtVm struct { func init() { t["InvalidDasRestartPriorityForFtVm"] = reflect.TypeOf((*InvalidDasRestartPriorityForFtVm)(nil)).Elem() - minAPIVersionForType["InvalidDasRestartPriorityForFtVm"] = "4.1" } type 
InvalidDasRestartPriorityForFtVmFault InvalidDasRestartPriorityForFtVm @@ -49371,7 +49236,6 @@ type InvalidDatastoreState struct { func init() { t["InvalidDatastoreState"] = reflect.TypeOf((*InvalidDatastoreState)(nil)).Elem() - minAPIVersionForType["InvalidDatastoreState"] = "5.0" } type InvalidDatastoreStateFault InvalidDatastoreState @@ -49472,7 +49336,6 @@ type InvalidDrsBehaviorForFtVm struct { func init() { t["InvalidDrsBehaviorForFtVm"] = reflect.TypeOf((*InvalidDrsBehaviorForFtVm)(nil)).Elem() - minAPIVersionForType["InvalidDrsBehaviorForFtVm"] = "4.0" } type InvalidDrsBehaviorForFtVmFault InvalidDrsBehaviorForFtVm @@ -49490,7 +49353,6 @@ type InvalidEditionEvent struct { func init() { t["InvalidEditionEvent"] = reflect.TypeOf((*InvalidEditionEvent)(nil)).Elem() - minAPIVersionForType["InvalidEditionEvent"] = "2.5" } // An ExpiredEditionLicense fault is thrown if an attempt to acquire an Edition license @@ -49503,7 +49365,6 @@ type InvalidEditionLicense struct { func init() { t["InvalidEditionLicense"] = reflect.TypeOf((*InvalidEditionLicense)(nil)).Elem() - minAPIVersionForType["InvalidEditionLicense"] = "2.5" } type InvalidEditionLicenseFault InvalidEditionLicense @@ -49520,7 +49381,6 @@ type InvalidEvent struct { func init() { t["InvalidEvent"] = reflect.TypeOf((*InvalidEvent)(nil)).Elem() - minAPIVersionForType["InvalidEvent"] = "2.5" } type InvalidEventFault InvalidEvent @@ -49581,7 +49441,6 @@ type InvalidGuestLogin struct { func init() { t["InvalidGuestLogin"] = reflect.TypeOf((*InvalidGuestLogin)(nil)).Elem() - minAPIVersionForType["InvalidGuestLogin"] = "5.0" } type InvalidGuestLoginFault InvalidGuestLogin @@ -49597,7 +49456,6 @@ type InvalidHostConnectionState struct { func init() { t["InvalidHostConnectionState"] = reflect.TypeOf((*InvalidHostConnectionState)(nil)).Elem() - minAPIVersionForType["InvalidHostConnectionState"] = "5.1" } type InvalidHostConnectionStateFault InvalidHostConnectionState @@ -49613,7 +49471,6 @@ type InvalidHostName struct { func init() { t["InvalidHostName"] = reflect.TypeOf((*InvalidHostName)(nil)).Elem() - minAPIVersionForType["InvalidHostName"] = "4.1" } type InvalidHostNameFault InvalidHostName @@ -49634,7 +49491,6 @@ type InvalidHostState struct { func init() { t["InvalidHostState"] = reflect.TypeOf((*InvalidHostState)(nil)).Elem() - minAPIVersionForType["InvalidHostState"] = "2.5" } type InvalidHostStateFault BaseInvalidHostState @@ -49654,7 +49510,6 @@ type InvalidIndexArgument struct { func init() { t["InvalidIndexArgument"] = reflect.TypeOf((*InvalidIndexArgument)(nil)).Elem() - minAPIVersionForType["InvalidIndexArgument"] = "4.0" } type InvalidIndexArgumentFault InvalidIndexArgument @@ -49673,7 +49528,6 @@ type InvalidIpfixConfig struct { func init() { t["InvalidIpfixConfig"] = reflect.TypeOf((*InvalidIpfixConfig)(nil)).Elem() - minAPIVersionForType["InvalidIpfixConfig"] = "5.1" } type InvalidIpfixConfigFault InvalidIpfixConfig @@ -49690,7 +49544,6 @@ type InvalidIpmiLoginInfo struct { func init() { t["InvalidIpmiLoginInfo"] = reflect.TypeOf((*InvalidIpmiLoginInfo)(nil)).Elem() - minAPIVersionForType["InvalidIpmiLoginInfo"] = "4.0" } type InvalidIpmiLoginInfoFault InvalidIpmiLoginInfo @@ -49710,7 +49563,6 @@ type InvalidIpmiMacAddress struct { func init() { t["InvalidIpmiMacAddress"] = reflect.TypeOf((*InvalidIpmiMacAddress)(nil)).Elem() - minAPIVersionForType["InvalidIpmiMacAddress"] = "4.0" } type InvalidIpmiMacAddressFault InvalidIpmiMacAddress @@ -49805,7 +49657,6 @@ type InvalidNasCredentials struct { func init() { 
t["InvalidNasCredentials"] = reflect.TypeOf((*InvalidNasCredentials)(nil)).Elem() - minAPIVersionForType["InvalidNasCredentials"] = "2.5 U2" } type InvalidNasCredentialsFault InvalidNasCredentials @@ -49821,7 +49672,6 @@ type InvalidNetworkInType struct { func init() { t["InvalidNetworkInType"] = reflect.TypeOf((*InvalidNetworkInType)(nil)).Elem() - minAPIVersionForType["InvalidNetworkInType"] = "4.0" } type InvalidNetworkInTypeFault InvalidNetworkInType @@ -49843,7 +49693,6 @@ type InvalidNetworkResource struct { func init() { t["InvalidNetworkResource"] = reflect.TypeOf((*InvalidNetworkResource)(nil)).Elem() - minAPIVersionForType["InvalidNetworkResource"] = "2.5 U2" } type InvalidNetworkResourceFault InvalidNetworkResource @@ -49864,7 +49713,6 @@ type InvalidOperationOnSecondaryVm struct { func init() { t["InvalidOperationOnSecondaryVm"] = reflect.TypeOf((*InvalidOperationOnSecondaryVm)(nil)).Elem() - minAPIVersionForType["InvalidOperationOnSecondaryVm"] = "4.0" } type InvalidOperationOnSecondaryVmFault InvalidOperationOnSecondaryVm @@ -49936,12 +49784,11 @@ type InvalidProfileReferenceHost struct { // Refers instance of `Profile`. Profile *ManagedObjectReference `xml:"profile,omitempty" json:"profile,omitempty"` // The profile name: the replacement of the member above. - ProfileName string `xml:"profileName,omitempty" json:"profileName,omitempty" vim:"6.5"` + ProfileName string `xml:"profileName,omitempty" json:"profileName,omitempty"` } func init() { t["InvalidProfileReferenceHost"] = reflect.TypeOf((*InvalidProfileReferenceHost)(nil)).Elem() - minAPIVersionForType["InvalidProfileReferenceHost"] = "5.0" } type InvalidProfileReferenceHostFault InvalidProfileReferenceHost @@ -49975,7 +49822,6 @@ type InvalidPropertyType struct { func init() { t["InvalidPropertyType"] = reflect.TypeOf((*InvalidPropertyType)(nil)).Elem() - minAPIVersionForType["InvalidPropertyType"] = "4.0" } type InvalidPropertyTypeFault InvalidPropertyType @@ -49991,7 +49837,6 @@ type InvalidPropertyValue struct { func init() { t["InvalidPropertyValue"] = reflect.TypeOf((*InvalidPropertyValue)(nil)).Elem() - minAPIVersionForType["InvalidPropertyValue"] = "4.0" } type InvalidPropertyValueFault BaseInvalidPropertyValue @@ -50144,7 +49989,6 @@ type InvalidVmState struct { func init() { t["InvalidVmState"] = reflect.TypeOf((*InvalidVmState)(nil)).Elem() - minAPIVersionForType["InvalidVmState"] = "6.5" } type InvalidVmStateFault InvalidVmState @@ -50198,7 +50042,6 @@ type InventoryDescription struct { func init() { t["InventoryDescription"] = reflect.TypeOf((*InventoryDescription)(nil)).Elem() - minAPIVersionForType["InventoryDescription"] = "4.0" } // A InventoryHasStandardAloneHosts fault is thrown if an assignment operation tries to downgrade a license that does have allow hosts licensed with StandardAlone license in the inventory. @@ -50210,7 +50053,6 @@ type InventoryHasStandardAloneHosts struct { func init() { t["InventoryHasStandardAloneHosts"] = reflect.TypeOf((*InventoryHasStandardAloneHosts)(nil)).Elem() - minAPIVersionForType["InventoryHasStandardAloneHosts"] = "4.0" } type InventoryHasStandardAloneHostsFault InventoryHasStandardAloneHosts @@ -50233,7 +50075,6 @@ type IoFilterHostIssue struct { func init() { t["IoFilterHostIssue"] = reflect.TypeOf((*IoFilterHostIssue)(nil)).Elem() - minAPIVersionForType["IoFilterHostIssue"] = "6.0" } // Information about an IO Filter. @@ -50253,7 +50094,7 @@ type IoFilterInfo struct { // The set of possible values are listed in // `IoFilterType_enum`. 
// The property is unset if the information is not available. - Type string `xml:"type,omitempty" json:"type,omitempty" vim:"6.5"` + Type string `xml:"type,omitempty" json:"type,omitempty"` // Short description of the IO Filter. // // The property is unset if the information is not available. @@ -50266,7 +50107,16 @@ type IoFilterInfo struct { func init() { t["IoFilterInfo"] = reflect.TypeOf((*IoFilterInfo)(nil)).Elem() - minAPIVersionForType["IoFilterInfo"] = "6.0" +} + +// Specifies an SSL trust policy. +type IoFilterManagerSslTrust struct { + DynamicData +} + +func init() { + t["IoFilterManagerSslTrust"] = reflect.TypeOf((*IoFilterManagerSslTrust)(nil)).Elem() + minAPIVersionForType["IoFilterManagerSslTrust"] = "8.0.3.0" } // Result for `IoFilterManager.QueryIoFilterIssues`. @@ -50284,7 +50134,6 @@ type IoFilterQueryIssueResult struct { func init() { t["IoFilterQueryIssueResult"] = reflect.TypeOf((*IoFilterQueryIssueResult)(nil)).Elem() - minAPIVersionForType["IoFilterQueryIssueResult"] = "6.0" } // This is the abstract base class for IP address. @@ -50294,7 +50143,6 @@ type IpAddress struct { func init() { t["IpAddress"] = reflect.TypeOf((*IpAddress)(nil)).Elem() - minAPIVersionForType["IpAddress"] = "5.5" } // The `IpAddressProfile` represents the Virtual NIC IP address. @@ -50307,7 +50155,6 @@ type IpAddressProfile struct { func init() { t["IpAddressProfile"] = reflect.TypeOf((*IpAddressProfile)(nil)).Elem() - minAPIVersionForType["IpAddressProfile"] = "4.0" } // An error occurred while running the IP/hostname generator application @@ -50374,18 +50221,17 @@ type IpPool struct { // The networks that are associated with this IP pool NetworkAssociation []IpPoolAssociation `xml:"networkAssociation,omitempty" json:"networkAssociation,omitempty"` // The number of IPv4 addresses available for allocation. - AvailableIpv4Addresses int32 `xml:"availableIpv4Addresses,omitempty" json:"availableIpv4Addresses,omitempty" vim:"5.1"` + AvailableIpv4Addresses int32 `xml:"availableIpv4Addresses,omitempty" json:"availableIpv4Addresses,omitempty"` // The number of IPv6 addresses available for allocation. - AvailableIpv6Addresses int32 `xml:"availableIpv6Addresses,omitempty" json:"availableIpv6Addresses,omitempty" vim:"5.1"` + AvailableIpv6Addresses int32 `xml:"availableIpv6Addresses,omitempty" json:"availableIpv6Addresses,omitempty"` // The number of allocated IPv4 addresses. - AllocatedIpv4Addresses int32 `xml:"allocatedIpv4Addresses,omitempty" json:"allocatedIpv4Addresses,omitempty" vim:"5.1"` + AllocatedIpv4Addresses int32 `xml:"allocatedIpv4Addresses,omitempty" json:"allocatedIpv4Addresses,omitempty"` // The number of allocated IPv6 addresses. - AllocatedIpv6Addresses int32 `xml:"allocatedIpv6Addresses,omitempty" json:"allocatedIpv6Addresses,omitempty" vim:"5.1"` + AllocatedIpv6Addresses int32 `xml:"allocatedIpv6Addresses,omitempty" json:"allocatedIpv6Addresses,omitempty"` } func init() { t["IpPool"] = reflect.TypeOf((*IpPool)(nil)).Elem() - minAPIVersionForType["IpPool"] = "4.0" } // Information about a network or portgroup that is associated to an IP pool. @@ -50405,7 +50251,6 @@ type IpPoolAssociation struct { func init() { t["IpPoolAssociation"] = reflect.TypeOf((*IpPoolAssociation)(nil)).Elem() - minAPIVersionForType["IpPoolAssociation"] = "4.0" } // Specifications of either IPv4 or IPv6 configuration to be used @@ -50423,22 +50268,22 @@ type IpPoolIpPoolConfigInfo struct { // Address of the subnet. 
// // For example: - // - IPv4: 192.168.5.0 - // - IPv6: 2001:0db8:85a3:: + // - IPv4: 192.168.5.0 + // - IPv6: 2001:0db8:85a3:: SubnetAddress string `xml:"subnetAddress,omitempty" json:"subnetAddress,omitempty"` // Netmask // // For example: - // - IPv4: 255.255.255.0 - // - IPv6: ffff:ffff:ffff:: + // - IPv4: 255.255.255.0 + // - IPv6: ffff:ffff:ffff:: Netmask string `xml:"netmask,omitempty" json:"netmask,omitempty"` // Gateway. // // This can be an empty string - if no gateway is configured. // // Examples: - // - IPv4: 192.168.5.1 - // - IPv6: 2001:0db8:85a3::1 + // - IPv4: 192.168.5.1 + // - IPv6: 2001:0db8:85a3::1 Gateway string `xml:"gateway,omitempty" json:"gateway,omitempty"` // IP range. // @@ -50447,14 +50292,14 @@ type IpPoolIpPoolConfigInfo struct { // of the range. // // For example: - // - 192.0.2.235 # 20 is the IPv4 range from 192.0.2.235 to 192.0.2.254 - // - 2001::7334 # 20 is the IPv6 range from 2001::7334 to 2001::7347 + // - 192.0.2.235 # 20 is the IPv4 range from 192.0.2.235 to 192.0.2.254 + // - 2001::7334 # 20 is the IPv6 range from 2001::7334 to 2001::7347 Range string `xml:"range,omitempty" json:"range,omitempty"` // DNS servers // // For example: - // - IPv4: \["10.20.0.1", "10.20.0.2"\] - // - IPv6: \["2001:0db8:85a3::0370:7334", "2001:0db8:85a3::0370:7335"\] + // - IPv4: \["10.20.0.1", "10.20.0.2"\] + // - IPv6: \["2001:0db8:85a3::0370:7334", "2001:0db8:85a3::0370:7335"\] // // If an empty list is passed, the existing value remains unchanged. To clear this // list, pass an array containing the empty string as it's only element. @@ -50468,7 +50313,6 @@ type IpPoolIpPoolConfigInfo struct { func init() { t["IpPoolIpPoolConfigInfo"] = reflect.TypeOf((*IpPoolIpPoolConfigInfo)(nil)).Elem() - minAPIVersionForType["IpPoolIpPoolConfigInfo"] = "4.0" } // Describes an IP allocation. @@ -50483,7 +50327,6 @@ type IpPoolManagerIpAllocation struct { func init() { t["IpPoolManagerIpAllocation"] = reflect.TypeOf((*IpPoolManagerIpAllocation)(nil)).Elem() - minAPIVersionForType["IpPoolManagerIpAllocation"] = "5.1" } // This class specifies a range of IP addresses by using prefix. @@ -50501,7 +50344,6 @@ type IpRange struct { func init() { t["IpRange"] = reflect.TypeOf((*IpRange)(nil)).Elem() - minAPIVersionForType["IpRange"] = "5.5" } // The `IpRouteProfile` data object represents the host IP route configuration. @@ -50518,7 +50360,6 @@ type IpRouteProfile struct { func init() { t["IpRouteProfile"] = reflect.TypeOf((*IpRouteProfile)(nil)).Elem() - minAPIVersionForType["IpRouteProfile"] = "4.0" } type IsClusteredVmdkEnabled IsClusteredVmdkEnabledRequestType @@ -50597,7 +50438,6 @@ type IscsiDependencyEntity struct { func init() { t["IscsiDependencyEntity"] = reflect.TypeOf((*IscsiDependencyEntity)(nil)).Elem() - minAPIVersionForType["IscsiDependencyEntity"] = "5.0" } // Base class for faults that can be thrown while invoking iSCSI management operations. 
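The `IpPoolIpPoolConfigInfo` comments above give concrete formats for the subnet, netmask, gateway and range fields. A minimal sketch of an IPv4 pool configuration built from those documented example values; only the fields shown in this hunk are set, and the struct literal is illustrative rather than a complete pool definition.

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	// Values taken from the field comments above: subnet 192.168.5.0 with
	// netmask 255.255.255.0 and gateway 192.168.5.1; the range string uses
	// the documented "address # length" form, here 20 addresses starting at
	// 192.0.2.235.
	cfg := types.IpPoolIpPoolConfigInfo{
		SubnetAddress: "192.168.5.0",
		Netmask:       "255.255.255.0",
		Gateway:       "192.168.5.1",
		Range:         "192.0.2.235#20",
	}
	fmt.Printf("%+v\n", cfg)
}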
@@ -50607,7 +50447,6 @@ type IscsiFault struct { func init() { t["IscsiFault"] = reflect.TypeOf((*IscsiFault)(nil)).Elem() - minAPIVersionForType["IscsiFault"] = "5.0" } type IscsiFaultFault BaseIscsiFault @@ -50629,7 +50468,6 @@ type IscsiFaultInvalidVnic struct { func init() { t["IscsiFaultInvalidVnic"] = reflect.TypeOf((*IscsiFaultInvalidVnic)(nil)).Elem() - minAPIVersionForType["IscsiFaultInvalidVnic"] = "5.0" } type IscsiFaultInvalidVnicFault IscsiFaultInvalidVnic @@ -50647,7 +50485,6 @@ type IscsiFaultPnicInUse struct { func init() { t["IscsiFaultPnicInUse"] = reflect.TypeOf((*IscsiFaultPnicInUse)(nil)).Elem() - minAPIVersionForType["IscsiFaultPnicInUse"] = "5.0" } type IscsiFaultPnicInUseFault IscsiFaultPnicInUse @@ -50665,7 +50502,6 @@ type IscsiFaultVnicAlreadyBound struct { func init() { t["IscsiFaultVnicAlreadyBound"] = reflect.TypeOf((*IscsiFaultVnicAlreadyBound)(nil)).Elem() - minAPIVersionForType["IscsiFaultVnicAlreadyBound"] = "5.0" } type IscsiFaultVnicAlreadyBoundFault IscsiFaultVnicAlreadyBound @@ -50683,7 +50519,6 @@ type IscsiFaultVnicHasActivePaths struct { func init() { t["IscsiFaultVnicHasActivePaths"] = reflect.TypeOf((*IscsiFaultVnicHasActivePaths)(nil)).Elem() - minAPIVersionForType["IscsiFaultVnicHasActivePaths"] = "5.0" } type IscsiFaultVnicHasActivePathsFault IscsiFaultVnicHasActivePaths @@ -50702,7 +50537,6 @@ type IscsiFaultVnicHasMultipleUplinks struct { func init() { t["IscsiFaultVnicHasMultipleUplinks"] = reflect.TypeOf((*IscsiFaultVnicHasMultipleUplinks)(nil)).Elem() - minAPIVersionForType["IscsiFaultVnicHasMultipleUplinks"] = "5.0" } type IscsiFaultVnicHasMultipleUplinksFault IscsiFaultVnicHasMultipleUplinks @@ -50721,7 +50555,6 @@ type IscsiFaultVnicHasNoUplinks struct { func init() { t["IscsiFaultVnicHasNoUplinks"] = reflect.TypeOf((*IscsiFaultVnicHasNoUplinks)(nil)).Elem() - minAPIVersionForType["IscsiFaultVnicHasNoUplinks"] = "5.0" } type IscsiFaultVnicHasNoUplinksFault IscsiFaultVnicHasNoUplinks @@ -50744,7 +50577,6 @@ type IscsiFaultVnicHasWrongUplink struct { func init() { t["IscsiFaultVnicHasWrongUplink"] = reflect.TypeOf((*IscsiFaultVnicHasWrongUplink)(nil)).Elem() - minAPIVersionForType["IscsiFaultVnicHasWrongUplink"] = "5.0" } type IscsiFaultVnicHasWrongUplinkFault IscsiFaultVnicHasWrongUplink @@ -50762,7 +50594,6 @@ type IscsiFaultVnicInUse struct { func init() { t["IscsiFaultVnicInUse"] = reflect.TypeOf((*IscsiFaultVnicInUse)(nil)).Elem() - minAPIVersionForType["IscsiFaultVnicInUse"] = "5.0" } type IscsiFaultVnicInUseFault IscsiFaultVnicInUse @@ -50783,7 +50614,6 @@ type IscsiFaultVnicIsLastPath struct { func init() { t["IscsiFaultVnicIsLastPath"] = reflect.TypeOf((*IscsiFaultVnicIsLastPath)(nil)).Elem() - minAPIVersionForType["IscsiFaultVnicIsLastPath"] = "5.0" } type IscsiFaultVnicIsLastPathFault IscsiFaultVnicIsLastPath @@ -50802,7 +50632,6 @@ type IscsiFaultVnicNotBound struct { func init() { t["IscsiFaultVnicNotBound"] = reflect.TypeOf((*IscsiFaultVnicNotBound)(nil)).Elem() - minAPIVersionForType["IscsiFaultVnicNotBound"] = "5.0" } type IscsiFaultVnicNotBoundFault IscsiFaultVnicNotBound @@ -50820,7 +50649,6 @@ type IscsiFaultVnicNotFound struct { func init() { t["IscsiFaultVnicNotFound"] = reflect.TypeOf((*IscsiFaultVnicNotFound)(nil)).Elem() - minAPIVersionForType["IscsiFaultVnicNotFound"] = "5.0" } type IscsiFaultVnicNotFoundFault IscsiFaultVnicNotFound @@ -50851,7 +50679,6 @@ type IscsiMigrationDependency struct { func init() { t["IscsiMigrationDependency"] = reflect.TypeOf((*IscsiMigrationDependency)(nil)).Elem() - 
minAPIVersionForType["IscsiMigrationDependency"] = "5.0" } // The `IscsiPortInfo` data object describes the @@ -50910,23 +50737,23 @@ type IscsiPortInfo struct { // // This property is set only when vnicDevice is associated with an // opaque network. - OpaqueNetworkId string `xml:"opaqueNetworkId,omitempty" json:"opaqueNetworkId,omitempty" vim:"6.5"` + OpaqueNetworkId string `xml:"opaqueNetworkId,omitempty" json:"opaqueNetworkId,omitempty"` // Type of the Opaque network to which the virtual NIC is connected. // // This property is set only when vnicDevice is associated with an // opaque network. - OpaqueNetworkType string `xml:"opaqueNetworkType,omitempty" json:"opaqueNetworkType,omitempty" vim:"6.5"` + OpaqueNetworkType string `xml:"opaqueNetworkType,omitempty" json:"opaqueNetworkType,omitempty"` // Name of the Opaque network to which the virtual NIC is connected. // // This property is set only when vnicDevice is associated with an // opaque network. - OpaqueNetworkName string `xml:"opaqueNetworkName,omitempty" json:"opaqueNetworkName,omitempty" vim:"6.5"` + OpaqueNetworkName string `xml:"opaqueNetworkName,omitempty" json:"opaqueNetworkName,omitempty"` // An ID assigned to the vmkernel adapter by external management plane // or controller. // // This property is set only when vnicDevice is associated with an // opaque network. - ExternalId string `xml:"externalId,omitempty" json:"externalId,omitempty" vim:"6.5"` + ExternalId string `xml:"externalId,omitempty" json:"externalId,omitempty"` // Status indicating whether the Virtual NIC is compliant with the // network policy that is required by iSCSI port binding. // @@ -50941,7 +50768,6 @@ type IscsiPortInfo struct { func init() { t["IscsiPortInfo"] = reflect.TypeOf((*IscsiPortInfo)(nil)).Elem() - minAPIVersionForType["IscsiPortInfo"] = "5.0" } // The `IscsiStatus` data object describes the @@ -50960,7 +50786,6 @@ type IscsiStatus struct { func init() { t["IscsiStatus"] = reflect.TypeOf((*IscsiStatus)(nil)).Elem() - minAPIVersionForType["IscsiStatus"] = "5.0" } // This data object type describes a file that is an ISO CD-ROM image. @@ -51067,7 +50892,6 @@ type KernelModuleInfo struct { func init() { t["KernelModuleInfo"] = reflect.TypeOf((*KernelModuleInfo)(nil)).Elem() - minAPIVersionForType["KernelModuleInfo"] = "4.0" } // Information about a module section. @@ -51082,7 +50906,6 @@ type KernelModuleSectionInfo struct { func init() { t["KernelModuleSectionInfo"] = reflect.TypeOf((*KernelModuleSectionInfo)(nil)).Elem() - minAPIVersionForType["KernelModuleSectionInfo"] = "4.0" } // Non-localized key/value pair in which the @@ -51098,7 +50921,6 @@ type KeyAnyValue struct { func init() { t["KeyAnyValue"] = reflect.TypeOf((*KeyAnyValue)(nil)).Elem() - minAPIVersionForType["KeyAnyValue"] = "4.0" } // An KeyNotFound fault is returned when the key does not exist among @@ -51112,7 +50934,6 @@ type KeyNotFound struct { func init() { t["KeyNotFound"] = reflect.TypeOf((*KeyNotFound)(nil)).Elem() - minAPIVersionForType["KeyNotFound"] = "6.7.2" } type KeyNotFoundFault KeyNotFound @@ -51134,7 +50955,6 @@ type KeyProviderId struct { func init() { t["KeyProviderId"] = reflect.TypeOf((*KeyProviderId)(nil)).Elem() - minAPIVersionForType["KeyProviderId"] = "6.5" } // Non-localized key/value pair @@ -51149,7 +50969,6 @@ type KeyValue struct { func init() { t["KeyValue"] = reflect.TypeOf((*KeyValue)(nil)).Elem() - minAPIVersionForType["KeyValue"] = "2.5" } // Data Object representing a cluster of KMIP servers. 
@@ -51171,7 +50990,7 @@ type KmipClusterInfo struct { // Key provider management type. // // See `KmipClusterInfoKmsManagementType_enum` for valid values. - ManagementType string `xml:"managementType,omitempty" json:"managementType,omitempty" vim:"7.0"` + ManagementType string `xml:"managementType,omitempty" json:"managementType,omitempty"` // Use this cluster as default for the managed entities, // when the optional CryptoKeyId.providerId is not set. // @@ -51179,15 +50998,14 @@ type KmipClusterInfo struct { // supported managed entity type. // // Refers instances of `ManagedEntity`. - UseAsEntityDefault []ManagedObjectReference `xml:"useAsEntityDefault,omitempty" json:"useAsEntityDefault,omitempty" vim:"7.0"` - HasBackup *bool `xml:"hasBackup" json:"hasBackup,omitempty"` - TpmRequired *bool `xml:"tpmRequired" json:"tpmRequired,omitempty"` - KeyId string `xml:"keyId,omitempty" json:"keyId,omitempty"` + UseAsEntityDefault []ManagedObjectReference `xml:"useAsEntityDefault,omitempty" json:"useAsEntityDefault,omitempty"` + HasBackup *bool `xml:"hasBackup" json:"hasBackup,omitempty" vim:"7.0.2.0"` + TpmRequired *bool `xml:"tpmRequired" json:"tpmRequired,omitempty" vim:"7.0.2.0"` + KeyId string `xml:"keyId,omitempty" json:"keyId,omitempty" vim:"7.0.2.0"` } func init() { t["KmipClusterInfo"] = reflect.TypeOf((*KmipClusterInfo)(nil)).Elem() - minAPIVersionForType["KmipClusterInfo"] = "6.5" } // Data Object representing a KMIP server connection information. @@ -51233,7 +51051,6 @@ type KmipServerInfo struct { func init() { t["KmipServerInfo"] = reflect.TypeOf((*KmipServerInfo)(nil)).Elem() - minAPIVersionForType["KmipServerInfo"] = "6.5" } // Data Object representing a KMIP server connection spec. @@ -51255,7 +51072,6 @@ type KmipServerSpec struct { func init() { t["KmipServerSpec"] = reflect.TypeOf((*KmipServerSpec)(nil)).Elem() - minAPIVersionForType["KmipServerSpec"] = "6.5" } // Data Object representing a KMIP server status. @@ -51274,7 +51090,6 @@ type KmipServerStatus struct { func init() { t["KmipServerStatus"] = reflect.TypeOf((*KmipServerStatus)(nil)).Elem() - minAPIVersionForType["KmipServerStatus"] = "6.5" } // The virtual machine is using a 2TB+ RDM device and operation is @@ -51288,7 +51103,6 @@ type LargeRDMConversionNotSupported struct { func init() { t["LargeRDMConversionNotSupported"] = reflect.TypeOf((*LargeRDMConversionNotSupported)(nil)).Elem() - minAPIVersionForType["LargeRDMConversionNotSupported"] = "5.0" } type LargeRDMConversionNotSupportedFault LargeRDMConversionNotSupported @@ -51319,7 +51133,6 @@ type LargeRDMNotSupportedOnDatastore struct { func init() { t["LargeRDMNotSupportedOnDatastore"] = reflect.TypeOf((*LargeRDMNotSupportedOnDatastore)(nil)).Elem() - minAPIVersionForType["LargeRDMNotSupportedOnDatastore"] = "5.0" } type LargeRDMNotSupportedOnDatastoreFault LargeRDMNotSupportedOnDatastore @@ -51362,7 +51175,6 @@ type LatencySensitivity struct { func init() { t["LatencySensitivity"] = reflect.TypeOf((*LatencySensitivity)(nil)).Elem() - minAPIVersionForType["LatencySensitivity"] = "5.1" } // The parameters of `HostActiveDirectoryAuthentication.LeaveCurrentDomain_Task`. 
@@ -51419,7 +51231,6 @@ type LicenseAssignmentFailed struct { func init() { t["LicenseAssignmentFailed"] = reflect.TypeOf((*LicenseAssignmentFailed)(nil)).Elem() - minAPIVersionForType["LicenseAssignmentFailed"] = "4.0" } type LicenseAssignmentFailedFault LicenseAssignmentFailed @@ -51523,7 +51334,6 @@ type LicenseDiagnostics struct { func init() { t["LicenseDiagnostics"] = reflect.TypeOf((*LicenseDiagnostics)(nil)).Elem() - minAPIVersionForType["LicenseDiagnostics"] = "2.5" } // A LicenseDowngradeDisallowed fault is thrown if an assignment operation tries to downgrade a license that does have certain licensed features which are in use. @@ -51539,7 +51349,6 @@ type LicenseDowngradeDisallowed struct { func init() { t["LicenseDowngradeDisallowed"] = reflect.TypeOf((*LicenseDowngradeDisallowed)(nil)).Elem() - minAPIVersionForType["LicenseDowngradeDisallowed"] = "4.0" } type LicenseDowngradeDisallowedFault LicenseDowngradeDisallowed @@ -51560,7 +51369,6 @@ type LicenseEntityNotFound struct { func init() { t["LicenseEntityNotFound"] = reflect.TypeOf((*LicenseEntityNotFound)(nil)).Elem() - minAPIVersionForType["LicenseEntityNotFound"] = "4.0" } type LicenseEntityNotFoundFault LicenseEntityNotFound @@ -51588,7 +51396,6 @@ type LicenseExpired struct { func init() { t["LicenseExpired"] = reflect.TypeOf((*LicenseExpired)(nil)).Elem() - minAPIVersionForType["LicenseExpired"] = "4.0" } // This event records the expiration of a license. @@ -51624,7 +51431,7 @@ type LicenseFeatureInfo struct { // The display string for the feature name. FeatureName string `xml:"featureName" json:"featureName"` // A human readable description of what function this feature enables. - FeatureDescription string `xml:"featureDescription,omitempty" json:"featureDescription,omitempty" vim:"2.5"` + FeatureDescription string `xml:"featureDescription,omitempty" json:"featureDescription,omitempty"` // Describes the state of the feature based on the current edition license. // // This @@ -51638,13 +51445,13 @@ type LicenseFeatureInfo struct { // Describe any restriction on the source of a license for this feature. // // See also `LicenseFeatureInfoSourceRestriction_enum`. - SourceRestriction string `xml:"sourceRestriction,omitempty" json:"sourceRestriction,omitempty" vim:"2.5"` + SourceRestriction string `xml:"sourceRestriction,omitempty" json:"sourceRestriction,omitempty"` // Report List of feature keys used by this edition. - DependentKey []string `xml:"dependentKey,omitempty" json:"dependentKey,omitempty" vim:"2.5"` + DependentKey []string `xml:"dependentKey,omitempty" json:"dependentKey,omitempty"` // Flag to indicate whether the feature is an edition. 
- Edition *bool `xml:"edition" json:"edition,omitempty" vim:"2.5"` + Edition *bool `xml:"edition" json:"edition,omitempty"` // Date representing the expiration date - ExpiresOn *time.Time `xml:"expiresOn" json:"expiresOn,omitempty" vim:"2.5"` + ExpiresOn *time.Time `xml:"expiresOn" json:"expiresOn,omitempty"` } func init() { @@ -51660,7 +51467,6 @@ type LicenseKeyEntityMismatch struct { func init() { t["LicenseKeyEntityMismatch"] = reflect.TypeOf((*LicenseKeyEntityMismatch)(nil)).Elem() - minAPIVersionForType["LicenseKeyEntityMismatch"] = "4.0" } type LicenseKeyEntityMismatchFault LicenseKeyEntityMismatch @@ -51679,7 +51485,6 @@ type LicenseManagerEvaluationInfo struct { func init() { t["LicenseManagerEvaluationInfo"] = reflect.TypeOf((*LicenseManagerEvaluationInfo)(nil)).Elem() - minAPIVersionForType["LicenseManagerEvaluationInfo"] = "4.0" } // Encapsulates information about a license @@ -51708,7 +51513,6 @@ type LicenseManagerLicenseInfo struct { func init() { t["LicenseManagerLicenseInfo"] = reflect.TypeOf((*LicenseManagerLicenseInfo)(nil)).Elem() - minAPIVersionForType["LicenseManagerLicenseInfo"] = "4.0" } // This event records that the inventory is not license compliant. @@ -51722,7 +51526,6 @@ type LicenseNonComplianceEvent struct { func init() { t["LicenseNonComplianceEvent"] = reflect.TypeOf((*LicenseNonComplianceEvent)(nil)).Elem() - minAPIVersionForType["LicenseNonComplianceEvent"] = "4.0" } // Deprecated as of vSphere API 4.0, this is not used by the system. @@ -51761,7 +51564,6 @@ type LicenseRestricted struct { func init() { t["LicenseRestricted"] = reflect.TypeOf((*LicenseRestricted)(nil)).Elem() - minAPIVersionForType["LicenseRestricted"] = "2.5" } // This event records if the required licenses could not be reserved because @@ -51772,7 +51574,6 @@ type LicenseRestrictedEvent struct { func init() { t["LicenseRestrictedEvent"] = reflect.TypeOf((*LicenseRestrictedEvent)(nil)).Elem() - minAPIVersionForType["LicenseRestrictedEvent"] = "2.5" } type LicenseRestrictedFault LicenseRestricted @@ -51868,7 +51669,6 @@ type LicenseSourceUnavailable struct { func init() { t["LicenseSourceUnavailable"] = reflect.TypeOf((*LicenseSourceUnavailable)(nil)).Elem() - minAPIVersionForType["LicenseSourceUnavailable"] = "2.5" } type LicenseSourceUnavailableFault LicenseSourceUnavailable @@ -51914,7 +51714,6 @@ type LimitExceeded struct { func init() { t["LimitExceeded"] = reflect.TypeOf((*LimitExceeded)(nil)).Elem() - minAPIVersionForType["LimitExceeded"] = "4.0" } type LimitExceededFault LimitExceeded @@ -51942,7 +51741,6 @@ type LinkDiscoveryProtocolConfig struct { func init() { t["LinkDiscoveryProtocolConfig"] = reflect.TypeOf((*LinkDiscoveryProtocolConfig)(nil)).Elem() - minAPIVersionForType["LinkDiscoveryProtocolConfig"] = "4.0" } // The Link Layer Discovery Protocol information. @@ -51975,7 +51773,6 @@ type LinkLayerDiscoveryProtocolInfo struct { func init() { t["LinkLayerDiscoveryProtocolInfo"] = reflect.TypeOf((*LinkLayerDiscoveryProtocolInfo)(nil)).Elem() - minAPIVersionForType["LinkLayerDiscoveryProtocolInfo"] = "5.0" } // The LinkProfile data object represents a subprofile @@ -51986,7 +51783,6 @@ type LinkProfile struct { func init() { t["LinkProfile"] = reflect.TypeOf((*LinkProfile)(nil)).Elem() - minAPIVersionForType["LinkProfile"] = "4.0" } // Customization operation is performed on a linux source vm that @@ -52437,7 +52233,7 @@ type LocalDatastoreCreatedEvent struct { // The associated datastore. 
Datastore DatastoreEventArgument `xml:"datastore" json:"datastore"` // Url of the associated datastore. - DatastoreUrl string `xml:"datastoreUrl,omitempty" json:"datastoreUrl,omitempty" vim:"6.5"` + DatastoreUrl string `xml:"datastoreUrl,omitempty" json:"datastoreUrl,omitempty"` } func init() { @@ -52482,7 +52278,6 @@ type LocalTSMEnabledEvent struct { func init() { t["LocalTSMEnabledEvent"] = reflect.TypeOf((*LocalTSMEnabledEvent)(nil)).Elem() - minAPIVersionForType["LocalTSMEnabledEvent"] = "4.1" } // Message data which is intended to be displayed according @@ -52528,7 +52323,6 @@ type LocalizableMessage struct { func init() { t["LocalizableMessage"] = reflect.TypeOf((*LocalizableMessage)(nil)).Elem() - minAPIVersionForType["LocalizableMessage"] = "4.0" } // Description of an available message catalog @@ -52558,12 +52352,11 @@ type LocalizationManagerMessageCatalog struct { // The format is dot-separated version string, e.g. // // "1.2.3". - Version string `xml:"version,omitempty" json:"version,omitempty" vim:"5.0"` + Version string `xml:"version,omitempty" json:"version,omitempty"` } func init() { t["LocalizationManagerMessageCatalog"] = reflect.TypeOf((*LocalizationManagerMessageCatalog)(nil)).Elem() - minAPIVersionForType["LocalizationManagerMessageCatalog"] = "4.0" } // A wrapper class used to pass MethodFault data objects over the wire @@ -52598,7 +52391,6 @@ type LockerMisconfiguredEvent struct { func init() { t["LockerMisconfiguredEvent"] = reflect.TypeOf((*LockerMisconfiguredEvent)(nil)).Elem() - minAPIVersionForType["LockerMisconfiguredEvent"] = "2.5" } // Locker was reconfigured to a new location. @@ -52618,7 +52410,6 @@ type LockerReconfiguredEvent struct { func init() { t["LockerReconfiguredEvent"] = reflect.TypeOf((*LockerReconfiguredEvent)(nil)).Elem() - minAPIVersionForType["LockerReconfiguredEvent"] = "2.5" } // A LogBundlingFailed exception is thrown when generation of a diagnostic @@ -52868,7 +52659,6 @@ type LongPolicy struct { func init() { t["LongPolicy"] = reflect.TypeOf((*LongPolicy)(nil)).Elem() - minAPIVersionForType["LongPolicy"] = "4.0" } type LookupDvPortGroup LookupDvPortGroupRequestType @@ -52926,7 +52716,6 @@ type MacAddress struct { func init() { t["MacAddress"] = reflect.TypeOf((*MacAddress)(nil)).Elem() - minAPIVersionForType["MacAddress"] = "5.5" } // This class defines a range of MAC address. @@ -52948,7 +52737,6 @@ type MacRange struct { func init() { t["MacRange"] = reflect.TypeOf((*MacRange)(nil)).Elem() - minAPIVersionForType["MacRange"] = "5.5" } // Migration of the virtual machine to the target host will need a move of @@ -52960,7 +52748,6 @@ type MaintenanceModeFileMove struct { func init() { t["MaintenanceModeFileMove"] = reflect.TypeOf((*MaintenanceModeFileMove)(nil)).Elem() - minAPIVersionForType["MaintenanceModeFileMove"] = "2.5" } type MaintenanceModeFileMoveFault MaintenanceModeFileMove @@ -53080,7 +52867,6 @@ type ManagedByInfo struct { func init() { t["ManagedByInfo"] = reflect.TypeOf((*ManagedByInfo)(nil)).Elem() - minAPIVersionForType["ManagedByInfo"] = "5.0" } // The general event argument for a managed entity. 
@@ -53386,7 +53172,6 @@ type MemoryFileFormatNotSupportedByDatastore struct { func init() { t["MemoryFileFormatNotSupportedByDatastore"] = reflect.TypeOf((*MemoryFileFormatNotSupportedByDatastore)(nil)).Elem() - minAPIVersionForType["MemoryFileFormatNotSupportedByDatastore"] = "6.0" } type MemoryFileFormatNotSupportedByDatastoreFault MemoryFileFormatNotSupportedByDatastore @@ -53402,7 +53187,6 @@ type MemoryHotPlugNotSupported struct { func init() { t["MemoryHotPlugNotSupported"] = reflect.TypeOf((*MemoryHotPlugNotSupported)(nil)).Elem() - minAPIVersionForType["MemoryHotPlugNotSupported"] = "4.0" } type MemoryHotPlugNotSupportedFault MemoryHotPlugNotSupported @@ -53426,7 +53210,6 @@ type MemorySizeNotRecommended struct { func init() { t["MemorySizeNotRecommended"] = reflect.TypeOf((*MemorySizeNotRecommended)(nil)).Elem() - minAPIVersionForType["MemorySizeNotRecommended"] = "2.5" } type MemorySizeNotRecommendedFault MemorySizeNotRecommended @@ -53450,7 +53233,6 @@ type MemorySizeNotSupported struct { func init() { t["MemorySizeNotSupported"] = reflect.TypeOf((*MemorySizeNotSupported)(nil)).Elem() - minAPIVersionForType["MemorySizeNotSupported"] = "2.5" } // The memory amount of the virtual machine is not within the acceptable @@ -53470,7 +53252,6 @@ type MemorySizeNotSupportedByDatastore struct { func init() { t["MemorySizeNotSupportedByDatastore"] = reflect.TypeOf((*MemorySizeNotSupportedByDatastore)(nil)).Elem() - minAPIVersionForType["MemorySizeNotSupportedByDatastore"] = "5.0" } type MemorySizeNotSupportedByDatastoreFault MemorySizeNotSupportedByDatastore @@ -53587,7 +53368,6 @@ type MethodAlreadyDisabledFault struct { func init() { t["MethodAlreadyDisabledFault"] = reflect.TypeOf((*MethodAlreadyDisabledFault)(nil)).Elem() - minAPIVersionForType["MethodAlreadyDisabledFault"] = "4.1" } type MethodAlreadyDisabledFaultFault MethodAlreadyDisabledFault @@ -53621,7 +53401,6 @@ type MethodDisabled struct { func init() { t["MethodDisabled"] = reflect.TypeOf((*MethodDisabled)(nil)).Elem() - minAPIVersionForType["MethodDisabled"] = "2.5" } type MethodDisabledFault MethodDisabled @@ -53634,11 +53413,11 @@ func init() { // that an application might handle. type MethodFault struct { // Fault which is the cause of this fault. - FaultCause *LocalizedMethodFault `xml:"faultCause,omitempty" json:"faultCause,omitempty" vim:"4.0"` + FaultCause *LocalizedMethodFault `xml:"faultCause,omitempty" json:"faultCause,omitempty"` // Message which has details about the error // Message can also contain a key to message catalog which // can be used to generate better localized messages. - FaultMessage []LocalizableMessage `xml:"faultMessage,omitempty" json:"faultMessage,omitempty" vim:"4.0"` + FaultMessage []LocalizableMessage `xml:"faultMessage,omitempty" json:"faultMessage,omitempty"` } func init() { @@ -53710,7 +53489,7 @@ type MetricAlarmExpression struct { // // If unset, the yellow status is // triggered immediately when the yellow condition becomes true. - YellowInterval int32 `xml:"yellowInterval,omitempty" json:"yellowInterval,omitempty" vim:"4.0"` + YellowInterval int32 `xml:"yellowInterval,omitempty" json:"yellowInterval,omitempty"` // Whether or not to test for a red condition. // // If not set, do not calculate red status. @@ -53721,7 +53500,7 @@ type MetricAlarmExpression struct { // // If unset, the red status is // triggered immediately when the red condition becomes true. 
- RedInterval int32 `xml:"redInterval,omitempty" json:"redInterval,omitempty" vim:"4.0"` + RedInterval int32 `xml:"redInterval,omitempty" json:"redInterval,omitempty"` } func init() { @@ -53778,7 +53557,6 @@ type MigrationDisabled struct { func init() { t["MigrationDisabled"] = reflect.TypeOf((*MigrationDisabled)(nil)).Elem() - minAPIVersionForType["MigrationDisabled"] = "4.0" } type MigrationDisabledFault MigrationDisabled @@ -53872,7 +53650,6 @@ type MigrationFeatureNotSupported struct { func init() { t["MigrationFeatureNotSupported"] = reflect.TypeOf((*MigrationFeatureNotSupported)(nil)).Elem() - minAPIVersionForType["MigrationFeatureNotSupported"] = "2.5" } type MigrationFeatureNotSupportedFault BaseMigrationFeatureNotSupported @@ -53918,7 +53695,6 @@ type MigrationNotReady struct { func init() { t["MigrationNotReady"] = reflect.TypeOf((*MigrationNotReady)(nil)).Elem() - minAPIVersionForType["MigrationNotReady"] = "4.0" } type MigrationNotReadyFault MigrationNotReady @@ -53982,7 +53758,6 @@ type MismatchedBundle struct { func init() { t["MismatchedBundle"] = reflect.TypeOf((*MismatchedBundle)(nil)).Elem() - minAPIVersionForType["MismatchedBundle"] = "2.5" } type MismatchedBundleFault MismatchedBundle @@ -54054,7 +53829,6 @@ type MissingBmcSupport struct { func init() { t["MissingBmcSupport"] = reflect.TypeOf((*MissingBmcSupport)(nil)).Elem() - minAPIVersionForType["MissingBmcSupport"] = "4.0" } type MissingBmcSupportFault MissingBmcSupport @@ -54086,7 +53860,6 @@ type MissingIpPool struct { func init() { t["MissingIpPool"] = reflect.TypeOf((*MissingIpPool)(nil)).Elem() - minAPIVersionForType["MissingIpPool"] = "5.0" } type MissingIpPoolFault MissingIpPool @@ -54118,7 +53891,6 @@ type MissingNetworkIpConfig struct { func init() { t["MissingNetworkIpConfig"] = reflect.TypeOf((*MissingNetworkIpConfig)(nil)).Elem() - minAPIVersionForType["MissingNetworkIpConfig"] = "4.0" } type MissingNetworkIpConfigFault MissingNetworkIpConfig @@ -54139,10 +53911,10 @@ type MissingObject struct { // Fault describing the failure to lookup this object // // The possible faults for missing objects are: - // - `SystemError` if there was some unknown problem - // looking up the object - // - `ManagedObjectNotFound` if the object is no - // longer available + // - `SystemError` if there was some unknown problem + // looking up the object + // - `ManagedObjectNotFound` if the object is no + // longer available Fault LocalizedMethodFault `xml:"fault" json:"fault"` } @@ -54158,7 +53930,6 @@ type MissingPowerOffConfiguration struct { func init() { t["MissingPowerOffConfiguration"] = reflect.TypeOf((*MissingPowerOffConfiguration)(nil)).Elem() - minAPIVersionForType["MissingPowerOffConfiguration"] = "4.0" } type MissingPowerOffConfigurationFault MissingPowerOffConfiguration @@ -54175,7 +53946,6 @@ type MissingPowerOnConfiguration struct { func init() { t["MissingPowerOnConfiguration"] = reflect.TypeOf((*MissingPowerOnConfiguration)(nil)).Elem() - minAPIVersionForType["MissingPowerOnConfiguration"] = "4.0" } type MissingPowerOnConfigurationFault MissingPowerOnConfiguration @@ -54193,10 +53963,10 @@ type MissingProperty struct { // Fault describing the failure to retrieve the property value. 
// // The possible faults for missing properties are: - // - `SystemError` if there was some unknown problem - // reading the value - // - `SecurityError` if the logged in session did - // not have permission to read the value + // - `SystemError` if there was some unknown problem + // reading the value + // - `SecurityError` if the logged in session did + // not have permission to read the value Fault LocalizedMethodFault `xml:"fault" json:"fault"` } @@ -54230,7 +54000,6 @@ type MksConnectionLimitReached struct { func init() { t["MksConnectionLimitReached"] = reflect.TypeOf((*MksConnectionLimitReached)(nil)).Elem() - minAPIVersionForType["MksConnectionLimitReached"] = "5.0" } type MksConnectionLimitReachedFault MksConnectionLimitReached @@ -54728,7 +54497,7 @@ type MoveVirtualDiskRequestType struct { // If not specified, it is assumed to be false Force *bool `xml:"force" json:"force,omitempty"` // User can specify new set of profile when moving virtual disk. - Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr" json:"profile,omitempty" vim:"5.5"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr" json:"profile,omitempty"` } func init() { @@ -54753,7 +54522,6 @@ type MtuMatchEvent struct { func init() { t["MtuMatchEvent"] = reflect.TypeOf((*MtuMatchEvent)(nil)).Elem() - minAPIVersionForType["MtuMatchEvent"] = "5.1" } // The value of MTU configured in the vSphere Distributed Switch @@ -54764,7 +54532,6 @@ type MtuMismatchEvent struct { func init() { t["MtuMismatchEvent"] = reflect.TypeOf((*MtuMismatchEvent)(nil)).Elem() - minAPIVersionForType["MtuMismatchEvent"] = "5.1" } // The multi-writer sharing of the specified virtual disk is not supported. @@ -54779,7 +54546,6 @@ type MultiWriterNotSupported struct { func init() { t["MultiWriterNotSupported"] = reflect.TypeOf((*MultiWriterNotSupported)(nil)).Elem() - minAPIVersionForType["MultiWriterNotSupported"] = "6.0" } type MultiWriterNotSupportedFault MultiWriterNotSupported @@ -54810,7 +54576,6 @@ type MultipleCertificatesVerifyFault struct { func init() { t["MultipleCertificatesVerifyFault"] = reflect.TypeOf((*MultipleCertificatesVerifyFault)(nil)).Elem() - minAPIVersionForType["MultipleCertificatesVerifyFault"] = "4.0" } type MultipleCertificatesVerifyFaultFault MultipleCertificatesVerifyFault @@ -54855,7 +54620,7 @@ type NASDatastoreCreatedEvent struct { // The associated datastore. Datastore DatastoreEventArgument `xml:"datastore" json:"datastore"` // Url of the associated datastore. 
- DatastoreUrl string `xml:"datastoreUrl,omitempty" json:"datastoreUrl,omitempty" vim:"6.5"` + DatastoreUrl string `xml:"datastoreUrl,omitempty" json:"datastoreUrl,omitempty"` } func init() { @@ -54885,7 +54650,6 @@ type NamePasswordAuthentication struct { func init() { t["NamePasswordAuthentication"] = reflect.TypeOf((*NamePasswordAuthentication)(nil)).Elem() - minAPIVersionForType["NamePasswordAuthentication"] = "5.0" } // A NamespaceFull fault is thrown when an operation @@ -54907,7 +54671,6 @@ type NamespaceFull struct { func init() { t["NamespaceFull"] = reflect.TypeOf((*NamespaceFull)(nil)).Elem() - minAPIVersionForType["NamespaceFull"] = "5.1" } type NamespaceFullFault NamespaceFull @@ -54927,7 +54690,6 @@ type NamespaceLimitReached struct { func init() { t["NamespaceLimitReached"] = reflect.TypeOf((*NamespaceLimitReached)(nil)).Elem() - minAPIVersionForType["NamespaceLimitReached"] = "5.1" } type NamespaceLimitReachedFault NamespaceLimitReached @@ -54947,7 +54709,6 @@ type NamespaceWriteProtected struct { func init() { t["NamespaceWriteProtected"] = reflect.TypeOf((*NamespaceWriteProtected)(nil)).Elem() - minAPIVersionForType["NamespaceWriteProtected"] = "5.1" } type NamespaceWriteProtectedFault NamespaceWriteProtected @@ -54966,7 +54727,6 @@ type NasConfigFault struct { func init() { t["NasConfigFault"] = reflect.TypeOf((*NasConfigFault)(nil)).Elem() - minAPIVersionForType["NasConfigFault"] = "2.5 U2" } type NasConfigFaultFault BaseNasConfigFault @@ -54989,7 +54749,6 @@ type NasConnectionLimitReached struct { func init() { t["NasConnectionLimitReached"] = reflect.TypeOf((*NasConnectionLimitReached)(nil)).Elem() - minAPIVersionForType["NasConnectionLimitReached"] = "2.5 U2" } type NasConnectionLimitReachedFault NasConnectionLimitReached @@ -55028,7 +54787,6 @@ type NasSessionCredentialConflict struct { func init() { t["NasSessionCredentialConflict"] = reflect.TypeOf((*NasSessionCredentialConflict)(nil)).Elem() - minAPIVersionForType["NasSessionCredentialConflict"] = "2.5 U2" } type NasSessionCredentialConflictFault NasSessionCredentialConflict @@ -55051,7 +54809,6 @@ type NasStorageProfile struct { func init() { t["NasStorageProfile"] = reflect.TypeOf((*NasStorageProfile)(nil)).Elem() - minAPIVersionForType["NasStorageProfile"] = "4.0" } // This fault is thrown when an operation to configure a NAS datastore @@ -55067,7 +54824,6 @@ type NasVolumeNotMounted struct { func init() { t["NasVolumeNotMounted"] = reflect.TypeOf((*NasVolumeNotMounted)(nil)).Elem() - minAPIVersionForType["NasVolumeNotMounted"] = "2.5 U2" } type NasVolumeNotMountedFault NasVolumeNotMounted @@ -55091,7 +54847,6 @@ type NegatableExpression struct { func init() { t["NegatableExpression"] = reflect.TypeOf((*NegatableExpression)(nil)).Elem() - minAPIVersionForType["NegatableExpression"] = "5.5" } // This data object type describes the NetBIOS configuration of @@ -55108,7 +54863,6 @@ type NetBIOSConfigInfo struct { func init() { t["NetBIOSConfigInfo"] = reflect.TypeOf((*NetBIOSConfigInfo)(nil)).Elem() - minAPIVersionForType["NetBIOSConfigInfo"] = "4.1" } // Dynamic Host Configuration Protocol reporting for IP version 4 and version 6. @@ -55123,7 +54877,6 @@ type NetDhcpConfigInfo struct { func init() { t["NetDhcpConfigInfo"] = reflect.TypeOf((*NetDhcpConfigInfo)(nil)).Elem() - minAPIVersionForType["NetDhcpConfigInfo"] = "4.1" } // Provides for reporting of DHCP client. 
@@ -55151,7 +54904,6 @@ type NetDhcpConfigInfoDhcpOptions struct { func init() { t["NetDhcpConfigInfoDhcpOptions"] = reflect.TypeOf((*NetDhcpConfigInfoDhcpOptions)(nil)).Elem() - minAPIVersionForType["NetDhcpConfigInfoDhcpOptions"] = "4.1" } // Dynamic Host Configuration Protocol Configuration for IP version 4 and version 6. @@ -55166,7 +54918,6 @@ type NetDhcpConfigSpec struct { func init() { t["NetDhcpConfigSpec"] = reflect.TypeOf((*NetDhcpConfigSpec)(nil)).Elem() - minAPIVersionForType["NetDhcpConfigSpec"] = "4.1" } // Provides for configuration of IPv6 @@ -55187,7 +54938,6 @@ type NetDhcpConfigSpecDhcpOptionsSpec struct { func init() { t["NetDhcpConfigSpecDhcpOptionsSpec"] = reflect.TypeOf((*NetDhcpConfigSpecDhcpOptionsSpec)(nil)).Elem() - minAPIVersionForType["NetDhcpConfigSpecDhcpOptionsSpec"] = "4.1" } // Domain Name Server (DNS) Configuration Specification - @@ -55225,7 +54975,6 @@ type NetDnsConfigInfo struct { func init() { t["NetDnsConfigInfo"] = reflect.TypeOf((*NetDnsConfigInfo)(nil)).Elem() - minAPIVersionForType["NetDnsConfigInfo"] = "4.1" } // Domain Name Server (DNS) Configuration Specification - @@ -55273,7 +55022,6 @@ type NetDnsConfigSpec struct { func init() { t["NetDnsConfigSpec"] = reflect.TypeOf((*NetDnsConfigSpec)(nil)).Elem() - minAPIVersionForType["NetDnsConfigSpec"] = "4.1" } // Protocol version independent address reporting data object for network @@ -55297,7 +55045,6 @@ type NetIpConfigInfo struct { func init() { t["NetIpConfigInfo"] = reflect.TypeOf((*NetIpConfigInfo)(nil)).Elem() - minAPIVersionForType["NetIpConfigInfo"] = "4.1" } // Information about a specific IP Address. @@ -55345,7 +55092,6 @@ type NetIpConfigInfoIpAddress struct { func init() { t["NetIpConfigInfoIpAddress"] = reflect.TypeOf((*NetIpConfigInfoIpAddress)(nil)).Elem() - minAPIVersionForType["NetIpConfigInfoIpAddress"] = "4.1" } // Internet Protocol Address Configuration for version 4 and version 6. @@ -55364,7 +55110,6 @@ type NetIpConfigSpec struct { func init() { t["NetIpConfigSpec"] = reflect.TypeOf((*NetIpConfigSpec)(nil)).Elem() - minAPIVersionForType["NetIpConfigSpec"] = "4.1" } // Provides for configuration of IP Addresses. @@ -55400,7 +55145,6 @@ type NetIpConfigSpecIpAddressSpec struct { func init() { t["NetIpConfigSpecIpAddressSpec"] = reflect.TypeOf((*NetIpConfigSpecIpAddressSpec)(nil)).Elem() - minAPIVersionForType["NetIpConfigSpecIpAddressSpec"] = "4.1" } // This data object reports the IP Route Table. @@ -55413,7 +55157,6 @@ type NetIpRouteConfigInfo struct { func init() { t["NetIpRouteConfigInfo"] = reflect.TypeOf((*NetIpRouteConfigInfo)(nil)).Elem() - minAPIVersionForType["NetIpRouteConfigInfo"] = "4.1" } // Next hop Gateway for a given route. @@ -55426,7 +55169,6 @@ type NetIpRouteConfigInfoGateway struct { func init() { t["NetIpRouteConfigInfoGateway"] = reflect.TypeOf((*NetIpRouteConfigInfoGateway)(nil)).Elem() - minAPIVersionForType["NetIpRouteConfigInfoGateway"] = "4.1" } // IpRoute report an individual host, network or default destination network @@ -55455,7 +55197,6 @@ type NetIpRouteConfigInfoIpRoute struct { func init() { t["NetIpRouteConfigInfoIpRoute"] = reflect.TypeOf((*NetIpRouteConfigInfoIpRoute)(nil)).Elem() - minAPIVersionForType["NetIpRouteConfigInfoIpRoute"] = "4.1" } // Address family independent IP Route Table Configuration data object. 
@@ -55468,7 +55209,6 @@ type NetIpRouteConfigSpec struct { func init() { t["NetIpRouteConfigSpec"] = reflect.TypeOf((*NetIpRouteConfigSpec)(nil)).Elem() - minAPIVersionForType["NetIpRouteConfigSpec"] = "4.1" } // IpRoute report an individual host, network or default destination network @@ -55482,7 +55222,6 @@ type NetIpRouteConfigSpecGatewaySpec struct { func init() { t["NetIpRouteConfigSpecGatewaySpec"] = reflect.TypeOf((*NetIpRouteConfigSpecGatewaySpec)(nil)).Elem() - minAPIVersionForType["NetIpRouteConfigSpecGatewaySpec"] = "4.1" } // Specify an individual host, network or default destination network @@ -55514,7 +55253,6 @@ type NetIpRouteConfigSpecIpRouteSpec struct { func init() { t["NetIpRouteConfigSpecIpRouteSpec"] = reflect.TypeOf((*NetIpRouteConfigSpecIpRouteSpec)(nil)).Elem() - minAPIVersionForType["NetIpRouteConfigSpecIpRouteSpec"] = "4.1" } // Protocol version independent reporting data object for IP stack. @@ -55535,7 +55273,6 @@ type NetIpStackInfo struct { func init() { t["NetIpStackInfo"] = reflect.TypeOf((*NetIpStackInfo)(nil)).Elem() - minAPIVersionForType["NetIpStackInfo"] = "4.1" } type NetIpStackInfoDefaultRouter struct { @@ -55598,7 +55335,6 @@ type NetIpStackInfoNetToMedia struct { func init() { t["NetIpStackInfoNetToMedia"] = reflect.TypeOf((*NetIpStackInfoNetToMedia)(nil)).Elem() - minAPIVersionForType["NetIpStackInfoNetToMedia"] = "4.1" } // The `NetStackInstanceProfile` data object represents a subprofile @@ -55616,7 +55352,6 @@ type NetStackInstanceProfile struct { func init() { t["NetStackInstanceProfile"] = reflect.TypeOf((*NetStackInstanceProfile)(nil)).Elem() - minAPIVersionForType["NetStackInstanceProfile"] = "5.5" } // A network copy of the file failed. @@ -55645,7 +55380,6 @@ type NetworkDisruptedAndConfigRolledBack struct { func init() { t["NetworkDisruptedAndConfigRolledBack"] = reflect.TypeOf((*NetworkDisruptedAndConfigRolledBack)(nil)).Elem() - minAPIVersionForType["NetworkDisruptedAndConfigRolledBack"] = "5.1" } type NetworkDisruptedAndConfigRolledBackFault NetworkDisruptedAndConfigRolledBack @@ -55666,7 +55400,6 @@ type NetworkEventArgument struct { func init() { t["NetworkEventArgument"] = reflect.TypeOf((*NetworkEventArgument)(nil)).Elem() - minAPIVersionForType["NetworkEventArgument"] = "4.0" } // This fault is thrown when an operation to configure a NAS volume fails @@ -55677,7 +55410,6 @@ type NetworkInaccessible struct { func init() { t["NetworkInaccessible"] = reflect.TypeOf((*NetworkInaccessible)(nil)).Elem() - minAPIVersionForType["NetworkInaccessible"] = "2.5 U2" } type NetworkInaccessibleFault NetworkInaccessible @@ -55697,7 +55429,6 @@ type NetworkPolicyProfile struct { func init() { t["NetworkPolicyProfile"] = reflect.TypeOf((*NetworkPolicyProfile)(nil)).Elem() - minAPIVersionForType["NetworkPolicyProfile"] = "4.0" } // The `NetworkProfile` data object contains a set of subprofiles for @@ -55756,19 +55487,18 @@ type NetworkProfile struct { // // Use the `NsxHostVNicProfile*.*NsxHostVNicProfile.key` property // to access a subprofile in the list. - NsxHostNic []NsxHostVNicProfile `xml:"nsxHostNic,omitempty" json:"nsxHostNic,omitempty" vim:"6.7"` + NsxHostNic []NsxHostVNicProfile `xml:"nsxHostNic,omitempty" json:"nsxHostNic,omitempty"` // List of NetStackInstance subprofiles. // // Use the `NetStackInstanceProfile.key` property to access // a subprofile in the list. 
- NetStackInstance []NetStackInstanceProfile `xml:"netStackInstance,omitempty" json:"netStackInstance,omitempty" vim:"5.5"` + NetStackInstance []NetStackInstanceProfile `xml:"netStackInstance,omitempty" json:"netStackInstance,omitempty"` // OpaqueSwitch subprofile. - OpaqueSwitch *OpaqueSwitchProfile `xml:"opaqueSwitch,omitempty" json:"opaqueSwitch,omitempty" vim:"7.0"` + OpaqueSwitch *OpaqueSwitchProfile `xml:"opaqueSwitch,omitempty" json:"opaqueSwitch,omitempty"` } func init() { t["NetworkProfile"] = reflect.TypeOf((*NetworkProfile)(nil)).Elem() - minAPIVersionForType["NetworkProfile"] = "4.0" } // The `NetworkProfileDnsConfigProfile` data object represents DNS configuration @@ -55783,7 +55513,6 @@ type NetworkProfileDnsConfigProfile struct { func init() { t["NetworkProfileDnsConfigProfile"] = reflect.TypeOf((*NetworkProfileDnsConfigProfile)(nil)).Elem() - minAPIVersionForType["NetworkProfileDnsConfigProfile"] = "4.0" } // This event records when networking configuration on the host @@ -55799,7 +55528,6 @@ type NetworkRollbackEvent struct { func init() { t["NetworkRollbackEvent"] = reflect.TypeOf((*NetworkRollbackEvent)(nil)).Elem() - minAPIVersionForType["NetworkRollbackEvent"] = "5.1" } // General information about a network. @@ -55818,12 +55546,12 @@ type NetworkSummary struct { // // Empty if the network is not associated with an // IP pool. - IpPoolName string `xml:"ipPoolName" json:"ipPoolName" vim:"4.0"` + IpPoolName string `xml:"ipPoolName" json:"ipPoolName"` // Identifier of the associated IP pool. // // Zero if the network is not associated // with an IP pool. - IpPoolId *int32 `xml:"ipPoolId" json:"ipPoolId,omitempty" vim:"5.1"` + IpPoolId *int32 `xml:"ipPoolId" json:"ipPoolId,omitempty"` } func init() { @@ -55846,7 +55574,6 @@ type NetworksMayNotBeTheSame struct { func init() { t["NetworksMayNotBeTheSame"] = reflect.TypeOf((*NetworksMayNotBeTheSame)(nil)).Elem() - minAPIVersionForType["NetworksMayNotBeTheSame"] = "2.5" } type NetworksMayNotBeTheSameFault NetworksMayNotBeTheSame @@ -55869,7 +55596,6 @@ type NicSettingMismatch struct { func init() { t["NicSettingMismatch"] = reflect.TypeOf((*NicSettingMismatch)(nil)).Elem() - minAPIVersionForType["NicSettingMismatch"] = "2.5" } type NicSettingMismatchFault NicSettingMismatch @@ -55931,7 +55657,6 @@ type NoAvailableIp struct { func init() { t["NoAvailableIp"] = reflect.TypeOf((*NoAvailableIp)(nil)).Elem() - minAPIVersionForType["NoAvailableIp"] = "4.0" } type NoAvailableIpFault NoAvailableIp @@ -55948,7 +55673,6 @@ type NoClientCertificate struct { func init() { t["NoClientCertificate"] = reflect.TypeOf((*NoClientCertificate)(nil)).Elem() - minAPIVersionForType["NoClientCertificate"] = "2.5" } type NoClientCertificateFault NoClientCertificate @@ -55967,7 +55691,6 @@ type NoCompatibleDatastore struct { func init() { t["NoCompatibleDatastore"] = reflect.TypeOf((*NoCompatibleDatastore)(nil)).Elem() - minAPIVersionForType["NoCompatibleDatastore"] = "5.0" } type NoCompatibleDatastoreFault NoCompatibleDatastore @@ -55987,7 +55710,6 @@ type NoCompatibleHardAffinityHost struct { func init() { t["NoCompatibleHardAffinityHost"] = reflect.TypeOf((*NoCompatibleHardAffinityHost)(nil)).Elem() - minAPIVersionForType["NoCompatibleHardAffinityHost"] = "4.1" } type NoCompatibleHardAffinityHostFault NoCompatibleHardAffinityHost @@ -56013,7 +55735,6 @@ type NoCompatibleHost struct { func init() { t["NoCompatibleHost"] = reflect.TypeOf((*NoCompatibleHost)(nil)).Elem() - minAPIVersionForType["NoCompatibleHost"] = "4.0" } type NoCompatibleHostFault 
BaseNoCompatibleHost @@ -56031,7 +55752,6 @@ type NoCompatibleHostWithAccessToDevice struct { func init() { t["NoCompatibleHostWithAccessToDevice"] = reflect.TypeOf((*NoCompatibleHostWithAccessToDevice)(nil)).Elem() - minAPIVersionForType["NoCompatibleHostWithAccessToDevice"] = "4.1" } type NoCompatibleHostWithAccessToDeviceFault NoCompatibleHostWithAccessToDevice @@ -56051,7 +55771,6 @@ type NoCompatibleSoftAffinityHost struct { func init() { t["NoCompatibleSoftAffinityHost"] = reflect.TypeOf((*NoCompatibleSoftAffinityHost)(nil)).Elem() - minAPIVersionForType["NoCompatibleSoftAffinityHost"] = "4.1" } type NoCompatibleSoftAffinityHostFault NoCompatibleSoftAffinityHost @@ -56069,7 +55788,6 @@ type NoConnectedDatastore struct { func init() { t["NoConnectedDatastore"] = reflect.TypeOf((*NoConnectedDatastore)(nil)).Elem() - minAPIVersionForType["NoConnectedDatastore"] = "5.0" } type NoConnectedDatastoreFault NoConnectedDatastore @@ -56085,7 +55803,6 @@ type NoDatastoresConfiguredEvent struct { func init() { t["NoDatastoresConfiguredEvent"] = reflect.TypeOf((*NoDatastoresConfiguredEvent)(nil)).Elem() - minAPIVersionForType["NoDatastoresConfiguredEvent"] = "2.5" } // This exception is thrown when a virtual machine @@ -56210,7 +55927,6 @@ type NoHostSuitableForFtSecondary struct { func init() { t["NoHostSuitableForFtSecondary"] = reflect.TypeOf((*NoHostSuitableForFtSecondary)(nil)).Elem() - minAPIVersionForType["NoHostSuitableForFtSecondary"] = "4.0" } type NoHostSuitableForFtSecondaryFault NoHostSuitableForFtSecondary @@ -56253,7 +55969,6 @@ type NoLicenseServerConfigured struct { func init() { t["NoLicenseServerConfigured"] = reflect.TypeOf((*NoLicenseServerConfigured)(nil)).Elem() - minAPIVersionForType["NoLicenseServerConfigured"] = "4.0" } type NoLicenseServerConfiguredFault NoLicenseServerConfigured @@ -56295,7 +56010,6 @@ type NoPeerHostFound struct { func init() { t["NoPeerHostFound"] = reflect.TypeOf((*NoPeerHostFound)(nil)).Elem() - minAPIVersionForType["NoPeerHostFound"] = "2.5" } type NoPeerHostFoundFault NoPeerHostFound @@ -56353,7 +56067,6 @@ type NoPermissionOnAD struct { func init() { t["NoPermissionOnAD"] = reflect.TypeOf((*NoPermissionOnAD)(nil)).Elem() - minAPIVersionForType["NoPermissionOnAD"] = "4.1" } type NoPermissionOnADFault NoPermissionOnAD @@ -56392,7 +56105,6 @@ type NoPermissionOnNasVolume struct { func init() { t["NoPermissionOnNasVolume"] = reflect.TypeOf((*NoPermissionOnNasVolume)(nil)).Elem() - minAPIVersionForType["NoPermissionOnNasVolume"] = "2.5 U2" } type NoPermissionOnNasVolumeFault NoPermissionOnNasVolume @@ -56409,7 +56121,6 @@ type NoSubjectName struct { func init() { t["NoSubjectName"] = reflect.TypeOf((*NoSubjectName)(nil)).Elem() - minAPIVersionForType["NoSubjectName"] = "2.5" } type NoSubjectNameFault NoSubjectName @@ -56426,7 +56137,6 @@ type NoVcManagedIpConfigured struct { func init() { t["NoVcManagedIpConfigured"] = reflect.TypeOf((*NoVcManagedIpConfigured)(nil)).Elem() - minAPIVersionForType["NoVcManagedIpConfigured"] = "4.0" } type NoVcManagedIpConfiguredFault NoVcManagedIpConfigured @@ -56459,7 +56169,6 @@ type NoVmInVApp struct { func init() { t["NoVmInVApp"] = reflect.TypeOf((*NoVmInVApp)(nil)).Elem() - minAPIVersionForType["NoVmInVApp"] = "4.0" } type NoVmInVAppFault NoVmInVApp @@ -56536,7 +56245,6 @@ type NodeDeploymentSpec struct { func init() { t["NodeDeploymentSpec"] = reflect.TypeOf((*NodeDeploymentSpec)(nil)).Elem() - minAPIVersionForType["NodeDeploymentSpec"] = "6.5" } // The NodeNetworkSpec class defines network specification of a 
node @@ -56553,7 +56261,6 @@ type NodeNetworkSpec struct { func init() { t["NodeNetworkSpec"] = reflect.TypeOf((*NodeNetworkSpec)(nil)).Elem() - minAPIVersionForType["NodeNetworkSpec"] = "6.5" } // Fault indicating that an operation must be executed by a @@ -56564,7 +56271,6 @@ type NonADUserRequired struct { func init() { t["NonADUserRequired"] = reflect.TypeOf((*NonADUserRequired)(nil)).Elem() - minAPIVersionForType["NonADUserRequired"] = "4.1" } type NonADUserRequiredFault NonADUserRequired @@ -56587,7 +56293,6 @@ type NonHomeRDMVMotionNotSupported struct { func init() { t["NonHomeRDMVMotionNotSupported"] = reflect.TypeOf((*NonHomeRDMVMotionNotSupported)(nil)).Elem() - minAPIVersionForType["NonHomeRDMVMotionNotSupported"] = "2.5" } type NonHomeRDMVMotionNotSupportedFault NonHomeRDMVMotionNotSupported @@ -56606,7 +56311,6 @@ type NonPersistentDisksNotSupported struct { func init() { t["NonPersistentDisksNotSupported"] = reflect.TypeOf((*NonPersistentDisksNotSupported)(nil)).Elem() - minAPIVersionForType["NonPersistentDisksNotSupported"] = "2.5" } type NonPersistentDisksNotSupportedFault NonPersistentDisksNotSupported @@ -56622,7 +56326,6 @@ type NonVIWorkloadDetectedOnDatastoreEvent struct { func init() { t["NonVIWorkloadDetectedOnDatastoreEvent"] = reflect.TypeOf((*NonVIWorkloadDetectedOnDatastoreEvent)(nil)).Elem() - minAPIVersionForType["NonVIWorkloadDetectedOnDatastoreEvent"] = "4.1" } // The host does not support VM that has VPX assigned prefix or ranged based @@ -56638,7 +56341,6 @@ type NonVmwareOuiMacNotSupportedHost struct { func init() { t["NonVmwareOuiMacNotSupportedHost"] = reflect.TypeOf((*NonVmwareOuiMacNotSupportedHost)(nil)).Elem() - minAPIVersionForType["NonVmwareOuiMacNotSupportedHost"] = "5.1" } type NonVmwareOuiMacNotSupportedHostFault NonVmwareOuiMacNotSupportedHost @@ -56655,7 +56357,6 @@ type NotADirectory struct { func init() { t["NotADirectory"] = reflect.TypeOf((*NotADirectory)(nil)).Elem() - minAPIVersionForType["NotADirectory"] = "5.0" } type NotADirectoryFault NotADirectory @@ -56672,7 +56373,6 @@ type NotAFile struct { func init() { t["NotAFile"] = reflect.TypeOf((*NotAFile)(nil)).Elem() - minAPIVersionForType["NotAFile"] = "5.0" } type NotAFileFault NotAFile @@ -56689,7 +56389,6 @@ type NotAuthenticated struct { func init() { t["NotAuthenticated"] = reflect.TypeOf((*NotAuthenticated)(nil)).Elem() - minAPIVersionForType["NotAuthenticated"] = "2.5" } type NotAuthenticatedFault NotAuthenticated @@ -56746,7 +56445,7 @@ type NotEnoughLogicalCpus struct { // The host that does not have enough logical CPUs. // // Refers instance of `HostSystem`. 
- Host *ManagedObjectReference `xml:"host,omitempty" json:"host,omitempty" vim:"2.5"` + Host *ManagedObjectReference `xml:"host,omitempty" json:"host,omitempty"` } func init() { @@ -56765,7 +56464,7 @@ type NotEnoughResourcesToStartVmEvent struct { VmEvent // The reason why the virtual machine could not be restarted - Reason string `xml:"reason,omitempty" json:"reason,omitempty" vim:"6.0"` + Reason string `xml:"reason,omitempty" json:"reason,omitempty"` } func init() { @@ -56855,7 +56554,6 @@ type NotSupportedDeviceForFT struct { func init() { t["NotSupportedDeviceForFT"] = reflect.TypeOf((*NotSupportedDeviceForFT)(nil)).Elem() - minAPIVersionForType["NotSupportedDeviceForFT"] = "4.1" } type NotSupportedDeviceForFTFault NotSupportedDeviceForFT @@ -56899,7 +56597,6 @@ type NotSupportedHostForChecksum struct { func init() { t["NotSupportedHostForChecksum"] = reflect.TypeOf((*NotSupportedHostForChecksum)(nil)).Elem() - minAPIVersionForType["NotSupportedHostForChecksum"] = "6.0" } type NotSupportedHostForChecksumFault NotSupportedHostForChecksum @@ -56918,7 +56615,6 @@ type NotSupportedHostForVFlash struct { func init() { t["NotSupportedHostForVFlash"] = reflect.TypeOf((*NotSupportedHostForVFlash)(nil)).Elem() - minAPIVersionForType["NotSupportedHostForVFlash"] = "5.5" } type NotSupportedHostForVFlashFault NotSupportedHostForVFlash @@ -56937,7 +56633,6 @@ type NotSupportedHostForVmcp struct { func init() { t["NotSupportedHostForVmcp"] = reflect.TypeOf((*NotSupportedHostForVmcp)(nil)).Elem() - minAPIVersionForType["NotSupportedHostForVmcp"] = "6.0" } type NotSupportedHostForVmcpFault NotSupportedHostForVmcp @@ -56956,7 +56651,6 @@ type NotSupportedHostForVmemFile struct { func init() { t["NotSupportedHostForVmemFile"] = reflect.TypeOf((*NotSupportedHostForVmemFile)(nil)).Elem() - minAPIVersionForType["NotSupportedHostForVmemFile"] = "6.0" } type NotSupportedHostForVmemFileFault NotSupportedHostForVmemFile @@ -56975,7 +56669,6 @@ type NotSupportedHostForVsan struct { func init() { t["NotSupportedHostForVsan"] = reflect.TypeOf((*NotSupportedHostForVsan)(nil)).Elem() - minAPIVersionForType["NotSupportedHostForVsan"] = "5.5" } type NotSupportedHostForVsanFault NotSupportedHostForVsan @@ -56992,7 +56685,6 @@ type NotSupportedHostInCluster struct { func init() { t["NotSupportedHostInCluster"] = reflect.TypeOf((*NotSupportedHostInCluster)(nil)).Elem() - minAPIVersionForType["NotSupportedHostInCluster"] = "4.0" } type NotSupportedHostInClusterFault BaseNotSupportedHostInCluster @@ -57016,7 +56708,6 @@ type NotSupportedHostInDvs struct { func init() { t["NotSupportedHostInDvs"] = reflect.TypeOf((*NotSupportedHostInDvs)(nil)).Elem() - minAPIVersionForType["NotSupportedHostInDvs"] = "4.1" } type NotSupportedHostInDvsFault NotSupportedHostInDvs @@ -57038,7 +56729,6 @@ type NotSupportedHostInHACluster struct { func init() { t["NotSupportedHostInHACluster"] = reflect.TypeOf((*NotSupportedHostInHACluster)(nil)).Elem() - minAPIVersionForType["NotSupportedHostInHACluster"] = "5.0" } type NotSupportedHostInHAClusterFault NotSupportedHostInHACluster @@ -57055,7 +56745,6 @@ type NotUserConfigurableProperty struct { func init() { t["NotUserConfigurableProperty"] = reflect.TypeOf((*NotUserConfigurableProperty)(nil)).Elem() - minAPIVersionForType["NotUserConfigurableProperty"] = "4.0" } type NotUserConfigurablePropertyFault NotUserConfigurableProperty @@ -57064,6 +56753,28 @@ func init() { t["NotUserConfigurablePropertyFault"] = reflect.TypeOf((*NotUserConfigurablePropertyFault)(nil)).Elem() } +type 
NotifyAffectedServices NotifyAffectedServicesRequestType + +func init() { + t["NotifyAffectedServices"] = reflect.TypeOf((*NotifyAffectedServices)(nil)).Elem() +} + +// The parameters of `HostCertificateManager.NotifyAffectedServices`. +type NotifyAffectedServicesRequestType struct { + This ManagedObjectReference `xml:"_this" json:"-"` + // list of services that need to be notified and no + // other service would be notified. if not provided all supported + // services would be notified. + Services []string `xml:"services,omitempty" json:"services,omitempty" vim:"8.0.1.0"` +} + +func init() { + t["NotifyAffectedServicesRequestType"] = reflect.TypeOf((*NotifyAffectedServicesRequestType)(nil)).Elem() +} + +type NotifyAffectedServicesResponse struct { +} + // The `NsxHostVNicProfile` data object is the base object // for host Virtual NIC connected to NSX logic switch subprofiles. // @@ -57081,7 +56792,6 @@ type NsxHostVNicProfile struct { func init() { t["NsxHostVNicProfile"] = reflect.TypeOf((*NsxHostVNicProfile)(nil)).Elem() - minAPIVersionForType["NsxHostVNicProfile"] = "6.7" } // The NumPortsProfile data object represents a @@ -57093,7 +56803,6 @@ type NumPortsProfile struct { func init() { t["NumPortsProfile"] = reflect.TypeOf((*NumPortsProfile)(nil)).Elem() - minAPIVersionForType["NumPortsProfile"] = "4.0" } // The host's software does not support enough cores per socket to @@ -57111,7 +56820,6 @@ type NumVirtualCoresPerSocketNotSupported struct { func init() { t["NumVirtualCoresPerSocketNotSupported"] = reflect.TypeOf((*NumVirtualCoresPerSocketNotSupported)(nil)).Elem() - minAPIVersionForType["NumVirtualCoresPerSocketNotSupported"] = "5.0" } type NumVirtualCoresPerSocketNotSupportedFault NumVirtualCoresPerSocketNotSupported @@ -57131,7 +56839,6 @@ type NumVirtualCpusExceedsLimit struct { func init() { t["NumVirtualCpusExceedsLimit"] = reflect.TypeOf((*NumVirtualCpusExceedsLimit)(nil)).Elem() - minAPIVersionForType["NumVirtualCpusExceedsLimit"] = "4.1" } type NumVirtualCpusExceedsLimitFault NumVirtualCpusExceedsLimit @@ -57155,7 +56862,6 @@ type NumVirtualCpusIncompatible struct { func init() { t["NumVirtualCpusIncompatible"] = reflect.TypeOf((*NumVirtualCpusIncompatible)(nil)).Elem() - minAPIVersionForType["NumVirtualCpusIncompatible"] = "4.0" } type NumVirtualCpusIncompatibleFault NumVirtualCpusIncompatible @@ -57199,7 +56905,6 @@ type NumericRange struct { func init() { t["NumericRange"] = reflect.TypeOf((*NumericRange)(nil)).Elem() - minAPIVersionForType["NumericRange"] = "4.0" } // Get detailed information of a nvdimm @@ -57236,7 +56941,6 @@ type NvdimmDimmInfo struct { func init() { t["NvdimmDimmInfo"] = reflect.TypeOf((*NvdimmDimmInfo)(nil)).Elem() - minAPIVersionForType["NvdimmDimmInfo"] = "6.7" } // A unique identifier used for namespaces @@ -57249,7 +56953,6 @@ type NvdimmGuid struct { func init() { t["NvdimmGuid"] = reflect.TypeOf((*NvdimmGuid)(nil)).Elem() - minAPIVersionForType["NvdimmGuid"] = "6.7" } // \\brief NVDIMM health information @@ -57305,7 +57008,6 @@ type NvdimmHealthInfo struct { func init() { t["NvdimmHealthInfo"] = reflect.TypeOf((*NvdimmHealthInfo)(nil)).Elem() - minAPIVersionForType["NvdimmHealthInfo"] = "6.7" } // Characteristics of an interleave set of a NVDIMM @@ -57336,7 +57038,6 @@ type NvdimmInterleaveSetInfo struct { func init() { t["NvdimmInterleaveSetInfo"] = reflect.TypeOf((*NvdimmInterleaveSetInfo)(nil)).Elem() - minAPIVersionForType["NvdimmInterleaveSetInfo"] = "6.7" } // Deprecated as of vSphere 6.7u1, use PMemNamespaceCreateReq. 
@@ -57379,7 +57080,6 @@ type NvdimmNamespaceCreateSpec struct { func init() { t["NvdimmNamespaceCreateSpec"] = reflect.TypeOf((*NvdimmNamespaceCreateSpec)(nil)).Elem() - minAPIVersionForType["NvdimmNamespaceCreateSpec"] = "6.7" } // Arguments for deleting a namespace @@ -57392,7 +57092,6 @@ type NvdimmNamespaceDeleteSpec struct { func init() { t["NvdimmNamespaceDeleteSpec"] = reflect.TypeOf((*NvdimmNamespaceDeleteSpec)(nil)).Elem() - minAPIVersionForType["NvdimmNamespaceDeleteSpec"] = "6.7" } // Detailed information about a particular namespace. @@ -57427,7 +57126,6 @@ type NvdimmNamespaceDetails struct { func init() { t["NvdimmNamespaceDetails"] = reflect.TypeOf((*NvdimmNamespaceDetails)(nil)).Elem() - minAPIVersionForType["NvdimmNamespaceDetails"] = "6.7.1" } // Deprecated as of vSphere 6.7u1, use NamespaceDetails. @@ -57479,7 +57177,6 @@ type NvdimmNamespaceInfo struct { func init() { t["NvdimmNamespaceInfo"] = reflect.TypeOf((*NvdimmNamespaceInfo)(nil)).Elem() - minAPIVersionForType["NvdimmNamespaceInfo"] = "6.7" } // Arguments for creating a persistent memory mode namespace @@ -57500,7 +57197,6 @@ type NvdimmPMemNamespaceCreateSpec struct { func init() { t["NvdimmPMemNamespaceCreateSpec"] = reflect.TypeOf((*NvdimmPMemNamespaceCreateSpec)(nil)).Elem() - minAPIVersionForType["NvdimmPMemNamespaceCreateSpec"] = "6.7.1" } // \\brief NVDIMM region information. @@ -57553,7 +57249,6 @@ type NvdimmRegionInfo struct { func init() { t["NvdimmRegionInfo"] = reflect.TypeOf((*NvdimmRegionInfo)(nil)).Elem() - minAPIVersionForType["NvdimmRegionInfo"] = "6.7" } // \\brief Get summary of nvdimm @@ -57580,7 +57275,6 @@ type NvdimmSummary struct { func init() { t["NvdimmSummary"] = reflect.TypeOf((*NvdimmSummary)(nil)).Elem() - minAPIVersionForType["NvdimmSummary"] = "6.7" } // This data object represents Non-Volatile DIMMs host @@ -57624,12 +57318,11 @@ type NvdimmSystemInfo struct { // // Namespace details is unset if the system does not support // PMem feature. - NsDetails []NvdimmNamespaceDetails `xml:"nsDetails,omitempty" json:"nsDetails,omitempty" vim:"6.7.1"` + NsDetails []NvdimmNamespaceDetails `xml:"nsDetails,omitempty" json:"nsDetails,omitempty"` } func init() { t["NvdimmSystemInfo"] = reflect.TypeOf((*NvdimmSystemInfo)(nil)).Elem() - minAPIVersionForType["NvdimmSystemInfo"] = "6.7" } // The `ObjectContent` data object type contains the @@ -57739,7 +57432,6 @@ type OpaqueNetworkCapability struct { func init() { t["OpaqueNetworkCapability"] = reflect.TypeOf((*OpaqueNetworkCapability)(nil)).Elem() - minAPIVersionForType["OpaqueNetworkCapability"] = "6.5" } // The summary of a opaque network. 
@@ -57756,7 +57448,6 @@ type OpaqueNetworkSummary struct { func init() { t["OpaqueNetworkSummary"] = reflect.TypeOf((*OpaqueNetworkSummary)(nil)).Elem() - minAPIVersionForType["OpaqueNetworkSummary"] = "5.5" } // This class describes an opaque network that a device backing @@ -57768,12 +57459,11 @@ type OpaqueNetworkTargetInfo struct { Network OpaqueNetworkSummary `xml:"network" json:"network"` // Indicates whether network bandwidth reservation is supported on // the opaque network - NetworkReservationSupported *bool `xml:"networkReservationSupported" json:"networkReservationSupported,omitempty" vim:"6.0"` + NetworkReservationSupported *bool `xml:"networkReservationSupported" json:"networkReservationSupported,omitempty"` } func init() { t["OpaqueNetworkTargetInfo"] = reflect.TypeOf((*OpaqueNetworkTargetInfo)(nil)).Elem() - minAPIVersionForType["OpaqueNetworkTargetInfo"] = "5.5" } // The `OpaqueSwitchProfile` data object represents opaque switch @@ -57788,7 +57478,6 @@ type OpaqueSwitchProfile struct { func init() { t["OpaqueSwitchProfile"] = reflect.TypeOf((*OpaqueSwitchProfile)(nil)).Elem() - minAPIVersionForType["OpaqueSwitchProfile"] = "7.0" } type OpenInventoryViewFolder OpenInventoryViewFolderRequestType @@ -57828,7 +57517,6 @@ type OperationDisabledByGuest struct { func init() { t["OperationDisabledByGuest"] = reflect.TypeOf((*OperationDisabledByGuest)(nil)).Elem() - minAPIVersionForType["OperationDisabledByGuest"] = "5.0" } type OperationDisabledByGuestFault OperationDisabledByGuest @@ -57851,7 +57539,6 @@ type OperationDisallowedOnHost struct { func init() { t["OperationDisallowedOnHost"] = reflect.TypeOf((*OperationDisallowedOnHost)(nil)).Elem() - minAPIVersionForType["OperationDisallowedOnHost"] = "5.0" } type OperationDisallowedOnHostFault OperationDisallowedOnHost @@ -57869,7 +57556,6 @@ type OperationNotSupportedByGuest struct { func init() { t["OperationNotSupportedByGuest"] = reflect.TypeOf((*OperationNotSupportedByGuest)(nil)).Elem() - minAPIVersionForType["OperationNotSupportedByGuest"] = "5.0" } type OperationNotSupportedByGuestFault OperationNotSupportedByGuest @@ -57912,7 +57598,6 @@ type OptionProfile struct { func init() { t["OptionProfile"] = reflect.TypeOf((*OptionProfile)(nil)).Elem() - minAPIVersionForType["OptionProfile"] = "4.0" } // The base data object type for all options. @@ -57960,11 +57645,11 @@ type OrAlarmExpression struct { AlarmExpression // List of alarm expressions that define the overall status of the alarm. - // - The state of the alarm expression is gray if all subexpressions are gray. - // Otherwise, gray subexpressions are ignored. - // - The state is red if any subexpression is red. - // - Otherwise, the state is yellow if any subexpression is yellow. - // - Otherwise, the state of the alarm expression is green. + // - The state of the alarm expression is gray if all subexpressions are gray. + // Otherwise, gray subexpressions are ignored. + // - The state is red if any subexpression is red. + // - Otherwise, the state is yellow if any subexpression is yellow. + // - Otherwise, the state of the alarm expression is green. 
Expression []BaseAlarmExpression `xml:"expression,typeattr" json:"expression"` } @@ -58002,7 +57687,6 @@ type OutOfSyncDvsHost struct { func init() { t["OutOfSyncDvsHost"] = reflect.TypeOf((*OutOfSyncDvsHost)(nil)).Elem() - minAPIVersionForType["OutOfSyncDvsHost"] = "4.0" } type OverwriteCustomizationSpec OverwriteCustomizationSpecRequestType @@ -58036,7 +57720,6 @@ type OvfAttribute struct { func init() { t["OvfAttribute"] = reflect.TypeOf((*OvfAttribute)(nil)).Elem() - minAPIVersionForType["OvfAttribute"] = "4.0" } type OvfAttributeFault BaseOvfAttribute @@ -58103,7 +57786,6 @@ type OvfConstraint struct { func init() { t["OvfConstraint"] = reflect.TypeOf((*OvfConstraint)(nil)).Elem() - minAPIVersionForType["OvfConstraint"] = "4.1" } type OvfConstraintFault BaseOvfConstraint @@ -58128,7 +57810,6 @@ type OvfConsumerCallbackFault struct { func init() { t["OvfConsumerCallbackFault"] = reflect.TypeOf((*OvfConsumerCallbackFault)(nil)).Elem() - minAPIVersionForType["OvfConsumerCallbackFault"] = "5.0" } type OvfConsumerCallbackFaultFault BaseOvfConsumerCallbackFault @@ -58147,7 +57828,6 @@ type OvfConsumerCommunicationError struct { func init() { t["OvfConsumerCommunicationError"] = reflect.TypeOf((*OvfConsumerCommunicationError)(nil)).Elem() - minAPIVersionForType["OvfConsumerCommunicationError"] = "5.0" } type OvfConsumerCommunicationErrorFault OvfConsumerCommunicationError @@ -58170,7 +57850,6 @@ type OvfConsumerFault struct { func init() { t["OvfConsumerFault"] = reflect.TypeOf((*OvfConsumerFault)(nil)).Elem() - minAPIVersionForType["OvfConsumerFault"] = "5.0" } type OvfConsumerFaultFault OvfConsumerFault @@ -58192,7 +57871,6 @@ type OvfConsumerInvalidSection struct { func init() { t["OvfConsumerInvalidSection"] = reflect.TypeOf((*OvfConsumerInvalidSection)(nil)).Elem() - minAPIVersionForType["OvfConsumerInvalidSection"] = "5.0" } type OvfConsumerInvalidSectionFault OvfConsumerInvalidSection @@ -58233,9 +57911,9 @@ type OvfConsumerOstNode struct { // // As dictated by OVF, this list is subject to the // following rules: - // - The Envelope node must have exactly one child. - // - VirtualSystemCollection nodes may have zero or more children. - // - VirtualSystem nodes must have no children. + // - The Envelope node must have exactly one child. + // - VirtualSystemCollection nodes may have zero or more children. + // - VirtualSystem nodes must have no children. Child []OvfConsumerOstNode `xml:"child,omitempty" json:"child,omitempty"` // The VM or vApp corresponding to this node. // @@ -58249,7 +57927,6 @@ type OvfConsumerOstNode struct { func init() { t["OvfConsumerOstNode"] = reflect.TypeOf((*OvfConsumerOstNode)(nil)).Elem() - minAPIVersionForType["OvfConsumerOstNode"] = "5.0" } // A self-contained OVF section @@ -58272,7 +57949,6 @@ type OvfConsumerOvfSection struct { func init() { t["OvfConsumerOvfSection"] = reflect.TypeOf((*OvfConsumerOvfSection)(nil)).Elem() - minAPIVersionForType["OvfConsumerOvfSection"] = "5.0" } // A fault type indicating that the power on operation failed. 
@@ -58289,7 +57965,6 @@ type OvfConsumerPowerOnFault struct { func init() { t["OvfConsumerPowerOnFault"] = reflect.TypeOf((*OvfConsumerPowerOnFault)(nil)).Elem() - minAPIVersionForType["OvfConsumerPowerOnFault"] = "5.0" } type OvfConsumerPowerOnFaultFault OvfConsumerPowerOnFault @@ -58311,7 +57986,6 @@ type OvfConsumerUndeclaredSection struct { func init() { t["OvfConsumerUndeclaredSection"] = reflect.TypeOf((*OvfConsumerUndeclaredSection)(nil)).Elem() - minAPIVersionForType["OvfConsumerUndeclaredSection"] = "5.0" } type OvfConsumerUndeclaredSectionFault OvfConsumerUndeclaredSection @@ -58330,7 +58004,6 @@ type OvfConsumerUndefinedPrefix struct { func init() { t["OvfConsumerUndefinedPrefix"] = reflect.TypeOf((*OvfConsumerUndefinedPrefix)(nil)).Elem() - minAPIVersionForType["OvfConsumerUndefinedPrefix"] = "5.0" } type OvfConsumerUndefinedPrefixFault OvfConsumerUndefinedPrefix @@ -58353,7 +58026,6 @@ type OvfConsumerValidationFault struct { func init() { t["OvfConsumerValidationFault"] = reflect.TypeOf((*OvfConsumerValidationFault)(nil)).Elem() - minAPIVersionForType["OvfConsumerValidationFault"] = "5.0" } type OvfConsumerValidationFaultFault OvfConsumerValidationFault @@ -58427,7 +58099,7 @@ type OvfCreateDescriptorParams struct { // Controls whether attached image files should be included in the descriptor. // // This applies to image files attached to VirtualCdrom and VirtualFloppy. - IncludeImageFiles *bool `xml:"includeImageFiles" json:"includeImageFiles,omitempty" vim:"4.1"` + IncludeImageFiles *bool `xml:"includeImageFiles" json:"includeImageFiles,omitempty"` // An optional argument for modifying the export process. // // The option is used to control what extra information that will be included in the @@ -58435,7 +58107,7 @@ type OvfCreateDescriptorParams struct { // // To get a list of supported keywords see `OvfManager.ovfExportOption`. Unknown // options will be ignored by the server. - ExportOption []string `xml:"exportOption,omitempty" json:"exportOption,omitempty" vim:"5.1"` + ExportOption []string `xml:"exportOption,omitempty" json:"exportOption,omitempty"` // Snapshot reference from which the OVF descriptor should be based. // // If this parameter is set, the OVF descriptor is based off the @@ -58447,12 +58119,11 @@ type OvfCreateDescriptorParams struct { // createDescriptor call. // // Refers instance of `VirtualMachineSnapshot`. - Snapshot *ManagedObjectReference `xml:"snapshot,omitempty" json:"snapshot,omitempty" vim:"5.5"` + Snapshot *ManagedObjectReference `xml:"snapshot,omitempty" json:"snapshot,omitempty"` } func init() { t["OvfCreateDescriptorParams"] = reflect.TypeOf((*OvfCreateDescriptorParams)(nil)).Elem() - minAPIVersionForType["OvfCreateDescriptorParams"] = "4.0" } // The result of creating the OVF descriptor for the entity. @@ -58472,12 +58143,11 @@ type OvfCreateDescriptorResult struct { // warnings. Warning []LocalizedMethodFault `xml:"warning,omitempty" json:"warning,omitempty"` // Returns true if there are ISO or Floppy images attached to one or more VMs. - IncludeImageFiles *bool `xml:"includeImageFiles" json:"includeImageFiles,omitempty" vim:"4.1"` + IncludeImageFiles *bool `xml:"includeImageFiles" json:"includeImageFiles,omitempty"` } func init() { t["OvfCreateDescriptorResult"] = reflect.TypeOf((*OvfCreateDescriptorResult)(nil)).Elem() - minAPIVersionForType["OvfCreateDescriptorResult"] = "4.0" } // Parameters for deploying an OVF. 
@@ -58522,34 +58192,33 @@ type OvfCreateImportSpecParams struct { // // This can be used to distribute // a vApp across multiple resource pools (and create linked children). - ResourceMapping []OvfResourceMap `xml:"resourceMapping,omitempty" json:"resourceMapping,omitempty" vim:"4.1"` + ResourceMapping []OvfResourceMap `xml:"resourceMapping,omitempty" json:"resourceMapping,omitempty"` // An optional disk provisioning. // // If set, all the disks in the deployed OVF will // have get the same specified disk type (e.g., thin provisioned). // The valide values for disk provisioning are: - // - `monolithicSparse` - // - `monolithicFlat` - // - `twoGbMaxExtentSparse` - // - `twoGbMaxExtentFlat` - // - `thin` - // - `thick` - // - `sparse` - // - `flat` - // - `seSparse` + // - `monolithicSparse` + // - `monolithicFlat` + // - `twoGbMaxExtentSparse` + // - `twoGbMaxExtentFlat` + // - `thin` + // - `thick` + // - `sparse` + // - `flat` + // - `seSparse` // // See also `VirtualDiskMode_enum`. - DiskProvisioning string `xml:"diskProvisioning,omitempty" json:"diskProvisioning,omitempty" vim:"4.1"` + DiskProvisioning string `xml:"diskProvisioning,omitempty" json:"diskProvisioning,omitempty"` // The instantiation OST to configure OVF consumers. // // This is created by the client // from the annotated OST. See `OvfConsumer` for details. - InstantiationOst *OvfConsumerOstNode `xml:"instantiationOst,omitempty" json:"instantiationOst,omitempty" vim:"5.0"` + InstantiationOst *OvfConsumerOstNode `xml:"instantiationOst,omitempty" json:"instantiationOst,omitempty"` } func init() { t["OvfCreateImportSpecParams"] = reflect.TypeOf((*OvfCreateImportSpecParams)(nil)).Elem() - minAPIVersionForType["OvfCreateImportSpecParams"] = "4.0" } // The CreateImportSpecResult contains all information regarding the import that can @@ -58586,7 +58255,6 @@ type OvfCreateImportSpecResult struct { func init() { t["OvfCreateImportSpecResult"] = reflect.TypeOf((*OvfCreateImportSpecResult)(nil)).Elem() - minAPIVersionForType["OvfCreateImportSpecResult"] = "4.0" } // A deployment option as defined in the OVF specfication. 
@@ -58607,7 +58275,6 @@ type OvfDeploymentOption struct { func init() { t["OvfDeploymentOption"] = reflect.TypeOf((*OvfDeploymentOption)(nil)).Elem() - minAPIVersionForType["OvfDeploymentOption"] = "4.0" } type OvfDiskMappingNotFound struct { @@ -58637,7 +58304,6 @@ type OvfDiskOrderConstraint struct { func init() { t["OvfDiskOrderConstraint"] = reflect.TypeOf((*OvfDiskOrderConstraint)(nil)).Elem() - minAPIVersionForType["OvfDiskOrderConstraint"] = "4.1" } type OvfDiskOrderConstraintFault OvfDiskOrderConstraint @@ -58653,7 +58319,6 @@ type OvfDuplicateElement struct { func init() { t["OvfDuplicateElement"] = reflect.TypeOf((*OvfDuplicateElement)(nil)).Elem() - minAPIVersionForType["OvfDuplicateElement"] = "4.0" } type OvfDuplicateElementFault OvfDuplicateElement @@ -58672,7 +58337,6 @@ type OvfDuplicatedElementBoundary struct { func init() { t["OvfDuplicatedElementBoundary"] = reflect.TypeOf((*OvfDuplicatedElementBoundary)(nil)).Elem() - minAPIVersionForType["OvfDuplicatedElementBoundary"] = "4.0" } type OvfDuplicatedElementBoundaryFault OvfDuplicatedElementBoundary @@ -58693,7 +58357,6 @@ type OvfDuplicatedPropertyIdExport struct { func init() { t["OvfDuplicatedPropertyIdExport"] = reflect.TypeOf((*OvfDuplicatedPropertyIdExport)(nil)).Elem() - minAPIVersionForType["OvfDuplicatedPropertyIdExport"] = "5.0" } type OvfDuplicatedPropertyIdExportFault OvfDuplicatedPropertyIdExport @@ -58711,7 +58374,6 @@ type OvfDuplicatedPropertyIdImport struct { func init() { t["OvfDuplicatedPropertyIdImport"] = reflect.TypeOf((*OvfDuplicatedPropertyIdImport)(nil)).Elem() - minAPIVersionForType["OvfDuplicatedPropertyIdImport"] = "5.0" } type OvfDuplicatedPropertyIdImportFault OvfDuplicatedPropertyIdImport @@ -58730,7 +58392,6 @@ type OvfElement struct { func init() { t["OvfElement"] = reflect.TypeOf((*OvfElement)(nil)).Elem() - minAPIVersionForType["OvfElement"] = "4.0" } type OvfElementFault BaseOvfElement @@ -58751,7 +58412,6 @@ type OvfElementInvalidValue struct { func init() { t["OvfElementInvalidValue"] = reflect.TypeOf((*OvfElementInvalidValue)(nil)).Elem() - minAPIVersionForType["OvfElementInvalidValue"] = "4.0" } type OvfElementInvalidValueFault OvfElementInvalidValue @@ -58767,7 +58427,6 @@ type OvfExport struct { func init() { t["OvfExport"] = reflect.TypeOf((*OvfExport)(nil)).Elem() - minAPIVersionForType["OvfExport"] = "4.0" } // This fault is used if we fail to export an OVF package. @@ -58777,7 +58436,6 @@ type OvfExportFailed struct { func init() { t["OvfExportFailed"] = reflect.TypeOf((*OvfExportFailed)(nil)).Elem() - minAPIVersionForType["OvfExportFailed"] = "4.1" } type OvfExportFailedFault OvfExportFailed @@ -58878,7 +58536,6 @@ type OvfFault struct { func init() { t["OvfFault"] = reflect.TypeOf((*OvfFault)(nil)).Elem() - minAPIVersionForType["OvfFault"] = "4.0" } type OvfFaultFault BaseOvfFault @@ -58934,19 +58591,18 @@ type OvfFile struct { // Note that the "capacity" attribute is normally set to the capacity of the // corresponding `VirtualDisk`. Setting this variable // overrides the capacity from the VirtualDisk. - Capacity int64 `xml:"capacity,omitempty" json:"capacity,omitempty" vim:"4.1"` + Capacity int64 `xml:"capacity,omitempty" json:"capacity,omitempty"` // The populated size of the disk backed by this file. // // This should only be set if // the device backed by this file is a disk. This value will be written in the // "populatedSize" attribute of the corresponding "Disk" element in the OVF // descriptor. 
- PopulatedSize int64 `xml:"populatedSize,omitempty" json:"populatedSize,omitempty" vim:"4.1"` + PopulatedSize int64 `xml:"populatedSize,omitempty" json:"populatedSize,omitempty"` } func init() { t["OvfFile"] = reflect.TypeOf((*OvfFile)(nil)).Elem() - minAPIVersionForType["OvfFile"] = "4.0" } // An FileItem represents a file that must be uploaded by the caller when the @@ -58996,7 +58652,6 @@ type OvfFileItem struct { func init() { t["OvfFileItem"] = reflect.TypeOf((*OvfFileItem)(nil)).Elem() - minAPIVersionForType["OvfFileItem"] = "4.0" } type OvfHardwareCheck struct { @@ -59029,7 +58684,6 @@ type OvfHardwareExport struct { func init() { t["OvfHardwareExport"] = reflect.TypeOf((*OvfHardwareExport)(nil)).Elem() - minAPIVersionForType["OvfHardwareExport"] = "4.0" } type OvfHardwareExportFault BaseOvfHardwareExport @@ -59049,7 +58703,6 @@ type OvfHostResourceConstraint struct { func init() { t["OvfHostResourceConstraint"] = reflect.TypeOf((*OvfHostResourceConstraint)(nil)).Elem() - minAPIVersionForType["OvfHostResourceConstraint"] = "4.1" } type OvfHostResourceConstraintFault OvfHostResourceConstraint @@ -59088,7 +58741,6 @@ type OvfImport struct { func init() { t["OvfImport"] = reflect.TypeOf((*OvfImport)(nil)).Elem() - minAPIVersionForType["OvfImport"] = "4.0" } // This fault is used if we fail to deploy an OVF package. @@ -59098,7 +58750,6 @@ type OvfImportFailed struct { func init() { t["OvfImportFailed"] = reflect.TypeOf((*OvfImportFailed)(nil)).Elem() - minAPIVersionForType["OvfImportFailed"] = "4.1" } type OvfImportFailedFault OvfImportFailed @@ -59120,7 +58771,6 @@ type OvfInternalError struct { func init() { t["OvfInternalError"] = reflect.TypeOf((*OvfInternalError)(nil)).Elem() - minAPIVersionForType["OvfInternalError"] = "4.1" } type OvfInternalErrorFault OvfInternalError @@ -59139,7 +58789,6 @@ type OvfInvalidPackage struct { func init() { t["OvfInvalidPackage"] = reflect.TypeOf((*OvfInvalidPackage)(nil)).Elem() - minAPIVersionForType["OvfInvalidPackage"] = "4.0" } type OvfInvalidPackageFault BaseOvfInvalidPackage @@ -59158,7 +58807,6 @@ type OvfInvalidValue struct { func init() { t["OvfInvalidValue"] = reflect.TypeOf((*OvfInvalidValue)(nil)).Elem() - minAPIVersionForType["OvfInvalidValue"] = "4.0" } // If an malformed ovf:configuration attribute value is found in the @@ -59169,7 +58817,6 @@ type OvfInvalidValueConfiguration struct { func init() { t["OvfInvalidValueConfiguration"] = reflect.TypeOf((*OvfInvalidValueConfiguration)(nil)).Elem() - minAPIVersionForType["OvfInvalidValueConfiguration"] = "4.0" } type OvfInvalidValueConfigurationFault OvfInvalidValueConfiguration @@ -59185,7 +58832,6 @@ type OvfInvalidValueEmpty struct { func init() { t["OvfInvalidValueEmpty"] = reflect.TypeOf((*OvfInvalidValueEmpty)(nil)).Elem() - minAPIVersionForType["OvfInvalidValueEmpty"] = "4.0" } type OvfInvalidValueEmptyFault OvfInvalidValueEmpty @@ -59208,7 +58854,6 @@ type OvfInvalidValueFormatMalformed struct { func init() { t["OvfInvalidValueFormatMalformed"] = reflect.TypeOf((*OvfInvalidValueFormatMalformed)(nil)).Elem() - minAPIVersionForType["OvfInvalidValueFormatMalformed"] = "4.0" } type OvfInvalidValueFormatMalformedFault OvfInvalidValueFormatMalformed @@ -59225,7 +58870,6 @@ type OvfInvalidValueReference struct { func init() { t["OvfInvalidValueReference"] = reflect.TypeOf((*OvfInvalidValueReference)(nil)).Elem() - minAPIVersionForType["OvfInvalidValueReference"] = "4.0" } type OvfInvalidValueReferenceFault OvfInvalidValueReference @@ -59244,7 +58888,6 @@ type OvfInvalidVmName struct { 
func init() { t["OvfInvalidVmName"] = reflect.TypeOf((*OvfInvalidVmName)(nil)).Elem() - minAPIVersionForType["OvfInvalidVmName"] = "4.0" } type OvfInvalidVmNameFault OvfInvalidVmName @@ -59288,12 +58931,11 @@ type OvfManagerCommonParams struct { // // To get a list of supported keywords see `OvfManager.ovfImportOption`. Unknown // options will be ignored by the server. - ImportOption []string `xml:"importOption,omitempty" json:"importOption,omitempty" vim:"5.1"` + ImportOption []string `xml:"importOption,omitempty" json:"importOption,omitempty"` } func init() { t["OvfManagerCommonParams"] = reflect.TypeOf((*OvfManagerCommonParams)(nil)).Elem() - minAPIVersionForType["OvfManagerCommonParams"] = "4.0" } type OvfMappedOsId struct { @@ -59324,7 +58966,6 @@ type OvfMissingAttribute struct { func init() { t["OvfMissingAttribute"] = reflect.TypeOf((*OvfMissingAttribute)(nil)).Elem() - minAPIVersionForType["OvfMissingAttribute"] = "4.0" } type OvfMissingAttributeFault OvfMissingAttribute @@ -59340,7 +58981,6 @@ type OvfMissingElement struct { func init() { t["OvfMissingElement"] = reflect.TypeOf((*OvfMissingElement)(nil)).Elem() - minAPIVersionForType["OvfMissingElement"] = "4.0" } type OvfMissingElementFault BaseOvfMissingElement @@ -59359,7 +58999,6 @@ type OvfMissingElementNormalBoundary struct { func init() { t["OvfMissingElementNormalBoundary"] = reflect.TypeOf((*OvfMissingElementNormalBoundary)(nil)).Elem() - minAPIVersionForType["OvfMissingElementNormalBoundary"] = "4.0" } type OvfMissingElementNormalBoundaryFault OvfMissingElementNormalBoundary @@ -59381,7 +59020,6 @@ type OvfMissingHardware struct { func init() { t["OvfMissingHardware"] = reflect.TypeOf((*OvfMissingHardware)(nil)).Elem() - minAPIVersionForType["OvfMissingHardware"] = "4.0" } type OvfMissingHardwareFault OvfMissingHardware @@ -59400,7 +59038,6 @@ type OvfNetworkInfo struct { func init() { t["OvfNetworkInfo"] = reflect.TypeOf((*OvfNetworkInfo)(nil)).Elem() - minAPIVersionForType["OvfNetworkInfo"] = "4.0" } // A NetworkMapping is a choice made by the caller about which VI network to use for a @@ -59408,13 +59045,13 @@ func init() { type OvfNetworkMapping struct { DynamicData - Name string `xml:"name" json:"name"` + Name string `xml:"name" json:"name"` + // Refers instance of `Network`. 
Network ManagedObjectReference `xml:"network" json:"network"` } func init() { t["OvfNetworkMapping"] = reflect.TypeOf((*OvfNetworkMapping)(nil)).Elem() - minAPIVersionForType["OvfNetworkMapping"] = "4.0" } // The network mapping provided for OVF Import @@ -59425,7 +59062,6 @@ type OvfNetworkMappingNotSupported struct { func init() { t["OvfNetworkMappingNotSupported"] = reflect.TypeOf((*OvfNetworkMappingNotSupported)(nil)).Elem() - minAPIVersionForType["OvfNetworkMappingNotSupported"] = "5.1" } type OvfNetworkMappingNotSupportedFault OvfNetworkMappingNotSupported @@ -59441,7 +59077,6 @@ type OvfNoHostNic struct { func init() { t["OvfNoHostNic"] = reflect.TypeOf((*OvfNoHostNic)(nil)).Elem() - minAPIVersionForType["OvfNoHostNic"] = "4.0" } type OvfNoHostNicFault OvfNoHostNic @@ -59461,7 +59096,6 @@ type OvfNoSpaceOnController struct { func init() { t["OvfNoSpaceOnController"] = reflect.TypeOf((*OvfNoSpaceOnController)(nil)).Elem() - minAPIVersionForType["OvfNoSpaceOnController"] = "5.0" } type OvfNoSpaceOnControllerFault OvfNoSpaceOnController @@ -59500,7 +59134,6 @@ type OvfOptionInfo struct { func init() { t["OvfOptionInfo"] = reflect.TypeOf((*OvfOptionInfo)(nil)).Elem() - minAPIVersionForType["OvfOptionInfo"] = "5.1" } type OvfParseDescriptorParams struct { @@ -59571,12 +59204,12 @@ type OvfParseDescriptorResult struct { // entity with id = "vm1", would simply be "vm1". If the vm is // the child of a VirtualSystemCollection called "webTier", then // the path would be "webTier/vm". - EntityName []KeyValue `xml:"entityName,omitempty" json:"entityName,omitempty" vim:"4.1"` + EntityName []KeyValue `xml:"entityName,omitempty" json:"entityName,omitempty"` // The annotated OST for the OVF descriptor, generated by OVF // consumers. // // See `OvfConsumer` for details. - AnnotatedOst *OvfConsumerOstNode `xml:"annotatedOst,omitempty" json:"annotatedOst,omitempty" vim:"5.0"` + AnnotatedOst *OvfConsumerOstNode `xml:"annotatedOst,omitempty" json:"annotatedOst,omitempty"` // Errors that happened during processing. // // Something @@ -59610,7 +59243,6 @@ type OvfProperty struct { func init() { t["OvfProperty"] = reflect.TypeOf((*OvfProperty)(nil)).Elem() - minAPIVersionForType["OvfProperty"] = "4.0" } // VIM property type that can not be converted to OVF @@ -59625,7 +59257,6 @@ type OvfPropertyExport struct { func init() { t["OvfPropertyExport"] = reflect.TypeOf((*OvfPropertyExport)(nil)).Elem() - minAPIVersionForType["OvfPropertyExport"] = "4.0" } type OvfPropertyExportFault OvfPropertyExport @@ -59647,7 +59278,6 @@ type OvfPropertyNetwork struct { func init() { t["OvfPropertyNetwork"] = reflect.TypeOf((*OvfPropertyNetwork)(nil)).Elem() - minAPIVersionForType["OvfPropertyNetwork"] = "4.0" } // VIM property type that refers to a network that @@ -59662,7 +59292,6 @@ type OvfPropertyNetworkExport struct { func init() { t["OvfPropertyNetworkExport"] = reflect.TypeOf((*OvfPropertyNetworkExport)(nil)).Elem() - minAPIVersionForType["OvfPropertyNetworkExport"] = "5.0" } type OvfPropertyNetworkExportFault OvfPropertyNetworkExport @@ -59687,7 +59316,6 @@ type OvfPropertyQualifier struct { func init() { t["OvfPropertyQualifier"] = reflect.TypeOf((*OvfPropertyQualifier)(nil)).Elem() - minAPIVersionForType["OvfPropertyQualifier"] = "4.0" } // Indicate that a property qualifier was duplicated. 
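The OvfNetworkMapping entries shown above are what a caller supplies to OvfManager.CreateImportSpec so that each logical network named in the OVF descriptor is bound to a concrete vSphere network. A minimal sketch using the vim25/types package; the entity name, network name and MoRef value are placeholders, not values taken from this change:

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	// Placeholder reference; a real one comes from the inventory
	// (a Network or DistributedVirtualPortgroup managed object).
	target := types.ManagedObjectReference{Type: "Network", Value: "network-12"}

	params := types.OvfCreateImportSpecParams{
		EntityName: "imported-vm",
		NetworkMapping: []types.OvfNetworkMapping{
			// Bind the descriptor's "VM Network" to the chosen target network.
			{Name: "VM Network", Network: target},
		},
	}

	fmt.Printf("%d network mapping(s) for %q\n", len(params.NetworkMapping), params.EntityName)
}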
@@ -59700,7 +59328,6 @@ type OvfPropertyQualifierDuplicate struct { func init() { t["OvfPropertyQualifierDuplicate"] = reflect.TypeOf((*OvfPropertyQualifierDuplicate)(nil)).Elem() - minAPIVersionForType["OvfPropertyQualifierDuplicate"] = "4.0" } type OvfPropertyQualifierDuplicateFault OvfPropertyQualifierDuplicate @@ -59725,7 +59352,6 @@ type OvfPropertyQualifierIgnored struct { func init() { t["OvfPropertyQualifierIgnored"] = reflect.TypeOf((*OvfPropertyQualifierIgnored)(nil)).Elem() - minAPIVersionForType["OvfPropertyQualifierIgnored"] = "4.0" } type OvfPropertyQualifierIgnoredFault OvfPropertyQualifierIgnored @@ -59741,7 +59367,6 @@ type OvfPropertyType struct { func init() { t["OvfPropertyType"] = reflect.TypeOf((*OvfPropertyType)(nil)).Elem() - minAPIVersionForType["OvfPropertyType"] = "4.0" } type OvfPropertyTypeFault OvfPropertyType @@ -59757,7 +59382,6 @@ type OvfPropertyValue struct { func init() { t["OvfPropertyValue"] = reflect.TypeOf((*OvfPropertyValue)(nil)).Elem() - minAPIVersionForType["OvfPropertyValue"] = "4.0" } type OvfPropertyValueFault OvfPropertyValue @@ -59810,7 +59434,6 @@ type OvfResourceMap struct { func init() { t["OvfResourceMap"] = reflect.TypeOf((*OvfResourceMap)(nil)).Elem() - minAPIVersionForType["OvfResourceMap"] = "4.1" } // A common base class to host all the OVF subsystems's system faults. @@ -59824,7 +59447,6 @@ type OvfSystemFault struct { func init() { t["OvfSystemFault"] = reflect.TypeOf((*OvfSystemFault)(nil)).Elem() - minAPIVersionForType["OvfSystemFault"] = "4.0" } type OvfSystemFaultFault BaseOvfSystemFault @@ -59843,7 +59465,6 @@ type OvfToXmlUnsupportedElement struct { func init() { t["OvfToXmlUnsupportedElement"] = reflect.TypeOf((*OvfToXmlUnsupportedElement)(nil)).Elem() - minAPIVersionForType["OvfToXmlUnsupportedElement"] = "4.0" } type OvfToXmlUnsupportedElementFault OvfToXmlUnsupportedElement @@ -59876,7 +59497,6 @@ type OvfUnexpectedElement struct { func init() { t["OvfUnexpectedElement"] = reflect.TypeOf((*OvfUnexpectedElement)(nil)).Elem() - minAPIVersionForType["OvfUnexpectedElement"] = "4.0" } type OvfUnexpectedElementFault OvfUnexpectedElement @@ -59950,7 +59570,6 @@ type OvfUnsupportedAttribute struct { func init() { t["OvfUnsupportedAttribute"] = reflect.TypeOf((*OvfUnsupportedAttribute)(nil)).Elem() - minAPIVersionForType["OvfUnsupportedAttribute"] = "4.0" } type OvfUnsupportedAttributeFault BaseOvfUnsupportedAttribute @@ -59969,7 +59588,6 @@ type OvfUnsupportedAttributeValue struct { func init() { t["OvfUnsupportedAttributeValue"] = reflect.TypeOf((*OvfUnsupportedAttributeValue)(nil)).Elem() - minAPIVersionForType["OvfUnsupportedAttributeValue"] = "4.0" } type OvfUnsupportedAttributeValueFault OvfUnsupportedAttributeValue @@ -60050,7 +59668,6 @@ type OvfUnsupportedDiskProvisioning struct { func init() { t["OvfUnsupportedDiskProvisioning"] = reflect.TypeOf((*OvfUnsupportedDiskProvisioning)(nil)).Elem() - minAPIVersionForType["OvfUnsupportedDiskProvisioning"] = "4.1" } type OvfUnsupportedDiskProvisioningFault OvfUnsupportedDiskProvisioning @@ -60069,7 +59686,6 @@ type OvfUnsupportedElement struct { func init() { t["OvfUnsupportedElement"] = reflect.TypeOf((*OvfUnsupportedElement)(nil)).Elem() - minAPIVersionForType["OvfUnsupportedElement"] = "4.0" } type OvfUnsupportedElementFault BaseOvfUnsupportedElement @@ -60089,7 +59705,6 @@ type OvfUnsupportedElementValue struct { func init() { t["OvfUnsupportedElementValue"] = reflect.TypeOf((*OvfUnsupportedElementValue)(nil)).Elem() - minAPIVersionForType["OvfUnsupportedElementValue"] = 
"4.0" } type OvfUnsupportedElementValueFault OvfUnsupportedElementValue @@ -60108,7 +59723,6 @@ type OvfUnsupportedPackage struct { func init() { t["OvfUnsupportedPackage"] = reflect.TypeOf((*OvfUnsupportedPackage)(nil)).Elem() - minAPIVersionForType["OvfUnsupportedPackage"] = "4.0" } type OvfUnsupportedPackageFault BaseOvfUnsupportedPackage @@ -60127,7 +59741,6 @@ type OvfUnsupportedSection struct { func init() { t["OvfUnsupportedSection"] = reflect.TypeOf((*OvfUnsupportedSection)(nil)).Elem() - minAPIVersionForType["OvfUnsupportedSection"] = "4.0" } type OvfUnsupportedSectionFault OvfUnsupportedSection @@ -60208,7 +59821,7 @@ type OvfValidateHostResult struct { // Non-fatal warnings from the validation. Warning []LocalizedMethodFault `xml:"warning,omitempty" json:"warning,omitempty"` // An array of the disk provisioning type supported by the target host system. - SupportedDiskProvisioning []string `xml:"supportedDiskProvisioning,omitempty" json:"supportedDiskProvisioning,omitempty" vim:"4.1"` + SupportedDiskProvisioning []string `xml:"supportedDiskProvisioning,omitempty" json:"supportedDiskProvisioning,omitempty"` } func init() { @@ -60222,7 +59835,6 @@ type OvfWrongElement struct { func init() { t["OvfWrongElement"] = reflect.TypeOf((*OvfWrongElement)(nil)).Elem() - minAPIVersionForType["OvfWrongElement"] = "4.0" } type OvfWrongElementFault OvfWrongElement @@ -60242,7 +59854,6 @@ type OvfWrongNamespace struct { func init() { t["OvfWrongNamespace"] = reflect.TypeOf((*OvfWrongNamespace)(nil)).Elem() - minAPIVersionForType["OvfWrongNamespace"] = "4.0" } type OvfWrongNamespaceFault OvfWrongNamespace @@ -60263,7 +59874,6 @@ type OvfXmlFormat struct { func init() { t["OvfXmlFormat"] = reflect.TypeOf((*OvfXmlFormat)(nil)).Elem() - minAPIVersionForType["OvfXmlFormat"] = "4.0" } type OvfXmlFormatFault OvfXmlFormat @@ -60290,7 +59900,6 @@ type ParaVirtualSCSIController struct { func init() { t["ParaVirtualSCSIController"] = reflect.TypeOf((*ParaVirtualSCSIController)(nil)).Elem() - minAPIVersionForType["ParaVirtualSCSIController"] = "2.5 U2" } // ParaVirtualSCSIControllerOption is the data object that contains @@ -60301,7 +59910,6 @@ type ParaVirtualSCSIControllerOption struct { func init() { t["ParaVirtualSCSIControllerOption"] = reflect.TypeOf((*ParaVirtualSCSIControllerOption)(nil)).Elem() - minAPIVersionForType["ParaVirtualSCSIControllerOption"] = "2.5 U2" } type ParseDescriptor ParseDescriptorRequestType @@ -60343,7 +59951,6 @@ type PassiveNodeDeploymentSpec struct { func init() { t["PassiveNodeDeploymentSpec"] = reflect.TypeOf((*PassiveNodeDeploymentSpec)(nil)).Elem() - minAPIVersionForType["PassiveNodeDeploymentSpec"] = "6.5" } // The PassiveNodeNetworkSpec class defines VCHA Failover and Cluster @@ -60361,7 +59968,6 @@ type PassiveNodeNetworkSpec struct { func init() { t["PassiveNodeNetworkSpec"] = reflect.TypeOf((*PassiveNodeNetworkSpec)(nil)).Elem() - minAPIVersionForType["PassiveNodeNetworkSpec"] = "6.5" } // Thrown when a server login fails due to expired user password. 
@@ -60371,7 +59977,6 @@ type PasswordExpired struct { func init() { t["PasswordExpired"] = reflect.TypeOf((*PasswordExpired)(nil)).Elem() - minAPIVersionForType["PasswordExpired"] = "6.7.2" } type PasswordExpiredFault PasswordExpired @@ -60391,7 +59996,6 @@ type PasswordField struct { func init() { t["PasswordField"] = reflect.TypeOf((*PasswordField)(nil)).Elem() - minAPIVersionForType["PasswordField"] = "4.0" } // This fault is thrown if a patch install fails because the patch @@ -60675,7 +60279,7 @@ type PerfCounterInfo struct { // performance data that is typically useful to administrators and // developers alike. The specific level of each counter is documented in // the respective counter-documentation pages, by group. See `PerformanceManager` for links to the counter group pages. - Level int32 `xml:"level,omitempty" json:"level,omitempty" vim:"2.5"` + Level int32 `xml:"level,omitempty" json:"level,omitempty"` // Minimum level at which the per device metrics of this type will be // collected by vCenter Server. // @@ -60685,7 +60289,7 @@ type PerfCounterInfo struct { // counter is collected at a certain level, the aggregate metric is also // calculated at that level, i.e., perDeviceLevel is greater than or // equal to level. - PerDeviceLevel int32 `xml:"perDeviceLevel,omitempty" json:"perDeviceLevel,omitempty" vim:"4.1"` + PerDeviceLevel int32 `xml:"perDeviceLevel,omitempty" json:"perDeviceLevel,omitempty"` // Deprecated as of VI API 2.5, this property is not used. // // The counter IDs associated with the same performance counter name for @@ -60865,7 +60469,7 @@ type PerfInterval struct { DynamicData // A unique identifier for the interval. - Key int32 `xml:"key" json:"key" vim:"2.5"` + Key int32 `xml:"key" json:"key"` // Number of seconds that data is sampled for this interval. // // The real-time @@ -60875,10 +60479,10 @@ type PerfInterval struct { // // A localized string that provides a // name for the interval. Names include: - // - "Past Day" - // - "Past Week" - // - "Past Month" - // - "Past Year" + // - "Past Day" + // - "Past Week" + // - "Past Month" + // - "Past Year" // // The name is not meaningful in terms of system behavior. That is, the // interval named “Past Week” works as it does because of its @@ -60894,7 +60498,7 @@ type PerfInterval struct { // property for this historical interval. For ESX, the value of this // property is null. For vCenter Server, the value will be a number from 1 // to 4. - Level int32 `xml:"level,omitempty" json:"level,omitempty" vim:"2.5"` + Level int32 `xml:"level,omitempty" json:"level,omitempty"` // Indicates whether the interval is enabled (true) or disabled (false). // // Disabling a historical interval prevents vCenter Server from collecting @@ -60903,7 +60507,7 @@ type PerfInterval struct { // For example, disabling the "Past Month" interval disables both "Past // Month" and "Past Year" intervals. The system will aggregate and retain // performance data using the "Past Day" and "Past Week" intervals only. - Enabled bool `xml:"enabled" json:"enabled" vim:"2.5"` + Enabled bool `xml:"enabled" json:"enabled"` } func init() { @@ -60930,19 +60534,19 @@ type PerfMetricId struct { // // It identifies the instance of the metric // with its source. This property may be empty. - // - For memory and aggregated statistics, this property is empty. - // - For host and virtual machine devices, this property contains the - // name of the device, such as the name of the host-bus adapter or - // the name of the virtual Ethernet adapter. 
For example, - // “mpx.vmhba33:C0:T0:L0” or - // “vmnic0:” - // - For a CPU, this property identifies the numeric position within - // the CPU core, such as 0, 1, 2, 3. - // - For a virtual disk, this property identifies the file type: - // - DISKFILE, for virtual machine base-disk files - // - SWAPFILE, for virtual machine swap files - // - DELTAFILE, for virtual machine snapshot overhead files - // - OTHERFILE, for all other files of a virtual machine + // - For memory and aggregated statistics, this property is empty. + // - For host and virtual machine devices, this property contains the + // name of the device, such as the name of the host-bus adapter or + // the name of the virtual Ethernet adapter. For example, + // “mpx.vmhba33:C0:T0:L0” or + // “vmnic0:” + // - For a CPU, this property identifies the numeric position within + // the CPU core, such as 0, 1, 2, 3. + // - For a virtual disk, this property identifies the file type: + // - DISKFILE, for virtual machine base-disk files + // - SWAPFILE, for virtual machine swap files + // - DELTAFILE, for virtual machine snapshot overhead files + // - OTHERFILE, for all other files of a virtual machine Instance string `xml:"instance" json:"instance"` } @@ -61091,8 +60695,8 @@ type PerfQuerySpec struct { // one of the historical intervals for this property. // // See `PerfInterval` for more information. - // - To obtain the greatest detail, use the provider’s `PerfProviderSummary.refreshRate` for this - // property. + // - To obtain the greatest detail, use the provider’s `PerfProviderSummary.refreshRate` for this + // property. IntervalId int32 `xml:"intervalId,omitempty" json:"intervalId,omitempty"` // The format to be used while returning the statistics. // @@ -61269,7 +60873,6 @@ type PerformanceManagerCounterLevelMapping struct { func init() { t["PerformanceManagerCounterLevelMapping"] = reflect.TypeOf((*PerformanceManagerCounterLevelMapping)(nil)).Elem() - minAPIVersionForType["PerformanceManagerCounterLevelMapping"] = "4.1" } // Data object to capture all information needed to @@ -61287,7 +60890,6 @@ type PerformanceStatisticsDescription struct { func init() { t["PerformanceStatisticsDescription"] = reflect.TypeOf((*PerformanceStatisticsDescription)(nil)).Elem() - minAPIVersionForType["PerformanceStatisticsDescription"] = "4.0" } // This data object type provides assignment of some role access to @@ -61369,7 +60971,6 @@ type PermissionProfile struct { func init() { t["PermissionProfile"] = reflect.TypeOf((*PermissionProfile)(nil)).Elem() - minAPIVersionForType["PermissionProfile"] = "4.1" } // This event records the removal of a permission. @@ -61390,9 +60991,9 @@ type PermissionUpdatedEvent struct { // Whether or not the permission applies to sub-entities. Propagate bool `xml:"propagate" json:"propagate"` // The previous associated role. - PrevRole *RoleEventArgument `xml:"prevRole,omitempty" json:"prevRole,omitempty" vim:"6.5"` + PrevRole *RoleEventArgument `xml:"prevRole,omitempty" json:"prevRole,omitempty"` // Previous propogate value. - PrevPropagate *bool `xml:"prevPropagate" json:"prevPropagate,omitempty" vim:"6.5"` + PrevPropagate *bool `xml:"prevPropagate" json:"prevPropagate,omitempty"` } func init() { @@ -61454,12 +61055,12 @@ type PhysicalNic struct { // The specification of the physical network adapter. 
Spec PhysicalNicSpec `xml:"spec" json:"spec"` // Flag indicating whether the NIC is wake-on-LAN capable - WakeOnLanSupported bool `xml:"wakeOnLanSupported" json:"wakeOnLanSupported" vim:"2.5"` + WakeOnLanSupported bool `xml:"wakeOnLanSupported" json:"wakeOnLanSupported"` // The media access control (MAC) address of the physical // network adapter. - Mac string `xml:"mac" json:"mac" vim:"2.5"` + Mac string `xml:"mac" json:"mac"` // The FCoE configuration of the physical network adapter. - FcoeConfiguration *FcoeConfig `xml:"fcoeConfiguration,omitempty" json:"fcoeConfiguration,omitempty" vim:"5.0"` + FcoeConfiguration *FcoeConfig `xml:"fcoeConfiguration,omitempty" json:"fcoeConfiguration,omitempty"` // Deprecated as of vSphere API 8.0. VMDirectPath Gen 2 is no longer supported and // there is no replacement. // @@ -61474,7 +61075,7 @@ type PhysicalNic struct { // the NIC capability. // // See also `HostCapability.vmDirectPathGen2Supported`. - VmDirectPathGen2Supported *bool `xml:"vmDirectPathGen2Supported" json:"vmDirectPathGen2Supported,omitempty" vim:"4.1"` + VmDirectPathGen2Supported *bool `xml:"vmDirectPathGen2Supported" json:"vmDirectPathGen2Supported,omitempty"` // Deprecated as of vSphere API 8.0. VMDirectPath Gen 2 is no longer supported and // there is no replacement. // @@ -61484,28 +61085,28 @@ type PhysicalNic struct { // // A mode may require that the associated vSphere Distributed Switch have // a particular ProductSpec in order for network passthrough to be possible. - VmDirectPathGen2SupportedMode string `xml:"vmDirectPathGen2SupportedMode,omitempty" json:"vmDirectPathGen2SupportedMode,omitempty" vim:"4.1"` + VmDirectPathGen2SupportedMode string `xml:"vmDirectPathGen2SupportedMode,omitempty" json:"vmDirectPathGen2SupportedMode,omitempty"` // Flag indicating whether the NIC allows resource pool based scheduling // for network I/O control. - ResourcePoolSchedulerAllowed *bool `xml:"resourcePoolSchedulerAllowed" json:"resourcePoolSchedulerAllowed,omitempty" vim:"4.1"` + ResourcePoolSchedulerAllowed *bool `xml:"resourcePoolSchedulerAllowed" json:"resourcePoolSchedulerAllowed,omitempty"` // If `PhysicalNic.resourcePoolSchedulerAllowed` is false, this property // advertises the reason for disallowing resource scheduling on // this NIC. // // The reasons may be one of // `PhysicalNicResourcePoolSchedulerDisallowedReason_enum` - ResourcePoolSchedulerDisallowedReason []string `xml:"resourcePoolSchedulerDisallowedReason,omitempty" json:"resourcePoolSchedulerDisallowedReason,omitempty" vim:"4.1"` + ResourcePoolSchedulerDisallowedReason []string `xml:"resourcePoolSchedulerDisallowedReason,omitempty" json:"resourcePoolSchedulerDisallowedReason,omitempty"` // If set the flag indicates if the physical network adapter supports // autonegotiate. 
- AutoNegotiateSupported *bool `xml:"autoNegotiateSupported" json:"autoNegotiateSupported,omitempty" vim:"4.1"` + AutoNegotiateSupported *bool `xml:"autoNegotiateSupported" json:"autoNegotiateSupported,omitempty"` // If set the flag indicates whether a physical nic supports Enhanced // Networking Stack driver - EnhancedNetworkingStackSupported *bool `xml:"enhancedNetworkingStackSupported" json:"enhancedNetworkingStackSupported,omitempty" vim:"6.7"` + EnhancedNetworkingStackSupported *bool `xml:"enhancedNetworkingStackSupported" json:"enhancedNetworkingStackSupported,omitempty"` // If set the flag indicates whether a physical nic supports Enhanced // Networking Stack interrupt mode - EnsInterruptSupported *bool `xml:"ensInterruptSupported" json:"ensInterruptSupported,omitempty" vim:"7.0"` + EnsInterruptSupported *bool `xml:"ensInterruptSupported" json:"ensInterruptSupported,omitempty"` // Associated RDMA device, if any. - RdmaDevice string `xml:"rdmaDevice,omitempty" json:"rdmaDevice,omitempty" vim:"7.0"` + RdmaDevice string `xml:"rdmaDevice,omitempty" json:"rdmaDevice,omitempty"` // The identifier of the DPU by which the physical NIC is backed. // // When physical NIC is not backed by DPU, dpuId will be unset. @@ -61550,7 +61151,6 @@ type PhysicalNicCdpDeviceCapability struct { func init() { t["PhysicalNicCdpDeviceCapability"] = reflect.TypeOf((*PhysicalNicCdpDeviceCapability)(nil)).Elem() - minAPIVersionForType["PhysicalNicCdpDeviceCapability"] = "2.5" } // CDP (Cisco Discovery Protocol) is a link level protocol that allows @@ -61645,7 +61245,6 @@ type PhysicalNicCdpInfo struct { func init() { t["PhysicalNicCdpInfo"] = reflect.TypeOf((*PhysicalNicCdpInfo)(nil)).Elem() - minAPIVersionForType["PhysicalNicCdpInfo"] = "2.5" } // The configuration of the physical network adapter containing @@ -61701,7 +61300,7 @@ type PhysicalNicHintInfo struct { // CDP-awared device or CDP is not enabled on the device, this // property will be unset. // `PhysicalNicCdpInfo` - ConnectedSwitchPort *PhysicalNicCdpInfo `xml:"connectedSwitchPort,omitempty" json:"connectedSwitchPort,omitempty" vim:"2.5"` + ConnectedSwitchPort *PhysicalNicCdpInfo `xml:"connectedSwitchPort,omitempty" json:"connectedSwitchPort,omitempty"` // If the uplink directly connects to an LLDP-aware network device and // the device's LLDP broadcast is enabled, this property will be set to // return the LLDP information that is received on this physical network @@ -61709,7 +61308,7 @@ type PhysicalNicHintInfo struct { // // If the uplink is not connecting to a LLDP-aware device or // LLDP is not enabled on the device, this property will be unset. 
- LldpInfo *LinkLayerDiscoveryProtocolInfo `xml:"lldpInfo,omitempty" json:"lldpInfo,omitempty" vim:"5.0"` + LldpInfo *LinkLayerDiscoveryProtocolInfo `xml:"lldpInfo,omitempty" json:"lldpInfo,omitempty"` } func init() { @@ -61778,7 +61377,6 @@ type PhysicalNicProfile struct { func init() { t["PhysicalNicProfile"] = reflect.TypeOf((*PhysicalNicProfile)(nil)).Elem() - minAPIVersionForType["PhysicalNicProfile"] = "4.0" } // This data object type describes the physical network adapter specification @@ -61801,16 +61399,30 @@ type PhysicalNicSpec struct { LinkSpeed *PhysicalNicLinkInfo `xml:"linkSpeed,omitempty" json:"linkSpeed,omitempty"` // If set the flag indicates if the physical network adapter is // configured for Enhanced Networking Stack - EnableEnhancedNetworkingStack *bool `xml:"enableEnhancedNetworkingStack" json:"enableEnhancedNetworkingStack,omitempty" vim:"6.7"` + EnableEnhancedNetworkingStack *bool `xml:"enableEnhancedNetworkingStack" json:"enableEnhancedNetworkingStack,omitempty"` // If set the flag indicates if the physical network adapter is // configured for Enhanced Networking Stack interrupt mode - EnsInterruptEnabled *bool `xml:"ensInterruptEnabled" json:"ensInterruptEnabled,omitempty" vim:"7.0"` + EnsInterruptEnabled *bool `xml:"ensInterruptEnabled" json:"ensInterruptEnabled,omitempty"` } func init() { t["PhysicalNicSpec"] = reflect.TypeOf((*PhysicalNicSpec)(nil)).Elem() } +// Specifies SSL policy to trust a pinned SSL certificate. +type PinnedCertificate struct { + IoFilterManagerSslTrust + + // PEM-encoded pinned SSL certificate of the server that needs to be + // trusted. + SslCertificate string `xml:"sslCertificate" json:"sslCertificate"` +} + +func init() { + t["PinnedCertificate"] = reflect.TypeOf((*PinnedCertificate)(nil)).Elem() + minAPIVersionForType["PinnedCertificate"] = "8.0.3.0" +} + type PlaceVm PlaceVmRequestType func init() { @@ -61862,7 +61474,6 @@ type PlacementAction struct { func init() { t["PlacementAction"] = reflect.TypeOf((*PlacementAction)(nil)).Elem() - minAPIVersionForType["PlacementAction"] = "6.0" } // The `PlacementAffinityRule` data object specifies @@ -61891,7 +61502,6 @@ type PlacementAffinityRule struct { func init() { t["PlacementAffinityRule"] = reflect.TypeOf((*PlacementAffinityRule)(nil)).Elem() - minAPIVersionForType["PlacementAffinityRule"] = "6.0" } // PlacementRankResult is the class of the result returned by @@ -61925,7 +61535,6 @@ type PlacementRankResult struct { func init() { t["PlacementRankResult"] = reflect.TypeOf((*PlacementRankResult)(nil)).Elem() - minAPIVersionForType["PlacementRankResult"] = "6.0" } // PlacementRankSpec encapsulates all of the inputs passed to @@ -61947,7 +61556,6 @@ type PlacementRankSpec struct { func init() { t["PlacementRankSpec"] = reflect.TypeOf((*PlacementRankSpec)(nil)).Elem() - minAPIVersionForType["PlacementRankSpec"] = "6.0" } // `ClusterComputeResource.PlaceVm` method can invoke DRS @@ -61967,7 +61575,6 @@ type PlacementResult struct { func init() { t["PlacementResult"] = reflect.TypeOf((*PlacementResult)(nil)).Elem() - minAPIVersionForType["PlacementResult"] = "6.0" } // PlacementSpec encapsulates all of the information passed to the @@ -62084,7 +61691,6 @@ type PlacementSpec struct { func init() { t["PlacementSpec"] = reflect.TypeOf((*PlacementSpec)(nil)).Elem() - minAPIVersionForType["PlacementSpec"] = "6.0" } // A PlatformConfigFault is a catch-all fault indicating that some error has @@ -62128,7 +61734,6 @@ type PnicUplinkProfile struct { func init() { t["PnicUplinkProfile"] = 
reflect.TypeOf((*PnicUplinkProfile)(nil)).Elem() - minAPIVersionForType["PnicUplinkProfile"] = "4.0" } // The disk locator class. @@ -62149,12 +61754,11 @@ type PodDiskLocator struct { // interact with it. // This is an optional parameter and if user doesn't specify profile, // the default behavior will apply. - Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr" json:"profile,omitempty" vim:"5.5"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr" json:"profile,omitempty"` } func init() { t["PodDiskLocator"] = reflect.TypeOf((*PodDiskLocator)(nil)).Elem() - minAPIVersionForType["PodDiskLocator"] = "5.0" } // An entry containing storage DRS configuration, runtime @@ -62183,7 +61787,6 @@ type PodStorageDrsEntry struct { func init() { t["PodStorageDrsEntry"] = reflect.TypeOf((*PodStorageDrsEntry)(nil)).Elem() - minAPIVersionForType["PodStorageDrsEntry"] = "5.0" } // The `PolicyOption` data object represents one or more configuration @@ -62211,7 +61814,6 @@ type PolicyOption struct { func init() { t["PolicyOption"] = reflect.TypeOf((*PolicyOption)(nil)).Elem() - minAPIVersionForType["PolicyOption"] = "4.0" } // `PortGroupProfile` is the base class for the different port group @@ -62233,7 +61835,6 @@ type PortGroupProfile struct { func init() { t["PortGroupProfile"] = reflect.TypeOf((*PortGroupProfile)(nil)).Elem() - minAPIVersionForType["PortGroupProfile"] = "4.0" } // Searching for users and groups on POSIX systems provides @@ -62405,7 +62006,6 @@ type PowerOnFtSecondaryFailed struct { func init() { t["PowerOnFtSecondaryFailed"] = reflect.TypeOf((*PowerOnFtSecondaryFailed)(nil)).Elem() - minAPIVersionForType["PowerOnFtSecondaryFailed"] = "4.0" } type PowerOnFtSecondaryFailedFault PowerOnFtSecondaryFailed @@ -62434,7 +62034,6 @@ type PowerOnFtSecondaryTimedout struct { func init() { t["PowerOnFtSecondaryTimedout"] = reflect.TypeOf((*PowerOnFtSecondaryTimedout)(nil)).Elem() - minAPIVersionForType["PowerOnFtSecondaryTimedout"] = "4.0" } type PowerOnFtSecondaryTimedoutFault PowerOnFtSecondaryTimedout @@ -62456,7 +62055,7 @@ type PowerOnMultiVMRequestType struct { // for this power-on session. The names and values of the // options are defined in // `ClusterPowerOnVmOption_enum`. - Option []BaseOptionValue `xml:"option,omitempty,typeattr" json:"option,omitempty" vim:"4.1"` + Option []BaseOptionValue `xml:"option,omitempty,typeattr" json:"option,omitempty"` } func init() { @@ -62530,7 +62129,6 @@ type PowerSystemCapability struct { func init() { t["PowerSystemCapability"] = reflect.TypeOf((*PowerSystemCapability)(nil)).Elem() - minAPIVersionForType["PowerSystemCapability"] = "4.1" } // Power System Info data object. @@ -62548,7 +62146,6 @@ type PowerSystemInfo struct { func init() { t["PowerSystemInfo"] = reflect.TypeOf((*PowerSystemInfo)(nil)).Elem() - minAPIVersionForType["PowerSystemInfo"] = "4.1" } // The parameters of `HostSystem.PowerUpHostFromStandBy_Task`. @@ -62604,7 +62201,6 @@ type PrivilegeAvailability struct { func init() { t["PrivilegeAvailability"] = reflect.TypeOf((*PrivilegeAvailability)(nil)).Elem() - minAPIVersionForType["PrivilegeAvailability"] = "5.5" } // Describes a basic privilege policy. @@ -62623,7 +62219,6 @@ type PrivilegePolicyDef struct { func init() { t["PrivilegePolicyDef"] = reflect.TypeOf((*PrivilegePolicyDef)(nil)).Elem() - minAPIVersionForType["PrivilegePolicyDef"] = "2.5" } // ProductComponentInfo data object type describes installed components. 
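For the Option parameter of PowerOnMultiVMRequestType above, each entry is an OptionValue keyed by a name from ClusterPowerOnVmOption_enum. A rough sketch of assembling such a request body, assuming the generated ClusterPowerOnVmOption and DrsBehavior constants; the MoRef values are placeholders and the key/value pair is illustrative only:

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	vms := []types.ManagedObjectReference{
		{Type: "VirtualMachine", Value: "vm-101"},
		{Type: "VirtualMachine", Value: "vm-102"},
	}

	req := types.PowerOnMultiVMRequestType{
		This: types.ManagedObjectReference{Type: "Datacenter", Value: "datacenter-2"},
		Vm:   vms,
		Option: []types.BaseOptionValue{
			// Override the DRS automation level for this power-on session.
			&types.OptionValue{
				Key:   string(types.ClusterPowerOnVmOptionOverrideAutomationLevel),
				Value: string(types.DrsBehaviorManual),
			},
		},
	}

	fmt.Printf("powering on %d VMs with %d option(s)\n", len(req.Vm), len(req.Option))
}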
@@ -62652,7 +62247,6 @@ type ProductComponentInfo struct { func init() { t["ProductComponentInfo"] = reflect.TypeOf((*ProductComponentInfo)(nil)).Elem() - minAPIVersionForType["ProductComponentInfo"] = "2.5" } // DataObject which represents an ApplyProfile element. @@ -62670,7 +62264,6 @@ type ProfileApplyProfileElement struct { func init() { t["ProfileApplyProfileElement"] = reflect.TypeOf((*ProfileApplyProfileElement)(nil)).Elem() - minAPIVersionForType["ProfileApplyProfileElement"] = "5.0" } // The `ProfileApplyProfileProperty` data object defines one or more subprofiles. @@ -62687,7 +62280,6 @@ type ProfileApplyProfileProperty struct { func init() { t["ProfileApplyProfileProperty"] = reflect.TypeOf((*ProfileApplyProfileProperty)(nil)).Elem() - minAPIVersionForType["ProfileApplyProfileProperty"] = "5.0" } // This event records that a Profile was associated with a managed entitiy. @@ -62697,7 +62289,6 @@ type ProfileAssociatedEvent struct { func init() { t["ProfileAssociatedEvent"] = reflect.TypeOf((*ProfileAssociatedEvent)(nil)).Elem() - minAPIVersionForType["ProfileAssociatedEvent"] = "4.0" } // This event records that the profile has beed edited @@ -62707,7 +62298,6 @@ type ProfileChangedEvent struct { func init() { t["ProfileChangedEvent"] = reflect.TypeOf((*ProfileChangedEvent)(nil)).Elem() - minAPIVersionForType["ProfileChangedEvent"] = "4.0" } // DataObject to Compose expressions. @@ -62734,7 +62324,6 @@ type ProfileCompositeExpression struct { func init() { t["ProfileCompositeExpression"] = reflect.TypeOf((*ProfileCompositeExpression)(nil)).Elem() - minAPIVersionForType["ProfileCompositeExpression"] = "4.0" } // The `ProfileCompositePolicyOptionMetadata` data object represents the metadata information @@ -62758,7 +62347,6 @@ type ProfileCompositePolicyOptionMetadata struct { func init() { t["ProfileCompositePolicyOptionMetadata"] = reflect.TypeOf((*ProfileCompositePolicyOptionMetadata)(nil)).Elem() - minAPIVersionForType["ProfileCompositePolicyOptionMetadata"] = "4.0" } type ProfileConfigInfo struct { @@ -62790,7 +62378,6 @@ type ProfileCreateSpec struct { func init() { t["ProfileCreateSpec"] = reflect.TypeOf((*ProfileCreateSpec)(nil)).Elem() - minAPIVersionForType["ProfileCreateSpec"] = "4.0" } // This event records that a Profile was created. @@ -62800,7 +62387,6 @@ type ProfileCreatedEvent struct { func init() { t["ProfileCreatedEvent"] = reflect.TypeOf((*ProfileCreatedEvent)(nil)).Elem() - minAPIVersionForType["ProfileCreatedEvent"] = "4.0" } // The `ProfileDeferredPolicyOptionParameter` data object contains @@ -62828,7 +62414,6 @@ type ProfileDeferredPolicyOptionParameter struct { func init() { t["ProfileDeferredPolicyOptionParameter"] = reflect.TypeOf((*ProfileDeferredPolicyOptionParameter)(nil)).Elem() - minAPIVersionForType["ProfileDeferredPolicyOptionParameter"] = "4.0" } // The `ProfileDescription` data object describes a profile. 
@@ -62844,7 +62429,6 @@ type ProfileDescription struct { func init() { t["ProfileDescription"] = reflect.TypeOf((*ProfileDescription)(nil)).Elem() - minAPIVersionForType["ProfileDescription"] = "4.0" } // The `ProfileDescriptionSection` data object @@ -62861,7 +62445,6 @@ type ProfileDescriptionSection struct { func init() { t["ProfileDescriptionSection"] = reflect.TypeOf((*ProfileDescriptionSection)(nil)).Elem() - minAPIVersionForType["ProfileDescriptionSection"] = "4.0" } // This event records that a Profile was dissociated from a managed entity @@ -62871,7 +62454,6 @@ type ProfileDissociatedEvent struct { func init() { t["ProfileDissociatedEvent"] = reflect.TypeOf((*ProfileDissociatedEvent)(nil)).Elem() - minAPIVersionForType["ProfileDissociatedEvent"] = "4.0" } // This event records a Profile specific event. @@ -62884,20 +62466,19 @@ type ProfileEvent struct { func init() { t["ProfileEvent"] = reflect.TypeOf((*ProfileEvent)(nil)).Elem() - minAPIVersionForType["ProfileEvent"] = "4.0" } // The event argument is a Profile object type ProfileEventArgument struct { EventArgument + // Refers instance of `Profile`. Profile ManagedObjectReference `xml:"profile" json:"profile"` Name string `xml:"name" json:"name"` } func init() { t["ProfileEventArgument"] = reflect.TypeOf((*ProfileEventArgument)(nil)).Elem() - minAPIVersionForType["ProfileEventArgument"] = "4.0" } // The `ProfileExecuteError` data object @@ -62913,7 +62494,6 @@ type ProfileExecuteError struct { func init() { t["ProfileExecuteError"] = reflect.TypeOf((*ProfileExecuteError)(nil)).Elem() - minAPIVersionForType["ProfileExecuteError"] = "4.0" } // The `ProfileExecuteResult` data object contains the results from a @@ -62972,10 +62552,10 @@ type ProfileExecuteResult struct { // When `HostProfile.ExecuteHostProfile` returns a success status, // the requireInput list contains the complete list of parameters, // consisting of the following data: - // - Deferred parameter values resolved through successive calls to - // `HostProfile.ExecuteHostProfile`. - // - Default parameter values from the host configuration. - // - User-specified values that override the defaults. + // - Deferred parameter values resolved through successive calls to + // `HostProfile.ExecuteHostProfile`. + // - Default parameter values from the host configuration. + // - User-specified values that override the defaults. // // You can specify the returned requireInput list in the // userInput parameter to the @@ -62991,7 +62571,6 @@ type ProfileExecuteResult struct { func init() { t["ProfileExecuteResult"] = reflect.TypeOf((*ProfileExecuteResult)(nil)).Elem() - minAPIVersionForType["ProfileExecuteResult"] = "4.0" } type ProfileExpression struct { @@ -63027,7 +62606,6 @@ type ProfileExpressionMetadata struct { func init() { t["ProfileExpressionMetadata"] = reflect.TypeOf((*ProfileExpressionMetadata)(nil)).Elem() - minAPIVersionForType["ProfileExpressionMetadata"] = "4.0" } // This data object represents the metadata information of a Profile. 
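The requireInput round trip described above is easiest to see as a small helper: while ExecuteHostProfile reports that input is still needed, the returned requireInput list is exactly what gets filled in and passed back as userInput on the next call, and on success it is the complete parameter list to hand to ApplyHostConfig. A sketch assuming the standard ProfileExecuteResultStatus values; the SOAP calls themselves are omitted:

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

// nextUserInput inspects a ProfileExecuteResult and returns the parameter list
// to use next, plus whether another ExecuteHostProfile round is required.
func nextUserInput(res types.ProfileExecuteResult) ([]types.ProfileDeferredPolicyOptionParameter, bool) {
	switch types.ProfileExecuteResultStatus(res.Status) {
	case types.ProfileExecuteResultStatusSuccess:
		// Complete list: resolved deferred values, defaults and user overrides.
		return res.RequireInput, false
	case types.ProfileExecuteResultStatusNeedInput:
		// Caller fills in the missing values, then calls execute again.
		return res.RequireInput, true
	default:
		return nil, false
	}
}

func main() {
	var res types.ProfileExecuteResult // normally returned by ExecuteHostProfile
	params, needMore := nextUserInput(res)
	fmt.Println(len(params), needMore)
}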
@@ -63037,7 +62615,7 @@ type ProfileMetadata struct { // Type of the Profile Key string `xml:"key" json:"key"` // Type identifier for the ApplyProfile - ProfileTypeName string `xml:"profileTypeName,omitempty" json:"profileTypeName,omitempty" vim:"5.0"` + ProfileTypeName string `xml:"profileTypeName,omitempty" json:"profileTypeName,omitempty"` // Property which describes the profile Description *ExtendedDescription `xml:"description,omitempty" json:"description,omitempty"` // Property that determines a sorting order for display purposes. @@ -63046,14 +62624,14 @@ type ProfileMetadata struct { // the list contains more than one sort spec, then the precedence should // be determined by the list order (i.e. sort first by the first spec in // the list, then sort by the second spec in the list, etc). - SortSpec []ProfileMetadataProfileSortSpec `xml:"sortSpec,omitempty" json:"sortSpec,omitempty" vim:"5.0"` + SortSpec []ProfileMetadataProfileSortSpec `xml:"sortSpec,omitempty" json:"sortSpec,omitempty"` // Identifies the profile category that this subprofile is a part of. // // The // value of this string should correspond to the key value of a // `ProfileCategoryMetadata` object's `ElementDescription.key` // in its `ProfileCategoryMetadata.id` property. - ProfileCategory string `xml:"profileCategory,omitempty" json:"profileCategory,omitempty" vim:"5.1"` + ProfileCategory string `xml:"profileCategory,omitempty" json:"profileCategory,omitempty"` // Property indicating that the subprofile described by this // ProfileMetadata object is declared in the // `ProfileComponentMetadata.profileTypeNames` of the specified @@ -63066,14 +62644,13 @@ type ProfileMetadata struct { // This property should not be present for subprofiles that are not directly // declared in the `ProfileComponentMetadata.profileTypeNames` // property of a `ProfileComponentMetadata` object. - ProfileComponent string `xml:"profileComponent,omitempty" json:"profileComponent,omitempty" vim:"5.1"` + ProfileComponent string `xml:"profileComponent,omitempty" json:"profileComponent,omitempty"` // A list of ProfileOperationMessage for this profile. - OperationMessages []ProfileMetadataProfileOperationMessage `xml:"operationMessages,omitempty" json:"operationMessages,omitempty" vim:"6.7"` + OperationMessages []ProfileMetadataProfileOperationMessage `xml:"operationMessages,omitempty" json:"operationMessages,omitempty"` } func init() { t["ProfileMetadata"] = reflect.TypeOf((*ProfileMetadata)(nil)).Elem() - minAPIVersionForType["ProfileMetadata"] = "4.0" } // Some operations on host profile documents may cause unexpected result. @@ -63093,7 +62670,6 @@ type ProfileMetadataProfileOperationMessage struct { func init() { t["ProfileMetadataProfileOperationMessage"] = reflect.TypeOf((*ProfileMetadataProfileOperationMessage)(nil)).Elem() - minAPIVersionForType["ProfileMetadataProfileOperationMessage"] = "6.7" } type ProfileMetadataProfileSortSpec struct { @@ -63127,18 +62703,17 @@ type ProfileParameterMetadata struct { // Default value that can be used for the parameter. DefaultValue AnyType `xml:"defaultValue,omitempty,typeattr" json:"defaultValue,omitempty"` // Whether the parameter will not be displayed in UI. - Hidden *bool `xml:"hidden" json:"hidden,omitempty" vim:"6.5"` + Hidden *bool `xml:"hidden" json:"hidden,omitempty"` // Whether the parameter is security sensitive. 
- SecuritySensitive *bool `xml:"securitySensitive" json:"securitySensitive,omitempty" vim:"6.5"` + SecuritySensitive *bool `xml:"securitySensitive" json:"securitySensitive,omitempty"` // Indicates that the parameter value is read-only. - ReadOnly *bool `xml:"readOnly" json:"readOnly,omitempty" vim:"6.5"` + ReadOnly *bool `xml:"readOnly" json:"readOnly,omitempty"` // Relations with other profile or parameters. - ParameterRelations []ProfileParameterMetadataParameterRelationMetadata `xml:"parameterRelations,omitempty" json:"parameterRelations,omitempty" vim:"6.7"` + ParameterRelations []ProfileParameterMetadataParameterRelationMetadata `xml:"parameterRelations,omitempty" json:"parameterRelations,omitempty"` } func init() { t["ProfileParameterMetadata"] = reflect.TypeOf((*ProfileParameterMetadata)(nil)).Elem() - minAPIVersionForType["ProfileParameterMetadata"] = "4.0" } // This class to define a relation between the parameter and a profile @@ -63160,7 +62735,6 @@ type ProfileParameterMetadataParameterRelationMetadata struct { func init() { t["ProfileParameterMetadataParameterRelationMetadata"] = reflect.TypeOf((*ProfileParameterMetadataParameterRelationMetadata)(nil)).Elem() - minAPIVersionForType["ProfileParameterMetadataParameterRelationMetadata"] = "6.7" } // The `ProfilePolicy` data object represents a policy. @@ -63175,7 +62749,6 @@ type ProfilePolicy struct { func init() { t["ProfilePolicy"] = reflect.TypeOf((*ProfilePolicy)(nil)).Elem() - minAPIVersionForType["ProfilePolicy"] = "4.0" } // The `ProfilePolicyMetadata` data object represents the metadata information @@ -63196,7 +62769,6 @@ type ProfilePolicyMetadata struct { func init() { t["ProfilePolicyMetadata"] = reflect.TypeOf((*ProfilePolicyMetadata)(nil)).Elem() - minAPIVersionForType["ProfilePolicyMetadata"] = "4.0" } // The `ProfilePolicyOptionMetadata` data object contains the metadata information @@ -63205,17 +62777,17 @@ type ProfilePolicyOptionMetadata struct { DynamicData // Identifier for the policy option. - // - The id.key value - // (`ExtendedElementDescription*.*ElementDescription.key`) - // identifies the policy option type. - // - The id.label property - // (`ExtendedElementDescription*.*Description.label`) - // contains a brief localizable message describing the policy option. - // - The id.summary property - // (`ExtendedElementDescription*.*Description.summary`) - // contains a localizable summary of the policy option. - // Summary information can contain embedded variable names which can - // be replaced with values from the parameter property. + // - The id.key value + // (`ExtendedElementDescription*.*ElementDescription.key`) + // identifies the policy option type. + // - The id.label property + // (`ExtendedElementDescription*.*Description.label`) + // contains a brief localizable message describing the policy option. + // - The id.summary property + // (`ExtendedElementDescription*.*Description.summary`) + // contains a localizable summary of the policy option. + // Summary information can contain embedded variable names which can + // be replaced with values from the parameter property. Id ExtendedElementDescription `xml:"id" json:"id"` // Metadata about the parameters for the policy option. 
Parameter []ProfileParameterMetadata `xml:"parameter,omitempty" json:"parameter,omitempty"` @@ -63223,7 +62795,6 @@ type ProfilePolicyOptionMetadata struct { func init() { t["ProfilePolicyOptionMetadata"] = reflect.TypeOf((*ProfilePolicyOptionMetadata)(nil)).Elem() - minAPIVersionForType["ProfilePolicyOptionMetadata"] = "4.0" } type ProfileProfileStructure struct { @@ -63275,14 +62846,13 @@ type ProfilePropertyPath struct { // // See `PolicyOption*.*PolicyOption.parameter` // and `KeyAnyValue.key`. - ParameterId string `xml:"parameterId,omitempty" json:"parameterId,omitempty" vim:"5.1"` + ParameterId string `xml:"parameterId,omitempty" json:"parameterId,omitempty"` // Policy option identifier. - PolicyOptionId string `xml:"policyOptionId,omitempty" json:"policyOptionId,omitempty" vim:"6.7"` + PolicyOptionId string `xml:"policyOptionId,omitempty" json:"policyOptionId,omitempty"` } func init() { t["ProfilePropertyPath"] = reflect.TypeOf((*ProfilePropertyPath)(nil)).Elem() - minAPIVersionForType["ProfilePropertyPath"] = "4.0" } // This event records that the reference host associated with this profile has changed @@ -63294,14 +62864,13 @@ type ProfileReferenceHostChangedEvent struct { // Refers instance of `HostSystem`. ReferenceHost *ManagedObjectReference `xml:"referenceHost,omitempty" json:"referenceHost,omitempty"` // The newly associated reference host name - ReferenceHostName string `xml:"referenceHostName,omitempty" json:"referenceHostName,omitempty" vim:"6.5"` + ReferenceHostName string `xml:"referenceHostName,omitempty" json:"referenceHostName,omitempty"` // The previous reference host name - PrevReferenceHostName string `xml:"prevReferenceHostName,omitempty" json:"prevReferenceHostName,omitempty" vim:"6.5"` + PrevReferenceHostName string `xml:"prevReferenceHostName,omitempty" json:"prevReferenceHostName,omitempty"` } func init() { t["ProfileReferenceHostChangedEvent"] = reflect.TypeOf((*ProfileReferenceHostChangedEvent)(nil)).Elem() - minAPIVersionForType["ProfileReferenceHostChangedEvent"] = "4.0" } // This event records that a Profile was removed. @@ -63311,7 +62880,6 @@ type ProfileRemovedEvent struct { func init() { t["ProfileRemovedEvent"] = reflect.TypeOf((*ProfileRemovedEvent)(nil)).Elem() - minAPIVersionForType["ProfileRemovedEvent"] = "4.0" } // The `ProfileSerializedCreateSpec` data object @@ -63325,7 +62893,6 @@ type ProfileSerializedCreateSpec struct { func init() { t["ProfileSerializedCreateSpec"] = reflect.TypeOf((*ProfileSerializedCreateSpec)(nil)).Elem() - minAPIVersionForType["ProfileSerializedCreateSpec"] = "4.0" } // DataObject represents a pre-defined expression @@ -63346,7 +62913,6 @@ type ProfileSimpleExpression struct { func init() { t["ProfileSimpleExpression"] = reflect.TypeOf((*ProfileSimpleExpression)(nil)).Elem() - minAPIVersionForType["ProfileSimpleExpression"] = "4.0" } // Errors were detected during Profile update. 
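ProfilePropertyPath, shown above, is how a deferred parameter is addressed when answering the host profile engine: the path names the subprofile, the policy and, optionally, the parameter within the chosen policy option. A sketch of building one such answer; the path, policy and parameter strings are hypothetical placeholders, since real values arrive in the requireInput list returned by ExecuteHostProfile:

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	// Hypothetical path; real ones are returned by ExecuteHostProfile.
	path := types.ProfilePropertyPath{
		ProfilePath: `network.hostPortGroup["key-management"].ipConfig`,
		PolicyId:    "IpAddressPolicy",
		ParameterId: "address",
	}

	answer := types.ProfileDeferredPolicyOptionParameter{
		InputPath: path,
		Parameter: []types.KeyAnyValue{
			// The value supplied by the caller for the addressed parameter.
			{Key: "address", Value: "192.0.2.10"},
		},
	}

	fmt.Printf("%+v\n", answer)
}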
@@ -63356,12 +62922,11 @@ type ProfileUpdateFailed struct { // Failures encountered during update/validation Failure []ProfileUpdateFailedUpdateFailure `xml:"failure" json:"failure"` // Warnings encountered during update/validation - Warnings []ProfileUpdateFailedUpdateFailure `xml:"warnings,omitempty" json:"warnings,omitempty" vim:"6.7"` + Warnings []ProfileUpdateFailedUpdateFailure `xml:"warnings,omitempty" json:"warnings,omitempty"` } func init() { t["ProfileUpdateFailed"] = reflect.TypeOf((*ProfileUpdateFailed)(nil)).Elem() - minAPIVersionForType["ProfileUpdateFailed"] = "4.0" } type ProfileUpdateFailedFault ProfileUpdateFailed @@ -63416,9 +62981,9 @@ type PropertyChange struct { // // Nested // properties are specified by paths; for example, - // - foo.bar - // - foo.arProp\["key val"\] - // - foo.arProp\["key val"\].baz + // - foo.bar + // - foo.arProp\["key val"\] + // - foo.arProp\["key val"\].baz Name string `xml:"name" json:"name"` // Change operation for the property. // @@ -63478,7 +63043,7 @@ type PropertyFilterSpec struct { // // For a call to `PropertyCollector.RetrieveProperties` missing objects will simply // be omitted from the results. - ReportMissingObjectsInResults *bool `xml:"reportMissingObjectsInResults" json:"reportMissingObjectsInResults,omitempty" vim:"4.1"` + ReportMissingObjectsInResults *bool `xml:"reportMissingObjectsInResults" json:"reportMissingObjectsInResults,omitempty"` } func init() { @@ -63546,6 +63111,27 @@ func init() { t["PropertySpec"] = reflect.TypeOf((*PropertySpec)(nil)).Elem() } +type ProvisionServerPrivateKey ProvisionServerPrivateKeyRequestType + +func init() { + t["ProvisionServerPrivateKey"] = reflect.TypeOf((*ProvisionServerPrivateKey)(nil)).Elem() +} + +// The parameters of `HostCertificateManager.ProvisionServerPrivateKey`. +type ProvisionServerPrivateKeyRequestType struct { + This ManagedObjectReference `xml:"_this" json:"-"` + // SSL private key in PEM format + Key string `xml:"key" json:"key"` +} + +func init() { + t["ProvisionServerPrivateKeyRequestType"] = reflect.TypeOf((*ProvisionServerPrivateKeyRequestType)(nil)).Elem() + minAPIVersionForType["ProvisionServerPrivateKeyRequestType"] = "8.0.3.0" +} + +type ProvisionServerPrivateKeyResponse struct { +} + type PutUsbScanCodes PutUsbScanCodesRequestType func init() { @@ -63663,7 +63249,7 @@ type QueryAvailableDvsSpecRequestType struct { // If set to true, return only the recommended versions. // If set to false, return only the not recommended versions. // If unset, return all supported versions. - Recommended *bool `xml:"recommended" json:"recommended,omitempty" vim:"6.0"` + Recommended *bool `xml:"recommended" json:"recommended,omitempty"` } func init() { @@ -63714,10 +63300,10 @@ type QueryAvailablePerfMetricRequestType struct { EndTime *time.Time `xml:"endTime" json:"endTime,omitempty"` // Period of time from which to retrieve metrics, defined by intervalId // (rather than beginTime or endTime). Valid intervalIds include: - // - For real-time counters, the `refreshRate` of - // the *performance - // provider*. - // - For historical counters, the `samplingPeriod` of the *historical interval*. + // - For real-time counters, the `refreshRate` of + // the *performance + // provider*. + // - For historical counters, the `samplingPeriod` of the *historical interval*. // // If this parameter is not specified, the system returns available metrics // for historical statistics. 
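The intervalId rules spelled out above (the provider's refreshRate for real-time data, a historical interval's samplingPeriod otherwise) carry over directly to PerfQuerySpec. A small sketch of a real-time query specification; the counter id and MoRef are placeholders that would normally come from PerfCounterInfo and QueryAvailablePerfMetric:

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	host := types.ManagedObjectReference{Type: "HostSystem", Value: "host-42"}

	// Real-time query: intervalId is the provider's refreshRate (20 seconds on
	// ESXi hosts). For historical stats it would instead be the samplingPeriod
	// of one of the configured PerfIntervals (300, 1800, ...).
	spec := types.PerfQuerySpec{
		Entity:     host,
		MaxSample:  1,
		IntervalId: 20,
		Format:     string(types.PerfFormatNormal),
		MetricId: []types.PerfMetricId{
			// CounterId 6 and the empty instance (aggregate) are placeholders.
			{CounterId: 6, Instance: ""},
		},
	}

	fmt.Printf("querying %d metric(s) at interval %d\n", len(spec.MetricId), spec.IntervalId)
}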
@@ -63990,6 +63576,7 @@ type QueryCompatibleVmnicsFromHostsRequestType struct { func init() { t["QueryCompatibleVmnicsFromHostsRequestType"] = reflect.TypeOf((*QueryCompatibleVmnicsFromHostsRequestType)(nil)).Elem() + minAPIVersionForType["QueryCompatibleVmnicsFromHostsRequestType"] = "8.0.0.1" } type QueryCompatibleVmnicsFromHostsResponse struct { @@ -64167,7 +63754,7 @@ type QueryConnectionInfoRequestType struct { // The password of the user. Password string `xml:"password" json:"password"` // The expected SSL thumbprint of the host's certificate. - SslThumbprint string `xml:"sslThumbprint,omitempty" json:"sslThumbprint,omitempty" vim:"2.5"` + SslThumbprint string `xml:"sslThumbprint,omitempty" json:"sslThumbprint,omitempty"` } func init() { @@ -64336,6 +63923,7 @@ type QueryDirectoryInfoRequestType struct { func init() { t["QueryDirectoryInfoRequestType"] = reflect.TypeOf((*QueryDirectoryInfoRequestType)(nil)).Elem() + minAPIVersionForType["QueryDirectoryInfoRequestType"] = "8.0.1.0" } type QueryDirectoryInfoResponse struct { @@ -64556,7 +64144,7 @@ type QueryExpressionMetadataRequestType struct { // Base profile whose context needs to be used during the operation // // Refers instance of `Profile`. - Profile *ManagedObjectReference `xml:"profile,omitempty" json:"profile,omitempty" vim:"5.0"` + Profile *ManagedObjectReference `xml:"profile,omitempty" json:"profile,omitempty"` } func init() { @@ -64653,6 +64241,7 @@ type QueryFileLockInfoRequestType struct { func init() { t["QueryFileLockInfoRequestType"] = reflect.TypeOf((*QueryFileLockInfoRequestType)(nil)).Elem() + minAPIVersionForType["QueryFileLockInfoRequestType"] = "8.0.2.0" } type QueryFileLockInfoResponse struct { @@ -64856,7 +64445,7 @@ type QueryHostProfileMetadataRequestType struct { // Base profile whose context needs to be used during the operation // // Refers instance of `Profile`. - Profile *ManagedObjectReference `xml:"profile,omitempty" json:"profile,omitempty" vim:"5.0"` + Profile *ManagedObjectReference `xml:"profile,omitempty" json:"profile,omitempty"` } func init() { @@ -65138,6 +64727,7 @@ type QueryMaxQueueDepthRequestType struct { func init() { t["QueryMaxQueueDepthRequestType"] = reflect.TypeOf((*QueryMaxQueueDepthRequestType)(nil)).Elem() + minAPIVersionForType["QueryMaxQueueDepthRequestType"] = "8.0.0.1" } type QueryMaxQueueDepthResponse struct { @@ -65611,7 +65201,7 @@ type QueryPolicyMetadataRequestType struct { // Base profile whose context needs to be used during the operation // // Refers instance of `Profile`. - Profile *ManagedObjectReference `xml:"profile,omitempty" json:"profile,omitempty" vim:"5.0"` + Profile *ManagedObjectReference `xml:"profile,omitempty" json:"profile,omitempty"` } func init() { @@ -65800,6 +65390,7 @@ type QuerySupportedNetworkOffloadSpecRequestType struct { func init() { t["QuerySupportedNetworkOffloadSpecRequestType"] = reflect.TypeOf((*QuerySupportedNetworkOffloadSpecRequestType)(nil)).Elem() + minAPIVersionForType["QuerySupportedNetworkOffloadSpecRequestType"] = "8.0.0.1" } type QuerySupportedNetworkOffloadSpecResponse struct { @@ -66111,6 +65702,44 @@ func init() { t["QueryVirtualDiskUuid"] = reflect.TypeOf((*QueryVirtualDiskUuid)(nil)).Elem() } +type QueryVirtualDiskUuidEx QueryVirtualDiskUuidExRequestType + +func init() { + t["QueryVirtualDiskUuidEx"] = reflect.TypeOf((*QueryVirtualDiskUuidEx)(nil)).Elem() +} + +// The parameters of `VcenterVStorageObjectManager.QueryVirtualDiskUuidEx`. 
+type QueryVirtualDiskUuidExRequestType struct { + This ManagedObjectReference `xml:"_this" json:"-"` + // The name of the disk, either a datastore path or a URL + // referring to the virtual disk whose uuid for the DDB entry needs to be queried. + // A datastore path has the form + // > \[_datastore_\] _path_ + // + // where + // - _datastore_ is the datastore name. + // - _path_ is a slash-delimited path from the root of the datastore. + // + // An example datastore path is "\[storage\] path/to/file.extension". + Name string `xml:"name" json:"name"` + // If name is a datastore path, the datacenter for + // that datastore path is mandatory. Not needed when invoked directly on ESX. + // If not specified on a call from VirtualCenter, + // name must be a URL. + // + // Refers instance of `Datacenter`. + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty" json:"datacenter,omitempty"` +} + +func init() { + t["QueryVirtualDiskUuidExRequestType"] = reflect.TypeOf((*QueryVirtualDiskUuidExRequestType)(nil)).Elem() + minAPIVersionForType["QueryVirtualDiskUuidExRequestType"] = "8.0.3.0" +} + +type QueryVirtualDiskUuidExResponse struct { + Returnval string `xml:"returnval" json:"returnval"` +} + // The parameters of `VirtualDiskManager.QueryVirtualDiskUuid`. type QueryVirtualDiskUuidRequestType struct { This ManagedObjectReference `xml:"_this" json:"-"` @@ -66170,7 +65799,7 @@ type QueryVmfsDatastoreCreateOptionsRequestType struct { // parameter is not specified, then the highest // *supported VMFS major version* for the host // is used. - VmfsMajorVersion int32 `xml:"vmfsMajorVersion,omitempty" json:"vmfsMajorVersion,omitempty" vim:"5.0"` + VmfsMajorVersion int32 `xml:"vmfsMajorVersion,omitempty" json:"vmfsMajorVersion,omitempty"` } func init() { @@ -66225,7 +65854,7 @@ type QueryVmfsDatastoreExtendOptionsRequestType struct { // Free space can be used for adding an extent or expanding an existing // extent. If this parameter is set to true, the list of options // returned will not include free space that can be used for expansion. 
- SuppressExpandCandidates *bool `xml:"suppressExpandCandidates" json:"suppressExpandCandidates,omitempty" vim:"4.0"` + SuppressExpandCandidates *bool `xml:"suppressExpandCandidates" json:"suppressExpandCandidates,omitempty"` } func init() { @@ -66359,7 +65988,6 @@ type QuestionPending struct { func init() { t["QuestionPending"] = reflect.TypeOf((*QuestionPending)(nil)).Elem() - minAPIVersionForType["QuestionPending"] = "4.1" } type QuestionPendingFault QuestionPending @@ -66389,7 +66017,6 @@ type QuiesceDatastoreIOForHAFailed struct { func init() { t["QuiesceDatastoreIOForHAFailed"] = reflect.TypeOf((*QuiesceDatastoreIOForHAFailed)(nil)).Elem() - minAPIVersionForType["QuiesceDatastoreIOForHAFailed"] = "5.0" } type QuiesceDatastoreIOForHAFailedFault QuiesceDatastoreIOForHAFailed @@ -66410,7 +66037,6 @@ type RDMConversionNotSupported struct { func init() { t["RDMConversionNotSupported"] = reflect.TypeOf((*RDMConversionNotSupported)(nil)).Elem() - minAPIVersionForType["RDMConversionNotSupported"] = "4.0" } type RDMConversionNotSupportedFault RDMConversionNotSupported @@ -66485,7 +66111,6 @@ type RDMNotSupportedOnDatastore struct { func init() { t["RDMNotSupportedOnDatastore"] = reflect.TypeOf((*RDMNotSupportedOnDatastore)(nil)).Elem() - minAPIVersionForType["RDMNotSupportedOnDatastore"] = "2.5" } type RDMNotSupportedOnDatastoreFault RDMNotSupportedOnDatastore @@ -66574,7 +66199,6 @@ type ReadHostResourcePoolTreeFailed struct { func init() { t["ReadHostResourcePoolTreeFailed"] = reflect.TypeOf((*ReadHostResourcePoolTreeFailed)(nil)).Elem() - minAPIVersionForType["ReadHostResourcePoolTreeFailed"] = "5.0" } type ReadHostResourcePoolTreeFailedFault ReadHostResourcePoolTreeFailed @@ -66882,7 +66506,7 @@ type ReconfigurationSatisfiableRequestType struct { Pcbs []VsanPolicyChangeBatch `xml:"pcbs" json:"pcbs"` // Optionally populate PolicyCost even though // object cannot be reconfigured in the current cluster topology. - IgnoreSatisfiability *bool `xml:"ignoreSatisfiability" json:"ignoreSatisfiability,omitempty" vim:"6.0"` + IgnoreSatisfiability *bool `xml:"ignoreSatisfiability" json:"ignoreSatisfiability,omitempty"` } func init() { @@ -67216,7 +66840,7 @@ type ReconnectHostRequestType struct { // reconnect. This will mainly be used to indicate how to // handle divergence between the host settings and vCenter Server // settings when the host was disconnected. - ReconnectSpec *HostSystemReconnectSpec `xml:"reconnectSpec,omitempty" json:"reconnectSpec,omitempty" vim:"5.0"` + ReconnectSpec *HostSystemReconnectSpec `xml:"reconnectSpec,omitempty" json:"reconnectSpec,omitempty"` } func init() { @@ -67243,7 +66867,6 @@ type RecordReplayDisabled struct { func init() { t["RecordReplayDisabled"] = reflect.TypeOf((*RecordReplayDisabled)(nil)).Elem() - minAPIVersionForType["RecordReplayDisabled"] = "4.0" } type RecordReplayDisabledFault RecordReplayDisabled @@ -67268,7 +66891,6 @@ type RecoveryEvent struct { func init() { t["RecoveryEvent"] = reflect.TypeOf((*RecoveryEvent)(nil)).Elem() - minAPIVersionForType["RecoveryEvent"] = "5.1" } // The parameters of `DistributedVirtualSwitch.RectifyDvsHost_Task`. @@ -67746,6 +67368,10 @@ func init() { type RegisterKmipServerRequestType struct { This ManagedObjectReference `xml:"_this" json:"-"` // \[in\] KMIP server connection information. + // When register a new KMIP server to the key provider, + // the `KmipServerSpec#defaultKeyType` and + // `KmipServerSpec#wrappingKeySpec` must match + // existing servers. 
Server KmipServerSpec `xml:"server" json:"server"` } @@ -67949,7 +67575,7 @@ type RelocateVMRequestType struct { Spec VirtualMachineRelocateSpec `xml:"spec" json:"spec"` // The task priority // (see `VirtualMachineMovePriority_enum`). - Priority VirtualMachineMovePriority `xml:"priority,omitempty" json:"priority,omitempty" vim:"4.0"` + Priority VirtualMachineMovePriority `xml:"priority,omitempty" json:"priority,omitempty"` } func init() { @@ -68020,7 +67646,6 @@ type RemoteTSMEnabledEvent struct { func init() { t["RemoteTSMEnabledEvent"] = reflect.TypeOf((*RemoteTSMEnabledEvent)(nil)).Elem() - minAPIVersionForType["RemoteTSMEnabledEvent"] = "4.1" } type RemoveAlarm RemoveAlarmRequestType @@ -68045,7 +67670,11 @@ type RemoveAllSnapshotsRequestType struct { This ManagedObjectReference `xml:"_this" json:"-"` // (optional) If set to true, the virtual disks of the deleted // snapshot will be merged with other disk if possible. Default to true. - Consolidate *bool `xml:"consolidate" json:"consolidate,omitempty" vim:"5.0"` + Consolidate *bool `xml:"consolidate" json:"consolidate,omitempty"` + // (optional) When provided, only snapshots satisfying the + // criteria described by the spec will be removed. If unset, all snapshots + // will be removed. + Spec *SnapshotSelectionSpec `xml:"spec,omitempty" json:"spec,omitempty" vim:"8.0.3.0"` } func init() { @@ -68178,12 +67807,12 @@ type RemoveDiskMappingRequestType struct { // before removing it. See `HostMaintenanceSpec`. // If unspecified, there is no action taken to move // data from the disk. - MaintenanceSpec *HostMaintenanceSpec `xml:"maintenanceSpec,omitempty" json:"maintenanceSpec,omitempty" vim:"6.0"` + MaintenanceSpec *HostMaintenanceSpec `xml:"maintenanceSpec,omitempty" json:"maintenanceSpec,omitempty"` // Time to wait for the task to complete in seconds. // If the value is less than or equal to zero, there // is no timeout. The operation fails with a Timedout // exception if it timed out. - Timeout int32 `xml:"timeout,omitempty" json:"timeout,omitempty" vim:"6.0"` + Timeout int32 `xml:"timeout,omitempty" json:"timeout,omitempty"` } func init() { @@ -68209,12 +67838,12 @@ type RemoveDiskRequestType struct { // before removing it. See `HostMaintenanceSpec`. // If unspecified, there is no action taken to move // data from the disk. - MaintenanceSpec *HostMaintenanceSpec `xml:"maintenanceSpec,omitempty" json:"maintenanceSpec,omitempty" vim:"6.0"` + MaintenanceSpec *HostMaintenanceSpec `xml:"maintenanceSpec,omitempty" json:"maintenanceSpec,omitempty"` // Time to wait for the task to complete in seconds. // If the value is less than or equal to zero, there // is no timeout. The operation fails with a Timedout // exception if it timed out. - Timeout int32 `xml:"timeout,omitempty" json:"timeout,omitempty" vim:"6.0"` + Timeout int32 `xml:"timeout,omitempty" json:"timeout,omitempty"` } func init() { @@ -68758,7 +68387,7 @@ type RemoveSnapshotRequestType struct { RemoveChildren bool `xml:"removeChildren" json:"removeChildren"` // (optional) If set to true, the virtual disk associated // with this snapshot will be merged with other disk if possible. Defaults to true. 
- Consolidate *bool `xml:"consolidate" json:"consolidate,omitempty" vim:"5.0"` + Consolidate *bool `xml:"consolidate" json:"consolidate,omitempty"` } func init() { @@ -68790,6 +68419,7 @@ type RemoveSoftwareAdapterRequestType struct { func init() { t["RemoveSoftwareAdapterRequestType"] = reflect.TypeOf((*RemoveSoftwareAdapterRequestType)(nil)).Elem() + minAPIVersionForType["RemoveSoftwareAdapterRequestType"] = "7.0.3.0" } type RemoveSoftwareAdapterResponse struct { @@ -68975,6 +68605,7 @@ type RenameVStorageObjectExRequestType struct { func init() { t["RenameVStorageObjectExRequestType"] = reflect.TypeOf((*RenameVStorageObjectExRequestType)(nil)).Elem() + minAPIVersionForType["RenameVStorageObjectExRequestType"] = "8.0.2.0" } type RenameVStorageObjectExResponse struct { @@ -69064,7 +68695,6 @@ type ReplicationConfigFault struct { func init() { t["ReplicationConfigFault"] = reflect.TypeOf((*ReplicationConfigFault)(nil)).Elem() - minAPIVersionForType["ReplicationConfigFault"] = "5.0" } type ReplicationConfigFaultFault BaseReplicationConfigFault @@ -69137,25 +68767,28 @@ type ReplicationConfigSpec struct { // // The primary will negotiate the best compression with // the server on the secondary if this is enabled. - NetCompressionEnabled *bool `xml:"netCompressionEnabled" json:"netCompressionEnabled,omitempty" vim:"6.0"` + NetCompressionEnabled *bool `xml:"netCompressionEnabled" json:"netCompressionEnabled,omitempty"` // Flag that indicates whether or not encription should // be used when sending traffic over the network. // // The primary will use the remoteCertificateThumbprint // to verify the identity of the remote server. - NetEncryptionEnabled *bool `xml:"netEncryptionEnabled" json:"netEncryptionEnabled,omitempty" vim:"6.7"` + NetEncryptionEnabled *bool `xml:"netEncryptionEnabled" json:"netEncryptionEnabled,omitempty"` // The IP address of the remote HBR server, target for encrypted LWD. // // This field is required when net encryption is enabled, ignored otherwise. - EncryptionDestination string `xml:"encryptionDestination,omitempty" json:"encryptionDestination,omitempty" vim:"6.7"` + EncryptionDestination string `xml:"encryptionDestination,omitempty" json:"encryptionDestination,omitempty"` // The port on the remote HBR server, target for encrypted LWD. // // This field is only relevant when net encryption is enabled. - EncryptionPort int32 `xml:"encryptionPort,omitempty" json:"encryptionPort,omitempty" vim:"6.7"` + EncryptionPort int32 `xml:"encryptionPort,omitempty" json:"encryptionPort,omitempty"` + // Deprecated field is deprecated, use + // `vim.HbrManager.configureReplicationTargets` instead. + // // The SHA256 thumbprint of the remote server certificate. // // This field is only relevant when net encription is enabled. - RemoteCertificateThumbprint string `xml:"remoteCertificateThumbprint,omitempty" json:"remoteCertificateThumbprint,omitempty" vim:"6.7"` + RemoteCertificateThumbprint string `xml:"remoteCertificateThumbprint,omitempty" json:"remoteCertificateThumbprint,omitempty"` // Flag that indicates whether DataSets files are replicated or not. 
DataSetsReplicationEnabled *bool `xml:"dataSetsReplicationEnabled" json:"dataSetsReplicationEnabled,omitempty" vim:"8.0.0.0"` // The set of the disks of this VM that are configured for @@ -69165,7 +68798,6 @@ type ReplicationConfigSpec struct { func init() { t["ReplicationConfigSpec"] = reflect.TypeOf((*ReplicationConfigSpec)(nil)).Elem() - minAPIVersionForType["ReplicationConfigSpec"] = "5.0" } // A ReplicationDiskConfigFault is thrown when there is an issue with @@ -69188,7 +68820,6 @@ type ReplicationDiskConfigFault struct { func init() { t["ReplicationDiskConfigFault"] = reflect.TypeOf((*ReplicationDiskConfigFault)(nil)).Elem() - minAPIVersionForType["ReplicationDiskConfigFault"] = "5.0" } type ReplicationDiskConfigFaultFault ReplicationDiskConfigFault @@ -69204,7 +68835,6 @@ type ReplicationFault struct { func init() { t["ReplicationFault"] = reflect.TypeOf((*ReplicationFault)(nil)).Elem() - minAPIVersionForType["ReplicationFault"] = "5.0" } type ReplicationFaultFault BaseReplicationFault @@ -69246,7 +68876,6 @@ type ReplicationGroupId struct { func init() { t["ReplicationGroupId"] = reflect.TypeOf((*ReplicationGroupId)(nil)).Elem() - minAPIVersionForType["ReplicationGroupId"] = "6.5" } // Used to indicate that FT cannot be enabled on a replicated virtual machine @@ -69257,7 +68886,6 @@ type ReplicationIncompatibleWithFT struct { func init() { t["ReplicationIncompatibleWithFT"] = reflect.TypeOf((*ReplicationIncompatibleWithFT)(nil)).Elem() - minAPIVersionForType["ReplicationIncompatibleWithFT"] = "5.0" } type ReplicationIncompatibleWithFTFault ReplicationIncompatibleWithFT @@ -69285,7 +68913,6 @@ type ReplicationInfoDiskSettings struct { func init() { t["ReplicationInfoDiskSettings"] = reflect.TypeOf((*ReplicationInfoDiskSettings)(nil)).Elem() - minAPIVersionForType["ReplicationInfoDiskSettings"] = "5.0" } // A ReplicationInvalidOptions fault is thrown when the options @@ -69303,7 +68930,6 @@ type ReplicationInvalidOptions struct { func init() { t["ReplicationInvalidOptions"] = reflect.TypeOf((*ReplicationInvalidOptions)(nil)).Elem() - minAPIVersionForType["ReplicationInvalidOptions"] = "5.0" } type ReplicationInvalidOptionsFault ReplicationInvalidOptions @@ -69319,7 +68945,6 @@ type ReplicationNotSupportedOnHost struct { func init() { t["ReplicationNotSupportedOnHost"] = reflect.TypeOf((*ReplicationNotSupportedOnHost)(nil)).Elem() - minAPIVersionForType["ReplicationNotSupportedOnHost"] = "5.0" } type ReplicationNotSupportedOnHostFault ReplicationNotSupportedOnHost @@ -69356,7 +68981,6 @@ type ReplicationVmConfigFault struct { func init() { t["ReplicationVmConfigFault"] = reflect.TypeOf((*ReplicationVmConfigFault)(nil)).Elem() - minAPIVersionForType["ReplicationVmConfigFault"] = "5.0" } type ReplicationVmConfigFaultFault ReplicationVmConfigFault @@ -69387,7 +69011,6 @@ type ReplicationVmFault struct { func init() { t["ReplicationVmFault"] = reflect.TypeOf((*ReplicationVmFault)(nil)).Elem() - minAPIVersionForType["ReplicationVmFault"] = "5.0" } type ReplicationVmFaultFault BaseReplicationVmFault @@ -69410,7 +69033,6 @@ type ReplicationVmInProgressFault struct { func init() { t["ReplicationVmInProgressFault"] = reflect.TypeOf((*ReplicationVmInProgressFault)(nil)).Elem() - minAPIVersionForType["ReplicationVmInProgressFault"] = "6.0" } type ReplicationVmInProgressFaultFault ReplicationVmInProgressFault @@ -69458,7 +69080,6 @@ type ReplicationVmProgressInfo struct { func init() { t["ReplicationVmProgressInfo"] = reflect.TypeOf((*ReplicationVmProgressInfo)(nil)).Elem() - 
minAPIVersionForType["ReplicationVmProgressInfo"] = "5.0" } // A RequestCanceled fault is thrown if the user canceled the task. @@ -69906,7 +69527,7 @@ type ResourceAllocationInfo struct { // at this time. // The server will throw an exception if you attempt to set // this property. - OverheadLimit *int64 `xml:"overheadLimit" json:"overheadLimit,omitempty" vim:"2.5"` + OverheadLimit *int64 `xml:"overheadLimit" json:"overheadLimit,omitempty"` } func init() { @@ -69924,7 +69545,6 @@ type ResourceAllocationOption struct { func init() { t["ResourceAllocationOption"] = reflect.TypeOf((*ResourceAllocationOption)(nil)).Elem() - minAPIVersionForType["ResourceAllocationOption"] = "4.1" } // This data object type is a default value and value range specification @@ -69944,7 +69564,6 @@ type ResourceConfigOption struct { func init() { t["ResourceConfigOption"] = reflect.TypeOf((*ResourceConfigOption)(nil)).Elem() - minAPIVersionForType["ResourceConfigOption"] = "4.1" } // This data object type is a specification for a set of resources @@ -69994,7 +69613,7 @@ type ResourceConfigSpec struct { // pool. The `ResourcePoolRuntimeInfo.sharesScalable` property // indicates whether or not a resource pool's shares are scalable. This // property does not apply to virtual machines. - ScaleDescendantsShares string `xml:"scaleDescendantsShares,omitempty" json:"scaleDescendantsShares,omitempty" vim:"7.0"` + ScaleDescendantsShares string `xml:"scaleDescendantsShares,omitempty" json:"scaleDescendantsShares,omitempty"` } func init() { @@ -70045,7 +69664,6 @@ type ResourceNotAvailable struct { func init() { t["ResourceNotAvailable"] = reflect.TypeOf((*ResourceNotAvailable)(nil)).Elem() - minAPIVersionForType["ResourceNotAvailable"] = "4.0" } type ResourceNotAvailableFault ResourceNotAvailable @@ -70207,12 +69825,11 @@ type ResourcePoolQuickStats struct { // in `ResourcePoolQuickStats.overheadMemory`. ConsumedOverheadMemory int64 `xml:"consumedOverheadMemory,omitempty" json:"consumedOverheadMemory,omitempty"` // The amount of compressed memory currently consumed by VM, in KB. - CompressedMemory int64 `xml:"compressedMemory,omitempty" json:"compressedMemory,omitempty" vim:"4.1"` + CompressedMemory int64 `xml:"compressedMemory,omitempty" json:"compressedMemory,omitempty"` } func init() { t["ResourcePoolQuickStats"] = reflect.TypeOf((*ResourcePoolQuickStats)(nil)).Elem() - minAPIVersionForType["ResourcePoolQuickStats"] = "4.0" } // This event records when a resource pool configuration is changed. @@ -70220,7 +69837,7 @@ type ResourcePoolReconfiguredEvent struct { ResourcePoolEvent // The configuration values changed during the reconfiguration. - ConfigChanges *ChangesInfoEventArgument `xml:"configChanges,omitempty" json:"configChanges,omitempty" vim:"6.5"` + ConfigChanges *ChangesInfoEventArgument `xml:"configChanges,omitempty" json:"configChanges,omitempty"` } func init() { @@ -70318,7 +69935,7 @@ type ResourcePoolRuntimeInfo struct { // system will automatically compute this property based on the `ResourceConfigSpec.scaleDescendantsShares` setting on every // ancestor resource pool. This property does not apply to virtual // machines. - SharesScalable string `xml:"sharesScalable,omitempty" json:"sharesScalable,omitempty" vim:"7.0"` + SharesScalable string `xml:"sharesScalable,omitempty" json:"sharesScalable,omitempty"` } func init() { @@ -70341,9 +69958,9 @@ type ResourcePoolSummary struct { // This data object type does not support notification, for scalability reasons. 
// Therefore, changes in QuickStats do not generate property collector updates. // To monitor statistics values, use the statistics and alarms modules instead. - QuickStats *ResourcePoolQuickStats `xml:"quickStats,omitempty" json:"quickStats,omitempty" vim:"4.0"` + QuickStats *ResourcePoolQuickStats `xml:"quickStats,omitempty" json:"quickStats,omitempty"` // Total configured memory of all virtual machines in the resource pool, in MB. - ConfiguredMemoryMB int32 `xml:"configuredMemoryMB,omitempty" json:"configuredMemoryMB,omitempty" vim:"4.0"` + ConfiguredMemoryMB int32 `xml:"configuredMemoryMB,omitempty" json:"configuredMemoryMB,omitempty"` } func init() { @@ -70431,7 +70048,6 @@ type RestrictedByAdministrator struct { func init() { t["RestrictedByAdministrator"] = reflect.TypeOf((*RestrictedByAdministrator)(nil)).Elem() - minAPIVersionForType["RestrictedByAdministrator"] = "6.0" } type RestrictedByAdministratorFault RestrictedByAdministrator @@ -70448,7 +70064,6 @@ type RestrictedVersion struct { func init() { t["RestrictedVersion"] = reflect.TypeOf((*RestrictedVersion)(nil)).Elem() - minAPIVersionForType["RestrictedVersion"] = "2.5" } type RestrictedVersionFault RestrictedVersion @@ -70954,7 +70569,6 @@ type RetrieveOptions struct { func init() { t["RetrieveOptions"] = reflect.TypeOf((*RetrieveOptions)(nil)).Elem() - minAPIVersionForType["RetrieveOptions"] = "4.1" } type RetrieveProductComponents RetrieveProductComponentsRequestType @@ -71040,7 +70654,6 @@ type RetrieveResult struct { func init() { t["RetrieveResult"] = reflect.TypeOf((*RetrieveResult)(nil)).Elem() - minAPIVersionForType["RetrieveResult"] = "4.1" } type RetrieveRolePermissions RetrieveRolePermissionsRequestType @@ -71254,7 +70867,6 @@ type RetrieveVStorageObjSpec struct { func init() { t["RetrieveVStorageObjSpec"] = reflect.TypeOf((*RetrieveVStorageObjSpec)(nil)).Elem() - minAPIVersionForType["RetrieveVStorageObjSpec"] = "6.7" } type RetrieveVStorageObject RetrieveVStorageObjectRequestType @@ -71416,7 +71028,7 @@ type RevertToCurrentSnapshotRequestType struct { // (optional) If set to true, the virtual // machine will not be powered on regardless of the power state when // the current snapshot was created. Default to false. - SuppressPowerOn *bool `xml:"suppressPowerOn" json:"suppressPowerOn,omitempty" vim:"2.5 U2"` + SuppressPowerOn *bool `xml:"suppressPowerOn" json:"suppressPowerOn,omitempty"` } func init() { @@ -71453,7 +71065,7 @@ type RevertToSnapshotRequestType struct { // (optional) If set to true, the virtual // machine will not be powered on regardless of the power state when // the snapshot was created. Default to false. - SuppressPowerOn *bool `xml:"suppressPowerOn" json:"suppressPowerOn,omitempty" vim:"2.5 U2"` + SuppressPowerOn *bool `xml:"suppressPowerOn" json:"suppressPowerOn,omitempty"` } func init() { @@ -71486,6 +71098,7 @@ type RevertVStorageObjectExRequestType struct { func init() { t["RevertVStorageObjectExRequestType"] = reflect.TypeOf((*RevertVStorageObjectExRequestType)(nil)).Elem() + minAPIVersionForType["RevertVStorageObjectExRequestType"] = "8.0.2.0" } type RevertVStorageObjectEx_Task RevertVStorageObjectExRequestType @@ -71597,11 +71210,11 @@ type RoleUpdatedEvent struct { // The privileges granted to the role. PrivilegeList []string `xml:"privilegeList,omitempty" json:"privilegeList,omitempty"` // The name of the previous role. 
- PrevRoleName string `xml:"prevRoleName,omitempty" json:"prevRoleName,omitempty" vim:"6.5"` + PrevRoleName string `xml:"prevRoleName,omitempty" json:"prevRoleName,omitempty"` // The privileges added to the role. - PrivilegesAdded []string `xml:"privilegesAdded,omitempty" json:"privilegesAdded,omitempty" vim:"6.5"` + PrivilegesAdded []string `xml:"privilegesAdded,omitempty" json:"privilegesAdded,omitempty"` // The privileges removed from the role. - PrivilegesRemoved []string `xml:"privilegesRemoved,omitempty" json:"privilegesRemoved,omitempty" vim:"6.5"` + PrivilegesRemoved []string `xml:"privilegesRemoved,omitempty" json:"privilegesRemoved,omitempty"` } func init() { @@ -71622,7 +71235,6 @@ type RollbackEvent struct { func init() { t["RollbackEvent"] = reflect.TypeOf((*RollbackEvent)(nil)).Elem() - minAPIVersionForType["RollbackEvent"] = "5.1" } // Thrown if a Rollback operation fails @@ -71637,7 +71249,6 @@ type RollbackFailure struct { func init() { t["RollbackFailure"] = reflect.TypeOf((*RollbackFailure)(nil)).Elem() - minAPIVersionForType["RollbackFailure"] = "5.1" } type RollbackFailureFault RollbackFailure @@ -71660,11 +71271,11 @@ type RuleViolation struct { // violate a rule. // // Refers instance of `HostSystem`. - Host *ManagedObjectReference `xml:"host,omitempty" json:"host,omitempty" vim:"2.5"` + Host *ManagedObjectReference `xml:"host,omitempty" json:"host,omitempty"` // The rule that is violated. // // It can be an affinity or anti-affinity rule. - Rule BaseClusterRuleInfo `xml:"rule,omitempty,typeattr" json:"rule,omitempty" vim:"4.0"` + Rule BaseClusterRuleInfo `xml:"rule,omitempty,typeattr" json:"rule,omitempty"` } func init() { @@ -71780,7 +71391,6 @@ type SAMLTokenAuthentication struct { func init() { t["SAMLTokenAuthentication"] = reflect.TypeOf((*SAMLTokenAuthentication)(nil)).Elem() - minAPIVersionForType["SAMLTokenAuthentication"] = "6.0" } // An empty data object which can be used as the base class for data objects @@ -71794,7 +71404,6 @@ type SDDCBase struct { func init() { t["SDDCBase"] = reflect.TypeOf((*SDDCBase)(nil)).Elem() - minAPIVersionForType["SDDCBase"] = "6.0" } // A SSLDisabledFault fault occurs when a host does not have ssl enabled. @@ -71804,7 +71413,6 @@ type SSLDisabledFault struct { func init() { t["SSLDisabledFault"] = reflect.TypeOf((*SSLDisabledFault)(nil)).Elem() - minAPIVersionForType["SSLDisabledFault"] = "4.0" } type SSLDisabledFaultFault SSLDisabledFault @@ -71839,7 +71447,6 @@ type SSLVerifyFault struct { func init() { t["SSLVerifyFault"] = reflect.TypeOf((*SSLVerifyFault)(nil)).Elem() - minAPIVersionForType["SSLVerifyFault"] = "2.5" } type SSLVerifyFaultFault SSLVerifyFault @@ -71888,7 +71495,6 @@ type SSPIAuthentication struct { func init() { t["SSPIAuthentication"] = reflect.TypeOf((*SSPIAuthentication)(nil)).Elem() - minAPIVersionForType["SSPIAuthentication"] = "5.0" } // Thrown during SSPI pass-through authentication if further @@ -71902,7 +71508,6 @@ type SSPIChallenge struct { func init() { t["SSPIChallenge"] = reflect.TypeOf((*SSPIChallenge)(nil)).Elem() - minAPIVersionForType["SSPIChallenge"] = "2.5" } type SSPIChallengeFault SSPIChallenge @@ -72011,7 +71616,6 @@ type ScheduledHardwareUpgradeInfo struct { func init() { t["ScheduledHardwareUpgradeInfo"] = reflect.TypeOf((*ScheduledHardwareUpgradeInfo)(nil)).Elem() - minAPIVersionForType["ScheduledHardwareUpgradeInfo"] = "5.1" } // This event records the completion of a scheduled task. 
@@ -72177,7 +71781,7 @@ type ScheduledTaskInfo struct { // This field will have information about either the // ManagedEntity or the ManagedObject on which the scheduled // task is defined. - TaskObject *ManagedObjectReference `xml:"taskObject,omitempty" json:"taskObject,omitempty" vim:"4.0"` + TaskObject *ManagedObjectReference `xml:"taskObject,omitempty" json:"taskObject,omitempty"` } func init() { @@ -72189,7 +71793,7 @@ type ScheduledTaskReconfiguredEvent struct { ScheduledTaskEvent // The configuration values changed during the reconfiguration. - ConfigChanges *ChangesInfoEventArgument `xml:"configChanges,omitempty" json:"configChanges,omitempty" vim:"6.5"` + ConfigChanges *ChangesInfoEventArgument `xml:"configChanges,omitempty" json:"configChanges,omitempty"` } func init() { @@ -72298,7 +71902,7 @@ type ScsiLun struct { // only sufficient to identify the ScsiLun within a single host. Each // descriptor contains a quality property that indicates whether or not // the descriptor is suitable for correlation. - Descriptor []ScsiLunDescriptor `xml:"descriptor,omitempty" json:"descriptor,omitempty" vim:"4.0"` + Descriptor []ScsiLunDescriptor `xml:"descriptor,omitempty" json:"descriptor,omitempty"` // Canonical name of the SCSI logical unit. // // Disk partition or extent identifiers refer to this name when @@ -72313,7 +71917,7 @@ type ScsiLun struct { // display name will be used if available. If the display name is not // supported, it will be unset. The display name does not have to be // unique but it is recommended that it be unique. - DisplayName string `xml:"displayName,omitempty" json:"displayName,omitempty" vim:"4.0"` + DisplayName string `xml:"displayName,omitempty" json:"displayName,omitempty"` // The type of SCSI device. // // Must be one of the values of @@ -72348,13 +71952,13 @@ type ScsiLun struct { // Product Data (VPD) and the Identification Vital Product Data (VPD) page // 83h as defined by the SCSI-3 Primary Commands. For devices that are not // SCSI-3 compliant this property is not defined. - AlternateName []ScsiLunDurableName `xml:"alternateName,omitempty" json:"alternateName,omitempty" vim:"2.5"` + AlternateName []ScsiLunDurableName `xml:"alternateName,omitempty" json:"alternateName,omitempty"` // Standard Inquiry payload. // // For a SCSI-3 compliant device this property is derived from the // standard inquiry data. For devices that are not SCSI-3 compliant this // property is not defined. - StandardInquiry []byte `xml:"standardInquiry,omitempty" json:"standardInquiry,omitempty" vim:"2.5"` + StandardInquiry ByteSlice `xml:"standardInquiry,omitempty" json:"standardInquiry,omitempty"` // The queue depth of SCSI device. QueueDepth int32 `xml:"queueDepth,omitempty" json:"queueDepth,omitempty"` // The operational states of the LUN. @@ -72367,7 +71971,7 @@ type ScsiLun struct { // See also `ScsiLunState_enum`. OperationalState []string `xml:"operationalState" json:"operationalState"` // Capabilities of SCSI device. - Capabilities *ScsiLunCapabilities `xml:"capabilities,omitempty" json:"capabilities,omitempty" vim:"4.0"` + Capabilities *ScsiLunCapabilities `xml:"capabilities,omitempty" json:"capabilities,omitempty"` // vStorage hardware acceleration support status. // // This property @@ -72381,7 +71985,7 @@ type ScsiLun struct { // less CPU, memory, and storage fabric bandwidth. // // For vSphere 4.0 or earlier hosts, this value will be unset. 
- VStorageSupport string `xml:"vStorageSupport,omitempty" json:"vStorageSupport,omitempty" vim:"4.1"` + VStorageSupport string `xml:"vStorageSupport,omitempty" json:"vStorageSupport,omitempty"` // Indicates that this SCSI LUN is protocol endpoint. // // This @@ -72389,16 +71993,16 @@ type ScsiLun struct { // VirtualVolume based Datastore. Check the host capability // `HostCapability.virtualVolumeDatastoreSupported`. // See `HostProtocolEndpoint`. - ProtocolEndpoint *bool `xml:"protocolEndpoint" json:"protocolEndpoint,omitempty" vim:"6.0"` + ProtocolEndpoint *bool `xml:"protocolEndpoint" json:"protocolEndpoint,omitempty"` // Indicates the state of a perennially reserved flag for a LUN. // // If // set for Raw Device Mapped (RDM) LUNs, the host startup or LUN rescan // take comparatively shorter duration than when it is unset. - PerenniallyReserved *bool `xml:"perenniallyReserved" json:"perenniallyReserved,omitempty" vim:"6.7.2"` + PerenniallyReserved *bool `xml:"perenniallyReserved" json:"perenniallyReserved,omitempty"` // Indicates if LUN has the prequisite properties to enable Clustered Vmdk // feature once formatted into VMFS Datastore. - ClusteredVmdkSupported *bool `xml:"clusteredVmdkSupported" json:"clusteredVmdkSupported,omitempty" vim:"7.0"` + ClusteredVmdkSupported *bool `xml:"clusteredVmdkSupported" json:"clusteredVmdkSupported,omitempty"` // Indicates the current device protocol. // // Application protocol for a device which is set based on input @@ -72409,6 +72013,13 @@ type ScsiLun struct { // // Set to true when the namespace of LUN is dispersed. DispersedNs *bool `xml:"dispersedNs" json:"dispersedNs,omitempty" vim:"8.0.1.0"` + // Indicates whether a device is under SCSI/NVMe reservation. + // + // Device reservation for a SCSI/NVMe device set based on + // values received from vmkernel. The list of supported values is defined in + // `ScsiLunLunReservationStatus_enum`. + // If unset, the reservation status is unknown. + DeviceReservation string `xml:"deviceReservation,omitempty" json:"deviceReservation,omitempty" vim:"8.0.3.0"` } func init() { @@ -72425,7 +72036,6 @@ type ScsiLunCapabilities struct { func init() { t["ScsiLunCapabilities"] = reflect.TypeOf((*ScsiLunCapabilities)(nil)).Elem() - minAPIVersionForType["ScsiLunCapabilities"] = "4.0" } // A structure that encapsulates an identifier and its properties for the @@ -72444,7 +72054,6 @@ type ScsiLunDescriptor struct { func init() { t["ScsiLunDescriptor"] = reflect.TypeOf((*ScsiLunDescriptor)(nil)).Elem() - minAPIVersionForType["ScsiLunDescriptor"] = "4.0" } // This data object type represents an SMI-S "Correlatable and @@ -72469,7 +72078,7 @@ type ScsiLunDurableName struct { // along with the payload for data obtained from page 83h, and is the // payload for data obtained from page 80h of the Vital Product Data // (VPD). - Data []byte `xml:"data,omitempty" json:"data,omitempty"` + Data ByteSlice `xml:"data,omitempty" json:"data,omitempty"` } func init() { @@ -72489,7 +72098,6 @@ type SeSparseVirtualDiskSpec struct { func init() { t["SeSparseVirtualDiskSpec"] = reflect.TypeOf((*SeSparseVirtualDiskSpec)(nil)).Elem() - minAPIVersionForType["SeSparseVirtualDiskSpec"] = "5.1" } // The parameters of `HostDatastoreBrowser.SearchDatastore_Task`. 
@@ -72545,7 +72153,6 @@ type SecondaryVmAlreadyDisabled struct { func init() { t["SecondaryVmAlreadyDisabled"] = reflect.TypeOf((*SecondaryVmAlreadyDisabled)(nil)).Elem() - minAPIVersionForType["SecondaryVmAlreadyDisabled"] = "4.0" } type SecondaryVmAlreadyDisabledFault SecondaryVmAlreadyDisabled @@ -72565,7 +72172,6 @@ type SecondaryVmAlreadyEnabled struct { func init() { t["SecondaryVmAlreadyEnabled"] = reflect.TypeOf((*SecondaryVmAlreadyEnabled)(nil)).Elem() - minAPIVersionForType["SecondaryVmAlreadyEnabled"] = "4.0" } type SecondaryVmAlreadyEnabledFault SecondaryVmAlreadyEnabled @@ -72586,7 +72192,6 @@ type SecondaryVmAlreadyRegistered struct { func init() { t["SecondaryVmAlreadyRegistered"] = reflect.TypeOf((*SecondaryVmAlreadyRegistered)(nil)).Elem() - minAPIVersionForType["SecondaryVmAlreadyRegistered"] = "4.0" } type SecondaryVmAlreadyRegisteredFault SecondaryVmAlreadyRegistered @@ -72607,7 +72212,6 @@ type SecondaryVmNotRegistered struct { func init() { t["SecondaryVmNotRegistered"] = reflect.TypeOf((*SecondaryVmNotRegistered)(nil)).Elem() - minAPIVersionForType["SecondaryVmNotRegistered"] = "4.0" } type SecondaryVmNotRegisteredFault SecondaryVmNotRegistered @@ -72640,12 +72244,11 @@ type SecurityProfile struct { ApplyProfile // Permission configuration. - Permission []PermissionProfile `xml:"permission,omitempty" json:"permission,omitempty" vim:"4.1"` + Permission []PermissionProfile `xml:"permission,omitempty" json:"permission,omitempty"` } func init() { t["SecurityProfile"] = reflect.TypeOf((*SecurityProfile)(nil)).Elem() - minAPIVersionForType["SecurityProfile"] = "4.0" } type SelectActivePartition SelectActivePartitionRequestType @@ -72716,7 +72319,6 @@ type SelectionSet struct { func init() { t["SelectionSet"] = reflect.TypeOf((*SelectionSet)(nil)).Elem() - minAPIVersionForType["SelectionSet"] = "5.0" } // The `SelectionSpec` is the base type for data @@ -72847,7 +72449,6 @@ type ServiceConsolePortGroupProfile struct { func init() { t["ServiceConsolePortGroupProfile"] = reflect.TypeOf((*ServiceConsolePortGroupProfile)(nil)).Elem() - minAPIVersionForType["ServiceConsolePortGroupProfile"] = "4.0" } // The ServiceConsoleReservationInfo data object type describes the @@ -72903,7 +72504,7 @@ type ServiceContent struct { // A singleton managed object for tracking custom sets of objects. // // Refers instance of `ViewManager`. - ViewManager *ManagedObjectReference `xml:"viewManager,omitempty" json:"viewManager,omitempty" vim:"2.5"` + ViewManager *ManagedObjectReference `xml:"viewManager,omitempty" json:"viewManager,omitempty"` // Information about the service, such as the software version. About AboutInfo `xml:"about" json:"about"` // Generic configuration for a management server. @@ -72931,7 +72532,7 @@ type ServiceContent struct { // A singleton managed object that manages local services. // // Refers instance of `ServiceManager`. - ServiceManager *ManagedObjectReference `xml:"serviceManager,omitempty" json:"serviceManager,omitempty" vim:"5.1"` + ServiceManager *ManagedObjectReference `xml:"serviceManager,omitempty" json:"serviceManager,omitempty"` // A singleton managed object that manages the collection and reporting // of performance statistics. // @@ -72956,7 +72557,7 @@ type ServiceContent struct { // A singleton managed object that manages extensions. // // Refers instance of `ExtensionManager`. 
- ExtensionManager *ManagedObjectReference `xml:"extensionManager,omitempty" json:"extensionManager,omitempty" vim:"2.5"` + ExtensionManager *ManagedObjectReference `xml:"extensionManager,omitempty" json:"extensionManager,omitempty"` // A singleton managed object that manages saved guest customization // specifications. // @@ -72969,7 +72570,7 @@ type ServiceContent struct { // InstantClone operation. See `VirtualMachine.InstantClone_Task`. // // Refers instance of `VirtualMachineGuestCustomizationManager`. - GuestCustomizationManager *ManagedObjectReference `xml:"guestCustomizationManager,omitempty" json:"guestCustomizationManager,omitempty" vim:"6.8.7"` + GuestCustomizationManager *ManagedObjectReference `xml:"guestCustomizationManager,omitempty" json:"guestCustomizationManager,omitempty"` // A singleton managed object that managed custom fields. // // Refers instance of `CustomFieldsManager`. @@ -72994,101 +72595,101 @@ type ServiceContent struct { // on datastores. // // Refers instance of `FileManager`. - FileManager *ManagedObjectReference `xml:"fileManager,omitempty" json:"fileManager,omitempty" vim:"2.5"` + FileManager *ManagedObjectReference `xml:"fileManager,omitempty" json:"fileManager,omitempty"` // Datastore Namespace manager. // // A singleton managed object that is used to manage manipulations // related to datastores' namespaces. // // Refers instance of `DatastoreNamespaceManager`. - DatastoreNamespaceManager *ManagedObjectReference `xml:"datastoreNamespaceManager,omitempty" json:"datastoreNamespaceManager,omitempty" vim:"5.5"` + DatastoreNamespaceManager *ManagedObjectReference `xml:"datastoreNamespaceManager,omitempty" json:"datastoreNamespaceManager,omitempty"` // A singleton managed object that allows management of virtual disks // on datastores. // // Refers instance of `VirtualDiskManager`. - VirtualDiskManager *ManagedObjectReference `xml:"virtualDiskManager,omitempty" json:"virtualDiskManager,omitempty" vim:"2.5"` + VirtualDiskManager *ManagedObjectReference `xml:"virtualDiskManager,omitempty" json:"virtualDiskManager,omitempty"` // Deprecated as of VI API 2.5, use the VMware vCenter Converter plug-in. // // A singleton managed object that manages the discovery, analysis, // recommendation and virtualization of physical machines // // Refers instance of `VirtualizationManager`. - VirtualizationManager *ManagedObjectReference `xml:"virtualizationManager,omitempty" json:"virtualizationManager,omitempty" vim:"2.5"` + VirtualizationManager *ManagedObjectReference `xml:"virtualizationManager,omitempty" json:"virtualizationManager,omitempty"` // A singleton managed object that allows SNMP configuration. // // Not set if not supported on a particular platform. // // Refers instance of `HostSnmpSystem`. - SnmpSystem *ManagedObjectReference `xml:"snmpSystem,omitempty" json:"snmpSystem,omitempty" vim:"4.0"` + SnmpSystem *ManagedObjectReference `xml:"snmpSystem,omitempty" json:"snmpSystem,omitempty"` // A singleton managed object that can answer questions about the // feasibility of certain provisioning operations. // // Refers instance of `VirtualMachineProvisioningChecker`. - VmProvisioningChecker *ManagedObjectReference `xml:"vmProvisioningChecker,omitempty" json:"vmProvisioningChecker,omitempty" vim:"4.0"` + VmProvisioningChecker *ManagedObjectReference `xml:"vmProvisioningChecker,omitempty" json:"vmProvisioningChecker,omitempty"` // A singleton managed object that can answer questions about compatibility // of a virtual machine with a host. 
// // Refers instance of `VirtualMachineCompatibilityChecker`. - VmCompatibilityChecker *ManagedObjectReference `xml:"vmCompatibilityChecker,omitempty" json:"vmCompatibilityChecker,omitempty" vim:"4.0"` + VmCompatibilityChecker *ManagedObjectReference `xml:"vmCompatibilityChecker,omitempty" json:"vmCompatibilityChecker,omitempty"` // A singleton managed object that can generate OVF descriptors (export) and create // vApps (single-VM or vApp container-based) from OVF descriptors (import). // // Refers instance of `OvfManager`. - OvfManager *ManagedObjectReference `xml:"ovfManager,omitempty" json:"ovfManager,omitempty" vim:"4.0"` + OvfManager *ManagedObjectReference `xml:"ovfManager,omitempty" json:"ovfManager,omitempty"` // A singleton managed object that supports management of IpPool objects. // // IP pools are // used when allocating IPv4 and IPv6 addresses to vApps. // // Refers instance of `IpPoolManager`. - IpPoolManager *ManagedObjectReference `xml:"ipPoolManager,omitempty" json:"ipPoolManager,omitempty" vim:"4.0"` + IpPoolManager *ManagedObjectReference `xml:"ipPoolManager,omitempty" json:"ipPoolManager,omitempty"` // A singleton managed object that provides relevant information of // DistributedVirtualSwitch. // // Refers instance of `DistributedVirtualSwitchManager`. - DvSwitchManager *ManagedObjectReference `xml:"dvSwitchManager,omitempty" json:"dvSwitchManager,omitempty" vim:"4.0"` + DvSwitchManager *ManagedObjectReference `xml:"dvSwitchManager,omitempty" json:"dvSwitchManager,omitempty"` // A singleton managed object that manages the host profiles. // // Refers instance of `HostProfileManager`. - HostProfileManager *ManagedObjectReference `xml:"hostProfileManager,omitempty" json:"hostProfileManager,omitempty" vim:"4.0"` + HostProfileManager *ManagedObjectReference `xml:"hostProfileManager,omitempty" json:"hostProfileManager,omitempty"` // A singleton managed object that manages the cluster profiles. // // Refers instance of `ClusterProfileManager`. - ClusterProfileManager *ManagedObjectReference `xml:"clusterProfileManager,omitempty" json:"clusterProfileManager,omitempty" vim:"4.0"` + ClusterProfileManager *ManagedObjectReference `xml:"clusterProfileManager,omitempty" json:"clusterProfileManager,omitempty"` // A singleton managed object that manages compliance aspects of entities. // // Refers instance of `ProfileComplianceManager`. - ComplianceManager *ManagedObjectReference `xml:"complianceManager,omitempty" json:"complianceManager,omitempty" vim:"4.0"` + ComplianceManager *ManagedObjectReference `xml:"complianceManager,omitempty" json:"complianceManager,omitempty"` // A singleton managed object that provides methods for retrieving message // catalogs for client-side localization support. // // Refers instance of `LocalizationManager`. - LocalizationManager *ManagedObjectReference `xml:"localizationManager,omitempty" json:"localizationManager,omitempty" vim:"4.0"` + LocalizationManager *ManagedObjectReference `xml:"localizationManager,omitempty" json:"localizationManager,omitempty"` // A singleton managed object that provides methods for storage resource // management. // // Refers instance of `StorageResourceManager`. - StorageResourceManager *ManagedObjectReference `xml:"storageResourceManager,omitempty" json:"storageResourceManager,omitempty" vim:"4.1"` + StorageResourceManager *ManagedObjectReference `xml:"storageResourceManager,omitempty" json:"storageResourceManager,omitempty"` // A singleton managed object that provides methods for guest operations. 
// // Refers instance of `GuestOperationsManager`. - GuestOperationsManager *ManagedObjectReference `xml:"guestOperationsManager,omitempty" json:"guestOperationsManager,omitempty" vim:"5.0"` + GuestOperationsManager *ManagedObjectReference `xml:"guestOperationsManager,omitempty" json:"guestOperationsManager,omitempty"` // A singleton managed object that provides methods for looking up static VM // overhead memory. // // Refers instance of `OverheadMemoryManager`. - OverheadMemoryManager *ManagedObjectReference `xml:"overheadMemoryManager,omitempty" json:"overheadMemoryManager,omitempty" vim:"6.0"` + OverheadMemoryManager *ManagedObjectReference `xml:"overheadMemoryManager,omitempty" json:"overheadMemoryManager,omitempty"` // host certificate manager // A singleton managed object to manage the certificates between the // Certificate Server and the host. // // Refers instance of `CertificateManager`. - CertificateManager *ManagedObjectReference `xml:"certificateManager,omitempty" json:"certificateManager,omitempty" vim:"6.0"` + CertificateManager *ManagedObjectReference `xml:"certificateManager,omitempty" json:"certificateManager,omitempty"` // A singleton managed object that manages IO Filters installed on the ESXi // hosts and IO Filter configuration of virtual disks. // // Refers instance of `IoFilterManager`. - IoFilterManager *ManagedObjectReference `xml:"ioFilterManager,omitempty" json:"ioFilterManager,omitempty" vim:"6.0"` + IoFilterManager *ManagedObjectReference `xml:"ioFilterManager,omitempty" json:"ioFilterManager,omitempty"` // A singleton managed object that manages all storage objects in the // Virtual Infrastructure. // @@ -73100,40 +72701,40 @@ type ServiceContent struct { // vStorageObject. // // Refers instance of `VStorageObjectManagerBase`. - VStorageObjectManager *ManagedObjectReference `xml:"vStorageObjectManager,omitempty" json:"vStorageObjectManager,omitempty" vim:"6.5"` + VStorageObjectManager *ManagedObjectReference `xml:"vStorageObjectManager,omitempty" json:"vStorageObjectManager,omitempty"` // A singleton managed object that manages the host specification data. // // Refers instance of `HostSpecificationManager`. - HostSpecManager *ManagedObjectReference `xml:"hostSpecManager,omitempty" json:"hostSpecManager,omitempty" vim:"6.5"` + HostSpecManager *ManagedObjectReference `xml:"hostSpecManager,omitempty" json:"hostSpecManager,omitempty"` // A singleton managed object used to manage cryptographic keys. // // Refers instance of `CryptoManager`. - CryptoManager *ManagedObjectReference `xml:"cryptoManager,omitempty" json:"cryptoManager,omitempty" vim:"6.5"` + CryptoManager *ManagedObjectReference `xml:"cryptoManager,omitempty" json:"cryptoManager,omitempty"` // A singleton managed object that manages the health updates. // // Refers instance of `HealthUpdateManager`. - HealthUpdateManager *ManagedObjectReference `xml:"healthUpdateManager,omitempty" json:"healthUpdateManager,omitempty" vim:"6.5"` + HealthUpdateManager *ManagedObjectReference `xml:"healthUpdateManager,omitempty" json:"healthUpdateManager,omitempty"` // A singleton managed object that manages the VCHA Cluster // configuration. // // Refers instance of `FailoverClusterConfigurator`. 
- FailoverClusterConfigurator *ManagedObjectReference `xml:"failoverClusterConfigurator,omitempty" json:"failoverClusterConfigurator,omitempty" vim:"6.5"` + FailoverClusterConfigurator *ManagedObjectReference `xml:"failoverClusterConfigurator,omitempty" json:"failoverClusterConfigurator,omitempty"` // A singleton managed object for managing a configured VCHA Cluster. // // Refers instance of `FailoverClusterManager`. - FailoverClusterManager *ManagedObjectReference `xml:"failoverClusterManager,omitempty" json:"failoverClusterManager,omitempty" vim:"6.5"` + FailoverClusterManager *ManagedObjectReference `xml:"failoverClusterManager,omitempty" json:"failoverClusterManager,omitempty"` // A singleton managed object used to configure tenants. // // Refers instance of `TenantTenantManager`. - TenantManager *ManagedObjectReference `xml:"tenantManager,omitempty" json:"tenantManager,omitempty" vim:"6.9.1"` + TenantManager *ManagedObjectReference `xml:"tenantManager,omitempty" json:"tenantManager,omitempty"` // A singleton managed object used to manage site related capabilities. // // Refers instance of `SiteInfoManager`. - SiteInfoManager *ManagedObjectReference `xml:"siteInfoManager,omitempty" json:"siteInfoManager,omitempty" vim:"7.0"` + SiteInfoManager *ManagedObjectReference `xml:"siteInfoManager,omitempty" json:"siteInfoManager,omitempty"` // A singleton managed object used to query storage related entities. // // Refers instance of `StorageQueryManager`. - StorageQueryManager *ManagedObjectReference `xml:"storageQueryManager,omitempty" json:"storageQueryManager,omitempty" vim:"6.7.2"` + StorageQueryManager *ManagedObjectReference `xml:"storageQueryManager,omitempty" json:"storageQueryManager,omitempty"` } func init() { @@ -73166,7 +72767,6 @@ type ServiceLocator struct { func init() { t["ServiceLocator"] = reflect.TypeOf((*ServiceLocator)(nil)).Elem() - minAPIVersionForType["ServiceLocator"] = "6.0" } // The data object type is a base type of credential for authentication such @@ -73177,7 +72777,6 @@ type ServiceLocatorCredential struct { func init() { t["ServiceLocatorCredential"] = reflect.TypeOf((*ServiceLocatorCredential)(nil)).Elem() - minAPIVersionForType["ServiceLocatorCredential"] = "6.0" } // The data object type specifies the username and password credential for @@ -73193,7 +72792,6 @@ type ServiceLocatorNamePassword struct { func init() { t["ServiceLocatorNamePassword"] = reflect.TypeOf((*ServiceLocatorNamePassword)(nil)).Elem() - minAPIVersionForType["ServiceLocatorNamePassword"] = "6.0" } // The data object type specifies the SAML token (SSO) based credential for @@ -73207,7 +72805,6 @@ type ServiceLocatorSAMLCredential struct { func init() { t["ServiceLocatorSAMLCredential"] = reflect.TypeOf((*ServiceLocatorSAMLCredential)(nil)).Elem() - minAPIVersionForType["ServiceLocatorSAMLCredential"] = "6.0" } // This data object represents essential information about a particular service. @@ -73260,7 +72857,6 @@ type ServiceProfile struct { func init() { t["ServiceProfile"] = reflect.TypeOf((*ServiceProfile)(nil)).Elem() - minAPIVersionForType["ServiceProfile"] = "4.0" } // These are session events. @@ -73311,11 +72907,11 @@ type SessionManagerGenericServiceTicket struct { // A unique string identifying the ticket. 
Id string `xml:"id" json:"id"` // The name of the host that the service is running on - HostName string `xml:"hostName,omitempty" json:"hostName,omitempty" vim:"5.1"` + HostName string `xml:"hostName,omitempty" json:"hostName,omitempty"` // The expected thumbprint of the SSL certificate of the host. // // If it is empty, the host must be authenticated by name. - SslThumbprint string `xml:"sslThumbprint,omitempty" json:"sslThumbprint,omitempty" vim:"5.1"` + SslThumbprint string `xml:"sslThumbprint,omitempty" json:"sslThumbprint,omitempty"` // List of expected thumbprints of the certificate of the host to // which we are connecting. // @@ -73331,7 +72927,6 @@ type SessionManagerGenericServiceTicket struct { func init() { t["SessionManagerGenericServiceTicket"] = reflect.TypeOf((*SessionManagerGenericServiceTicket)(nil)).Elem() - minAPIVersionForType["SessionManagerGenericServiceTicket"] = "5.0" } // This data object type describes a request to an HTTP or HTTPS service. @@ -73349,8 +72944,8 @@ type SessionManagerHttpServiceRequestSpec struct { // E.g. 'https://127.0.0.1:8080/cgi-bin/vm-support.cgi?n=val'. // // For ESXi CGI service requests: - // - only the path and query parts of the URL are used - // (e.g. "/cgi-bin/vm-support.cgi?n=val"). + // - only the path and query parts of the URL are used + // (e.g. "/cgi-bin/vm-support.cgi?n=val"). // // This is so because the scheme is not known to the CGI service, // and the port may not be the same if using a proxy. @@ -73359,7 +72954,6 @@ type SessionManagerHttpServiceRequestSpec struct { func init() { t["SessionManagerHttpServiceRequestSpec"] = reflect.TypeOf((*SessionManagerHttpServiceRequestSpec)(nil)).Elem() - minAPIVersionForType["SessionManagerHttpServiceRequestSpec"] = "5.0" } // This data object type contains the user name @@ -73390,7 +72984,6 @@ type SessionManagerServiceRequestSpec struct { func init() { t["SessionManagerServiceRequestSpec"] = reflect.TypeOf((*SessionManagerServiceRequestSpec)(nil)).Elem() - minAPIVersionForType["SessionManagerServiceRequestSpec"] = "5.0" } // This data object type describes a request to invoke a specific method @@ -73409,7 +73002,6 @@ type SessionManagerVmomiServiceRequestSpec struct { func init() { t["SessionManagerVmomiServiceRequestSpec"] = reflect.TypeOf((*SessionManagerVmomiServiceRequestSpec)(nil)).Elem() - minAPIVersionForType["SessionManagerVmomiServiceRequestSpec"] = "5.1" } // This event records the termination of a session. @@ -73461,6 +73053,10 @@ type SetCryptoModeRequestType struct { // input and will be interpreted as // `onDemand`. CryptoMode string `xml:"cryptoMode" json:"cryptoMode"` + // The encryption mode policy for the cluster. When no policy + // is specified, host keys will be automcatically generated + // using the current default key provider. 
+ Policy *ClusterComputeResourceCryptoModePolicy `xml:"policy,omitempty" json:"policy,omitempty" vim:"8.0.3.0"` } func init() { @@ -73610,6 +73206,7 @@ type SetKeyCustomAttributesRequestType struct { func init() { t["SetKeyCustomAttributesRequestType"] = reflect.TypeOf((*SetKeyCustomAttributesRequestType)(nil)).Elem() + minAPIVersionForType["SetKeyCustomAttributesRequestType"] = "8.0.1.0" } type SetKeyCustomAttributesResponse struct { @@ -73688,6 +73285,7 @@ type SetMaxQueueDepthRequestType struct { func init() { t["SetMaxQueueDepthRequestType"] = reflect.TypeOf((*SetMaxQueueDepthRequestType)(nil)).Elem() + minAPIVersionForType["SetMaxQueueDepthRequestType"] = "8.0.0.1" } type SetMaxQueueDepthResponse struct { @@ -73834,6 +73432,7 @@ type SetServiceAccountRequestType struct { func init() { t["SetServiceAccountRequestType"] = reflect.TypeOf((*SetServiceAccountRequestType)(nil)).Elem() + minAPIVersionForType["SetServiceAccountRequestType"] = "8.0.2.0" } type SetServiceAccountResponse struct { @@ -73923,6 +73522,47 @@ func init() { t["SetVirtualDiskUuid"] = reflect.TypeOf((*SetVirtualDiskUuid)(nil)).Elem() } +// The parameters of `VcenterVStorageObjectManager.SetVirtualDiskUuidEx_Task`. +type SetVirtualDiskUuidExRequestType struct { + This ManagedObjectReference `xml:"_this" json:"-"` + // The name of the disk, either a datastore path or a URL + // referring to the virtual disk whose uuid for the DDB entry needs to be set. + // A datastore path has the form + // > \[_datastore_\] _path_ + // + // where + // - _datastore_ is the datastore name. + // - _path_ is a slash-delimited path from the root of the datastore. + // + // An example datastore path is "\[storage\] path/to/file.extension". + Name string `xml:"name" json:"name"` + // If name is a datastore path, the datacenter for + // that datastore path is mandatory. Not needed when invoked directly on ESX. + // If not specified on a call from VirtualCenter, + // name must be a URL. + // + // Refers instance of `Datacenter`. + Datacenter *ManagedObjectReference `xml:"datacenter,omitempty" json:"datacenter,omitempty"` + // The hex representation of the unique ID for this virtual disk. If uuid is not set or missing, + // a random UUID is generated and assigned. + Uuid string `xml:"uuid,omitempty" json:"uuid,omitempty"` +} + +func init() { + t["SetVirtualDiskUuidExRequestType"] = reflect.TypeOf((*SetVirtualDiskUuidExRequestType)(nil)).Elem() + minAPIVersionForType["SetVirtualDiskUuidExRequestType"] = "8.0.3.0" +} + +type SetVirtualDiskUuidEx_Task SetVirtualDiskUuidExRequestType + +func init() { + t["SetVirtualDiskUuidEx_Task"] = reflect.TypeOf((*SetVirtualDiskUuidEx_Task)(nil)).Elem() +} + +type SetVirtualDiskUuidEx_TaskResponse struct { + Returnval ManagedObjectReference `xml:"returnval" json:"returnval"` +} + // The parameters of `VirtualDiskManager.SetVirtualDiskUuid`. 
type SetVirtualDiskUuidRequestType struct { This ManagedObjectReference `xml:"_this" json:"-"` @@ -74014,7 +73654,6 @@ type SharesOption struct { func init() { t["SharesOption"] = reflect.TypeOf((*SharesOption)(nil)).Elem() - minAPIVersionForType["SharesOption"] = "4.1" } // This exception is thrown when VirtualMachine.shrinkDisk @@ -74028,7 +73667,6 @@ type ShrinkDiskFault struct { func init() { t["ShrinkDiskFault"] = reflect.TypeOf((*ShrinkDiskFault)(nil)).Elem() - minAPIVersionForType["ShrinkDiskFault"] = "5.1" } type ShrinkDiskFaultFault ShrinkDiskFault @@ -74124,7 +73762,6 @@ type SingleIp struct { func init() { t["SingleIp"] = reflect.TypeOf((*SingleIp)(nil)).Elem() - minAPIVersionForType["SingleIp"] = "5.5" } // This class defines a Single MAC address. @@ -74140,7 +73777,6 @@ type SingleMac struct { func init() { t["SingleMac"] = reflect.TypeOf((*SingleMac)(nil)).Elem() - minAPIVersionForType["SingleMac"] = "5.5" } // This data object type represents the external site-related capabilities @@ -74151,7 +73787,6 @@ type SiteInfo struct { func init() { t["SiteInfo"] = reflect.TypeOf((*SiteInfo)(nil)).Elem() - minAPIVersionForType["SiteInfo"] = "7.0" } // An attempt is being made to copy a virtual machine's disk that has @@ -74165,7 +73800,6 @@ type SnapshotCloneNotSupported struct { func init() { t["SnapshotCloneNotSupported"] = reflect.TypeOf((*SnapshotCloneNotSupported)(nil)).Elem() - minAPIVersionForType["SnapshotCloneNotSupported"] = "2.5" } type SnapshotCloneNotSupportedFault SnapshotCloneNotSupported @@ -74204,7 +73838,6 @@ type SnapshotDisabled struct { func init() { t["SnapshotDisabled"] = reflect.TypeOf((*SnapshotDisabled)(nil)).Elem() - minAPIVersionForType["SnapshotDisabled"] = "2.5" } type SnapshotDisabledFault SnapshotDisabled @@ -74262,7 +73895,6 @@ type SnapshotLocked struct { func init() { t["SnapshotLocked"] = reflect.TypeOf((*SnapshotLocked)(nil)).Elem() - minAPIVersionForType["SnapshotLocked"] = "2.5" } type SnapshotLockedFault SnapshotLocked @@ -74281,7 +73913,6 @@ type SnapshotMoveFromNonHomeNotSupported struct { func init() { t["SnapshotMoveFromNonHomeNotSupported"] = reflect.TypeOf((*SnapshotMoveFromNonHomeNotSupported)(nil)).Elem() - minAPIVersionForType["SnapshotMoveFromNonHomeNotSupported"] = "2.5" } type SnapshotMoveFromNonHomeNotSupportedFault SnapshotMoveFromNonHomeNotSupported @@ -74299,7 +73930,6 @@ type SnapshotMoveNotSupported struct { func init() { t["SnapshotMoveNotSupported"] = reflect.TypeOf((*SnapshotMoveNotSupported)(nil)).Elem() - minAPIVersionForType["SnapshotMoveNotSupported"] = "2.5" } type SnapshotMoveNotSupportedFault SnapshotMoveNotSupported @@ -74318,7 +73948,6 @@ type SnapshotMoveToNonHomeNotSupported struct { func init() { t["SnapshotMoveToNonHomeNotSupported"] = reflect.TypeOf((*SnapshotMoveToNonHomeNotSupported)(nil)).Elem() - minAPIVersionForType["SnapshotMoveToNonHomeNotSupported"] = "2.5" } type SnapshotMoveToNonHomeNotSupportedFault SnapshotMoveToNonHomeNotSupported @@ -74339,7 +73968,6 @@ type SnapshotNoChange struct { func init() { t["SnapshotNoChange"] = reflect.TypeOf((*SnapshotNoChange)(nil)).Elem() - minAPIVersionForType["SnapshotNoChange"] = "2.5" } type SnapshotNoChangeFault SnapshotNoChange @@ -74382,6 +74010,20 @@ func init() { t["SnapshotRevertIssueFault"] = reflect.TypeOf((*SnapshotRevertIssueFault)(nil)).Elem() } +// This data type defines the filtering specification for removing snapshots +// from virtual machine. 
+type SnapshotSelectionSpec struct { + DynamicData + + // This is the property to select snapshots older than X days. + RetentionDays int32 `xml:"retentionDays,omitempty" json:"retentionDays,omitempty"` +} + +func init() { + t["SnapshotSelectionSpec"] = reflect.TypeOf((*SnapshotSelectionSpec)(nil)).Elem() + minAPIVersionForType["SnapshotSelectionSpec"] = "8.0.3.0" +} + // The current DRS migration priority setting prevents generating // a recommendation to correct the soft VM/Host affinity rules constraint // violation for the VM so the violation will not be corrected. @@ -74395,7 +74037,6 @@ type SoftRuleVioCorrectionDisallowed struct { func init() { t["SoftRuleVioCorrectionDisallowed"] = reflect.TypeOf((*SoftRuleVioCorrectionDisallowed)(nil)).Elem() - minAPIVersionForType["SoftRuleVioCorrectionDisallowed"] = "4.1" } type SoftRuleVioCorrectionDisallowedFault SoftRuleVioCorrectionDisallowed @@ -74417,7 +74058,6 @@ type SoftRuleVioCorrectionImpact struct { func init() { t["SoftRuleVioCorrectionImpact"] = reflect.TypeOf((*SoftRuleVioCorrectionImpact)(nil)).Elem() - minAPIVersionForType["SoftRuleVioCorrectionImpact"] = "4.1" } type SoftRuleVioCorrectionImpactFault SoftRuleVioCorrectionImpact @@ -74487,7 +74127,6 @@ type SoftwarePackage struct { func init() { t["SoftwarePackage"] = reflect.TypeOf((*SoftwarePackage)(nil)).Elem() - minAPIVersionForType["SoftwarePackage"] = "6.5" } type SoftwarePackageCapability struct { @@ -74518,7 +74157,6 @@ type SolutionUserRequired struct { func init() { t["SolutionUserRequired"] = reflect.TypeOf((*SolutionUserRequired)(nil)).Elem() - minAPIVersionForType["SolutionUserRequired"] = "7.0" } type SolutionUserRequiredFault SolutionUserRequired @@ -74544,7 +74182,6 @@ type SourceNodeSpec struct { func init() { t["SourceNodeSpec"] = reflect.TypeOf((*SourceNodeSpec)(nil)).Elem() - minAPIVersionForType["SourceNodeSpec"] = "6.5" } // A SsdDiskNotAvailable fault indicating that the specified SSD @@ -74563,7 +74200,6 @@ type SsdDiskNotAvailable struct { func init() { t["SsdDiskNotAvailable"] = reflect.TypeOf((*SsdDiskNotAvailable)(nil)).Elem() - minAPIVersionForType["SsdDiskNotAvailable"] = "5.5" } type SsdDiskNotAvailableFault SsdDiskNotAvailable @@ -74788,9 +74424,9 @@ type StateAlarmExpression struct { // Path of the state property. // // The supported values: - // - for vim.VirtualMachine type: - // - runtime.powerState or summary.quickStats.guestHeartbeatStatus - // - for vim.HostSystem type: runtime.connectionState + // - for vim.VirtualMachine type: + // - runtime.powerState or summary.quickStats.guestHeartbeatStatus + // - for vim.HostSystem type: runtime.connectionState StatePath string `xml:"statePath" json:"statePath"` // Whether or not to test for a yellow condition. // @@ -74814,12 +74450,11 @@ type StaticRouteProfile struct { ApplyProfile // Linkable identifier. - Key string `xml:"key,omitempty" json:"key,omitempty" vim:"5.1"` + Key string `xml:"key,omitempty" json:"key,omitempty"` } func init() { t["StaticRouteProfile"] = reflect.TypeOf((*StaticRouteProfile)(nil)).Elem() - minAPIVersionForType["StaticRouteProfile"] = "4.0" } type StopRecordingRequestType struct { @@ -74891,6 +74526,8 @@ type StorageDrsAutomationConfig struct { // overrides the datastore cluster level automation behavior defined in the // `StorageDrsPodConfigInfo`. SpaceLoadBalanceAutomationMode string `xml:"spaceLoadBalanceAutomationMode,omitempty" json:"spaceLoadBalanceAutomationMode,omitempty"` + // Deprecated as of vSphere8.0 U3, and there is no replacement for it. 
+ // // Specifies the behavior of Storage DRS when it generates // recommendations for correcting I/O load imbalance in a datastore // cluster. @@ -74930,7 +74567,6 @@ type StorageDrsAutomationConfig struct { func init() { t["StorageDrsAutomationConfig"] = reflect.TypeOf((*StorageDrsAutomationConfig)(nil)).Elem() - minAPIVersionForType["StorageDrsAutomationConfig"] = "6.0" } // This fault is thrown because Storage DRS cannot generate recommendations @@ -74942,7 +74578,6 @@ type StorageDrsCannotMoveDiskInMultiWriterMode struct { func init() { t["StorageDrsCannotMoveDiskInMultiWriterMode"] = reflect.TypeOf((*StorageDrsCannotMoveDiskInMultiWriterMode)(nil)).Elem() - minAPIVersionForType["StorageDrsCannotMoveDiskInMultiWriterMode"] = "5.1" } type StorageDrsCannotMoveDiskInMultiWriterModeFault StorageDrsCannotMoveDiskInMultiWriterMode @@ -74959,7 +74594,6 @@ type StorageDrsCannotMoveFTVm struct { func init() { t["StorageDrsCannotMoveFTVm"] = reflect.TypeOf((*StorageDrsCannotMoveFTVm)(nil)).Elem() - minAPIVersionForType["StorageDrsCannotMoveFTVm"] = "5.1" } type StorageDrsCannotMoveFTVmFault StorageDrsCannotMoveFTVm @@ -74976,7 +74610,6 @@ type StorageDrsCannotMoveIndependentDisk struct { func init() { t["StorageDrsCannotMoveIndependentDisk"] = reflect.TypeOf((*StorageDrsCannotMoveIndependentDisk)(nil)).Elem() - minAPIVersionForType["StorageDrsCannotMoveIndependentDisk"] = "5.1" } type StorageDrsCannotMoveIndependentDiskFault StorageDrsCannotMoveIndependentDisk @@ -74994,7 +74627,6 @@ type StorageDrsCannotMoveManuallyPlacedSwapFile struct { func init() { t["StorageDrsCannotMoveManuallyPlacedSwapFile"] = reflect.TypeOf((*StorageDrsCannotMoveManuallyPlacedSwapFile)(nil)).Elem() - minAPIVersionForType["StorageDrsCannotMoveManuallyPlacedSwapFile"] = "5.1" } type StorageDrsCannotMoveManuallyPlacedSwapFileFault StorageDrsCannotMoveManuallyPlacedSwapFile @@ -75011,7 +74643,6 @@ type StorageDrsCannotMoveManuallyPlacedVm struct { func init() { t["StorageDrsCannotMoveManuallyPlacedVm"] = reflect.TypeOf((*StorageDrsCannotMoveManuallyPlacedVm)(nil)).Elem() - minAPIVersionForType["StorageDrsCannotMoveManuallyPlacedVm"] = "5.1" } type StorageDrsCannotMoveManuallyPlacedVmFault StorageDrsCannotMoveManuallyPlacedVm @@ -75028,7 +74659,6 @@ type StorageDrsCannotMoveSharedDisk struct { func init() { t["StorageDrsCannotMoveSharedDisk"] = reflect.TypeOf((*StorageDrsCannotMoveSharedDisk)(nil)).Elem() - minAPIVersionForType["StorageDrsCannotMoveSharedDisk"] = "5.1" } type StorageDrsCannotMoveSharedDiskFault StorageDrsCannotMoveSharedDisk @@ -75045,7 +74675,6 @@ type StorageDrsCannotMoveTemplate struct { func init() { t["StorageDrsCannotMoveTemplate"] = reflect.TypeOf((*StorageDrsCannotMoveTemplate)(nil)).Elem() - minAPIVersionForType["StorageDrsCannotMoveTemplate"] = "5.1" } type StorageDrsCannotMoveTemplateFault StorageDrsCannotMoveTemplate @@ -75062,7 +74691,6 @@ type StorageDrsCannotMoveVmInUserFolder struct { func init() { t["StorageDrsCannotMoveVmInUserFolder"] = reflect.TypeOf((*StorageDrsCannotMoveVmInUserFolder)(nil)).Elem() - minAPIVersionForType["StorageDrsCannotMoveVmInUserFolder"] = "5.1" } type StorageDrsCannotMoveVmInUserFolderFault StorageDrsCannotMoveVmInUserFolder @@ -75079,7 +74707,6 @@ type StorageDrsCannotMoveVmWithMountedCDROM struct { func init() { t["StorageDrsCannotMoveVmWithMountedCDROM"] = reflect.TypeOf((*StorageDrsCannotMoveVmWithMountedCDROM)(nil)).Elem() - minAPIVersionForType["StorageDrsCannotMoveVmWithMountedCDROM"] = "5.1" } type StorageDrsCannotMoveVmWithMountedCDROMFault 
StorageDrsCannotMoveVmWithMountedCDROM @@ -75096,7 +74723,6 @@ type StorageDrsCannotMoveVmWithNoFilesInLayout struct { func init() { t["StorageDrsCannotMoveVmWithNoFilesInLayout"] = reflect.TypeOf((*StorageDrsCannotMoveVmWithNoFilesInLayout)(nil)).Elem() - minAPIVersionForType["StorageDrsCannotMoveVmWithNoFilesInLayout"] = "5.1" } type StorageDrsCannotMoveVmWithNoFilesInLayoutFault StorageDrsCannotMoveVmWithNoFilesInLayout @@ -75125,7 +74751,6 @@ type StorageDrsConfigInfo struct { func init() { t["StorageDrsConfigInfo"] = reflect.TypeOf((*StorageDrsConfigInfo)(nil)).Elem() - minAPIVersionForType["StorageDrsConfigInfo"] = "5.0" } // The `StorageDrsConfigSpec` data object provides a set of update @@ -75144,7 +74769,6 @@ type StorageDrsConfigSpec struct { func init() { t["StorageDrsConfigSpec"] = reflect.TypeOf((*StorageDrsConfigSpec)(nil)).Elem() - minAPIVersionForType["StorageDrsConfigSpec"] = "5.0" } // This fault is thrown when one datastore using Storage DRS is added to two @@ -75155,7 +74779,6 @@ type StorageDrsDatacentersCannotShareDatastore struct { func init() { t["StorageDrsDatacentersCannotShareDatastore"] = reflect.TypeOf((*StorageDrsDatacentersCannotShareDatastore)(nil)).Elem() - minAPIVersionForType["StorageDrsDatacentersCannotShareDatastore"] = "5.5" } type StorageDrsDatacentersCannotShareDatastoreFault StorageDrsDatacentersCannotShareDatastore @@ -75172,7 +74795,6 @@ type StorageDrsDisabledOnVm struct { func init() { t["StorageDrsDisabledOnVm"] = reflect.TypeOf((*StorageDrsDisabledOnVm)(nil)).Elem() - minAPIVersionForType["StorageDrsDisabledOnVm"] = "5.0" } type StorageDrsDisabledOnVmFault StorageDrsDisabledOnVm @@ -75193,7 +74815,6 @@ type StorageDrsHbrDiskNotMovable struct { func init() { t["StorageDrsHbrDiskNotMovable"] = reflect.TypeOf((*StorageDrsHbrDiskNotMovable)(nil)).Elem() - minAPIVersionForType["StorageDrsHbrDiskNotMovable"] = "6.0" } type StorageDrsHbrDiskNotMovableFault StorageDrsHbrDiskNotMovable @@ -75210,7 +74831,6 @@ type StorageDrsHmsMoveInProgress struct { func init() { t["StorageDrsHmsMoveInProgress"] = reflect.TypeOf((*StorageDrsHmsMoveInProgress)(nil)).Elem() - minAPIVersionForType["StorageDrsHmsMoveInProgress"] = "6.0" } type StorageDrsHmsMoveInProgressFault StorageDrsHmsMoveInProgress @@ -75227,7 +74847,6 @@ type StorageDrsHmsUnreachable struct { func init() { t["StorageDrsHmsUnreachable"] = reflect.TypeOf((*StorageDrsHmsUnreachable)(nil)).Elem() - minAPIVersionForType["StorageDrsHmsUnreachable"] = "6.0" } type StorageDrsHmsUnreachableFault StorageDrsHmsUnreachable @@ -75236,6 +74855,8 @@ func init() { t["StorageDrsHmsUnreachableFault"] = reflect.TypeOf((*StorageDrsHmsUnreachableFault)(nil)).Elem() } +// Deprecated as of vSphere8.0 U3, and there is no replacement for it. +// // Storage DRS configuration for I/O load balancing. type StorageDrsIoLoadBalanceConfig struct { DynamicData @@ -75254,7 +74875,7 @@ type StorageDrsIoLoadBalanceConfig struct { // // The valid values are in the range of 30 (i.e., 30%) to 100 (i.e., 100%). // If not specified, the default value is 60%. - ReservablePercentThreshold int32 `xml:"reservablePercentThreshold,omitempty" json:"reservablePercentThreshold,omitempty" vim:"6.0"` + ReservablePercentThreshold int32 `xml:"reservablePercentThreshold,omitempty" json:"reservablePercentThreshold,omitempty"` // Storage DRS makes storage migration recommendations if total // IOPs reservation of all VMs running on a datastore is higher than // the specified threshold. 
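The hunks above drop per-type minAPIVersionForType registrations and vim version tags, and mark the Storage DRS I/O load-balancing settings (StorageDrsIoLoadBalanceConfig, ioLoadBalanceEnabled, ioLoadBalanceConfig) as deprecated in vSphere 8.0 U3 with no replacement. As a rough illustration of how a consumer of the updated types package might react, here is a minimal sketch that enables Storage DRS with only the space-balancing knobs and leaves the deprecated I/O fields unset. The import path and the NewBool helper are the usual govmomi ones, the chosen threshold is arbitrary, and the spec would still have to be submitted through the Storage DRS configuration task, which is not shown.

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	// Enable Storage DRS on a datastore cluster with only the space
	// load-balancing settings; the I/O load-balancing fields that the
	// diff marks as deprecated in vSphere 8.0 U3 (IoLoadBalanceEnabled,
	// IoLoadBalanceConfig) are simply left unset.
	spec := types.StorageDrsConfigSpec{
		PodConfigSpec: &types.StorageDrsPodConfigSpec{
			Enabled:                types.NewBool(true),
			DefaultIntraVmAffinity: types.NewBool(true),
			SpaceLoadBalanceConfig: &types.StorageDrsSpaceLoadBalanceConfig{
				// Keep at least 50 GB free on every member datastore.
				FreeSpaceThresholdGB: 50,
			},
		},
	}
	fmt.Printf("%+v\n", spec)
}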
@@ -75268,7 +74889,7 @@ type StorageDrsIoLoadBalanceConfig struct { // should be based on conservative estimate of storage performance, // and ideally should be set to about 50-60% of worse case peak // performance of backing LUN. - ReservableIopsThreshold int32 `xml:"reservableIopsThreshold,omitempty" json:"reservableIopsThreshold,omitempty" vim:"6.0"` + ReservableIopsThreshold int32 `xml:"reservableIopsThreshold,omitempty" json:"reservableIopsThreshold,omitempty"` // Determines which reservation threshold specification to use. // // See `StorageDrsPodConfigInfoBehavior_enum`. If unspecified, the @@ -75276,7 +74897,7 @@ type StorageDrsIoLoadBalanceConfig struct { // percentage value in that case. // If mode is specified, but corresponding reservationThreshold // value is absent, option specific defaults are used. - ReservableThresholdMode string `xml:"reservableThresholdMode,omitempty" json:"reservableThresholdMode,omitempty" vim:"6.0"` + ReservableThresholdMode string `xml:"reservableThresholdMode,omitempty" json:"reservableThresholdMode,omitempty"` // Storage DRS makes storage migration recommendations if // I/O latency on one (or more) of the datastores is higher than // the specified threshold. @@ -75296,9 +74917,10 @@ type StorageDrsIoLoadBalanceConfig struct { func init() { t["StorageDrsIoLoadBalanceConfig"] = reflect.TypeOf((*StorageDrsIoLoadBalanceConfig)(nil)).Elem() - minAPIVersionForType["StorageDrsIoLoadBalanceConfig"] = "5.0" } +// Deprecated as of vSphere8.0 U3, and there is no replacement for it. +// // The fault occurs when Storage DRS disables IO Load balancing internally // even though it is enabled by the user. // @@ -75313,7 +74935,6 @@ type StorageDrsIolbDisabledInternally struct { func init() { t["StorageDrsIolbDisabledInternally"] = reflect.TypeOf((*StorageDrsIolbDisabledInternally)(nil)).Elem() - minAPIVersionForType["StorageDrsIolbDisabledInternally"] = "5.0" } type StorageDrsIolbDisabledInternallyFault StorageDrsIolbDisabledInternally @@ -75331,7 +74952,6 @@ type StorageDrsOptionSpec struct { func init() { t["StorageDrsOptionSpec"] = reflect.TypeOf((*StorageDrsOptionSpec)(nil)).Elem() - minAPIVersionForType["StorageDrsOptionSpec"] = "5.0" } type StorageDrsPlacementRankVmSpec struct { @@ -75356,6 +74976,8 @@ type StorageDrsPodConfigInfo struct { // Flag indicating whether or not storage DRS is enabled. Enabled bool `xml:"enabled" json:"enabled"` + // Deprecated as of vSphere8.0 U3, and there is no replacement for it. + // // Flag indicating whether or not storage DRS takes into account storage I/O // workload when making load balancing and initial placement recommendations. IoLoadBalanceEnabled bool `xml:"ioLoadBalanceEnabled" json:"ioLoadBalanceEnabled"` @@ -75384,13 +75006,15 @@ type StorageDrsPodConfigInfo struct { DefaultIntraVmAffinity *bool `xml:"defaultIntraVmAffinity" json:"defaultIntraVmAffinity,omitempty"` // The configuration settings for load balancing storage space. SpaceLoadBalanceConfig *StorageDrsSpaceLoadBalanceConfig `xml:"spaceLoadBalanceConfig,omitempty" json:"spaceLoadBalanceConfig,omitempty"` + // Deprecated as of vSphere8.0 U3, and there is no replacement for it. + // // The configuration settings for load balancing I/O workload. // // This takes effect only if `StorageDrsPodConfigInfo.ioLoadBalanceEnabled` is true. IoLoadBalanceConfig *StorageDrsIoLoadBalanceConfig `xml:"ioLoadBalanceConfig,omitempty" json:"ioLoadBalanceConfig,omitempty"` // Configuration settings for fine-grain automation overrides on // the cluster level setting. 
- AutomationOverrides *StorageDrsAutomationConfig `xml:"automationOverrides,omitempty" json:"automationOverrides,omitempty" vim:"6.0"` + AutomationOverrides *StorageDrsAutomationConfig `xml:"automationOverrides,omitempty" json:"automationOverrides,omitempty"` // Pod-wide rules. Rule []BaseClusterRuleInfo `xml:"rule,omitempty,typeattr" json:"rule,omitempty"` // Advanced settings. @@ -75399,7 +75023,6 @@ type StorageDrsPodConfigInfo struct { func init() { t["StorageDrsPodConfigInfo"] = reflect.TypeOf((*StorageDrsPodConfigInfo)(nil)).Elem() - minAPIVersionForType["StorageDrsPodConfigInfo"] = "5.0" } // The `StorageDrsPodConfigSpec` data object provides a set of update @@ -75412,6 +75035,8 @@ type StorageDrsPodConfigSpec struct { // Flag indicating whether or not storage DRS is enabled. Enabled *bool `xml:"enabled" json:"enabled,omitempty"` + // Deprecated as of vSphere8.0 U3, and there is no replacement for it. + // // Flag indicating whether or not storage DRS takes into account storage I/O // workload when making load balancing and initial placement recommendations. IoLoadBalanceEnabled *bool `xml:"ioLoadBalanceEnabled" json:"ioLoadBalanceEnabled,omitempty"` @@ -75429,13 +75054,15 @@ type StorageDrsPodConfigSpec struct { DefaultIntraVmAffinity *bool `xml:"defaultIntraVmAffinity" json:"defaultIntraVmAffinity,omitempty"` // The configuration settings for load balancing storage space. SpaceLoadBalanceConfig *StorageDrsSpaceLoadBalanceConfig `xml:"spaceLoadBalanceConfig,omitempty" json:"spaceLoadBalanceConfig,omitempty"` + // Deprecated as of vSphere8.0 U3, and there is no replacement for it. + // // The configuration settings for load balancing I/O workload. // // This takes effect only if `StorageDrsPodConfigInfo.ioLoadBalanceEnabled` is true. IoLoadBalanceConfig *StorageDrsIoLoadBalanceConfig `xml:"ioLoadBalanceConfig,omitempty" json:"ioLoadBalanceConfig,omitempty"` // Configuration settings for fine-grain automation overrides on // the cluster level setting. - AutomationOverrides *StorageDrsAutomationConfig `xml:"automationOverrides,omitempty" json:"automationOverrides,omitempty" vim:"6.0"` + AutomationOverrides *StorageDrsAutomationConfig `xml:"automationOverrides,omitempty" json:"automationOverrides,omitempty"` // Changes to the set of rules. Rule []ClusterRuleSpec `xml:"rule,omitempty" json:"rule,omitempty"` // Changes to advance settings. @@ -75444,7 +75071,6 @@ type StorageDrsPodConfigSpec struct { func init() { t["StorageDrsPodConfigSpec"] = reflect.TypeOf((*StorageDrsPodConfigSpec)(nil)).Elem() - minAPIVersionForType["StorageDrsPodConfigSpec"] = "5.0" } // Specification for moving or copying a virtual machine to a different Storage Pod. @@ -75463,7 +75089,6 @@ type StorageDrsPodSelectionSpec struct { func init() { t["StorageDrsPodSelectionSpec"] = reflect.TypeOf((*StorageDrsPodSelectionSpec)(nil)).Elem() - minAPIVersionForType["StorageDrsPodSelectionSpec"] = "5.0" } // This fault is thrown when Storage DRS cannot move disks of a virtual machine @@ -75474,7 +75099,6 @@ type StorageDrsRelocateDisabled struct { func init() { t["StorageDrsRelocateDisabled"] = reflect.TypeOf((*StorageDrsRelocateDisabled)(nil)).Elem() - minAPIVersionForType["StorageDrsRelocateDisabled"] = "6.0" } type StorageDrsRelocateDisabledFault StorageDrsRelocateDisabled @@ -75503,7 +75127,7 @@ type StorageDrsSpaceLoadBalanceConfig struct { // The maximum value is limited by the capacity of the smallest // datastore in a datastore cluster. // If not specified, the default value is 50GB. 
- FreeSpaceThresholdGB int32 `xml:"freeSpaceThresholdGB,omitempty" json:"freeSpaceThresholdGB,omitempty" vim:"6.0"` + FreeSpaceThresholdGB int32 `xml:"freeSpaceThresholdGB,omitempty" json:"freeSpaceThresholdGB,omitempty"` // Storage DRS considers making storage migration recommendations if // the difference in space utilization between the source and destination datastores // is higher than the specified threshold. @@ -75515,7 +75139,6 @@ type StorageDrsSpaceLoadBalanceConfig struct { func init() { t["StorageDrsSpaceLoadBalanceConfig"] = reflect.TypeOf((*StorageDrsSpaceLoadBalanceConfig)(nil)).Elem() - minAPIVersionForType["StorageDrsSpaceLoadBalanceConfig"] = "5.0" } // This fault is thrown when Storage DRS action for relocating @@ -75527,7 +75150,6 @@ type StorageDrsStaleHmsCollection struct { func init() { t["StorageDrsStaleHmsCollection"] = reflect.TypeOf((*StorageDrsStaleHmsCollection)(nil)).Elem() - minAPIVersionForType["StorageDrsStaleHmsCollection"] = "6.0" } type StorageDrsStaleHmsCollectionFault StorageDrsStaleHmsCollection @@ -75544,7 +75166,6 @@ type StorageDrsUnableToMoveFiles struct { func init() { t["StorageDrsUnableToMoveFiles"] = reflect.TypeOf((*StorageDrsUnableToMoveFiles)(nil)).Elem() - minAPIVersionForType["StorageDrsUnableToMoveFiles"] = "5.1" } type StorageDrsUnableToMoveFilesFault StorageDrsUnableToMoveFiles @@ -75598,12 +75219,11 @@ type StorageDrsVmConfigInfo struct { // virtual machine. IntraVmAntiAffinity *VirtualDiskAntiAffinityRuleSpec `xml:"intraVmAntiAffinity,omitempty" json:"intraVmAntiAffinity,omitempty"` // List of the virtual disk rules that can be overridden/created. - VirtualDiskRules []VirtualDiskRuleSpec `xml:"virtualDiskRules,omitempty" json:"virtualDiskRules,omitempty" vim:"6.7"` + VirtualDiskRules []VirtualDiskRuleSpec `xml:"virtualDiskRules,omitempty" json:"virtualDiskRules,omitempty"` } func init() { t["StorageDrsVmConfigInfo"] = reflect.TypeOf((*StorageDrsVmConfigInfo)(nil)).Elem() - minAPIVersionForType["StorageDrsVmConfigInfo"] = "5.0" } // Updates the per-virtual-machine storage DRS configuration. @@ -75615,9 +75235,10 @@ type StorageDrsVmConfigSpec struct { func init() { t["StorageDrsVmConfigSpec"] = reflect.TypeOf((*StorageDrsVmConfigSpec)(nil)).Elem() - minAPIVersionForType["StorageDrsVmConfigSpec"] = "5.0" } +// Deprecated as of vSphere8.0 U3, and there is no replacement for it. +// // The IOAllocationInfo specifies the shares, limit and reservation // for storage I/O resource. // @@ -75646,6 +75267,8 @@ type StorageIOAllocationInfo struct { // a default value of -1 will be returned, which indicates that there is no // limit on resource usage. Limit *int64 `xml:"limit" json:"limit,omitempty"` + // Deprecated as of vSphere8.0 U3, and there is no replacement for it. + // // Shares are used in case of resource contention. // // The value should be within a range of 200 to 4000. @@ -75655,6 +75278,8 @@ type StorageIOAllocationInfo struct { // a default value of `SharesInfo.level` = normal, // `SharesInfo.shares` = 1000 will be returned. Shares *SharesInfo `xml:"shares,omitempty" json:"shares,omitempty"` + // Deprecated as of vSphere8.0 U3, and there is no replacement for it. + // // Reservation control is used to provide guaranteed allocation in terms // of IOPS. // @@ -75664,14 +75289,15 @@ type StorageIOAllocationInfo struct { // on shared storage based on integration with Storage IO Control. // Also right now we don't do any admission control based on IO // reservation values. 
- Reservation *int32 `xml:"reservation" json:"reservation,omitempty" vim:"5.5"` + Reservation *int32 `xml:"reservation" json:"reservation,omitempty"` } func init() { t["StorageIOAllocationInfo"] = reflect.TypeOf((*StorageIOAllocationInfo)(nil)).Elem() - minAPIVersionForType["StorageIOAllocationInfo"] = "4.1" } +// Deprecated as of vSphere8.0 U3, and there is no replacement for it. +// // The IOAllocationOption specifies value ranges that can be used // to initialize `StorageIOAllocationInfo` object. type StorageIOAllocationOption struct { @@ -75687,9 +75313,10 @@ type StorageIOAllocationOption struct { func init() { t["StorageIOAllocationOption"] = reflect.TypeOf((*StorageIOAllocationOption)(nil)).Elem() - minAPIVersionForType["StorageIOAllocationOption"] = "4.1" } +// Deprecated as of vSphere8.0 U3, and there is no replacement for it. +// // Configuration setting ranges for `StorageIORMConfigSpec` object. type StorageIORMConfigOption struct { DynamicData @@ -75702,17 +75329,20 @@ type StorageIORMConfigOption struct { CongestionThresholdOption IntOption `xml:"congestionThresholdOption" json:"congestionThresholdOption"` // statsCollectionEnabledOption provides default value for // `StorageIORMConfigSpec.statsCollectionEnabled` - StatsCollectionEnabledOption *BoolOption `xml:"statsCollectionEnabledOption,omitempty" json:"statsCollectionEnabledOption,omitempty" vim:"5.0"` + StatsCollectionEnabledOption *BoolOption `xml:"statsCollectionEnabledOption,omitempty" json:"statsCollectionEnabledOption,omitempty"` + // Deprecated as of vSphere8.0 U3, and there is no replacement for it. + // // reservationEnabledOption provides default value for // `StorageIORMConfigSpec.reservationEnabled` - ReservationEnabledOption *BoolOption `xml:"reservationEnabledOption,omitempty" json:"reservationEnabledOption,omitempty" vim:"6.0"` + ReservationEnabledOption *BoolOption `xml:"reservationEnabledOption,omitempty" json:"reservationEnabledOption,omitempty"` } func init() { t["StorageIORMConfigOption"] = reflect.TypeOf((*StorageIORMConfigOption)(nil)).Elem() - minAPIVersionForType["StorageIORMConfigOption"] = "4.1" } +// Deprecated as of vSphere8.0 U3, and there is no replacement for it. +// // Configuration settings used for creating or reconfiguring // storage I/O resource management. // @@ -75726,7 +75356,7 @@ type StorageIORMConfigSpec struct { // Mode of congestion threshold specification // For more information, see // `StorageIORMThresholdMode_enum` - CongestionThresholdMode string `xml:"congestionThresholdMode,omitempty" json:"congestionThresholdMode,omitempty" vim:"5.1"` + CongestionThresholdMode string `xml:"congestionThresholdMode,omitempty" json:"congestionThresholdMode,omitempty"` // The latency beyond which the storage array is considered congested. // // For more information, see @@ -75739,26 +75369,32 @@ type StorageIORMConfigSpec struct { // // For more information, see // `StorageIORMInfo.congestionThreshold` - PercentOfPeakThroughput int32 `xml:"percentOfPeakThroughput,omitempty" json:"percentOfPeakThroughput,omitempty" vim:"5.1"` + PercentOfPeakThroughput int32 `xml:"percentOfPeakThroughput,omitempty" json:"percentOfPeakThroughput,omitempty"` // Flag indicating whether the service is enabled in stats collection mode. 
- StatsCollectionEnabled *bool `xml:"statsCollectionEnabled" json:"statsCollectionEnabled,omitempty" vim:"5.0"` + StatsCollectionEnabled *bool `xml:"statsCollectionEnabled" json:"statsCollectionEnabled,omitempty"` + // Deprecated as of vSphere8.0 U3, and there is no replacement for it. + // // Flag indicating whether IO reservations support is enabled. - ReservationEnabled *bool `xml:"reservationEnabled" json:"reservationEnabled,omitempty" vim:"6.0"` + ReservationEnabled *bool `xml:"reservationEnabled" json:"reservationEnabled,omitempty"` // Flag indicating whether stats aggregation is disabled. - StatsAggregationDisabled *bool `xml:"statsAggregationDisabled" json:"statsAggregationDisabled,omitempty" vim:"5.0"` + StatsAggregationDisabled *bool `xml:"statsAggregationDisabled" json:"statsAggregationDisabled,omitempty"` + // Deprecated as of vSphere8.0 U3, and there is no replacement for it. + // // Storage DRS makes storage migration recommendations // if total IOPs reservation for all VMs running on the // datastore is higher than specified threshold value. // // This value (if present) overrides - ReservableIopsThreshold int32 `xml:"reservableIopsThreshold,omitempty" json:"reservableIopsThreshold,omitempty" vim:"6.0"` + // `vim.StorageResourceManager.PodConfigInfo.reservableIopsThreshold` + ReservableIopsThreshold int32 `xml:"reservableIopsThreshold,omitempty" json:"reservableIopsThreshold,omitempty"` } func init() { t["StorageIORMConfigSpec"] = reflect.TypeOf((*StorageIORMConfigSpec)(nil)).Elem() - minAPIVersionForType["StorageIORMConfigSpec"] = "4.1" } +// Deprecated as of vSphere8.0 U3, and there is no replacement for it. +// // Configuration of storage I/O resource management. type StorageIORMInfo struct { DynamicData @@ -75768,7 +75404,7 @@ type StorageIORMInfo struct { // Mode of congestion threshold specification // For more information, see // `StorageIORMThresholdMode_enum` - CongestionThresholdMode string `xml:"congestionThresholdMode,omitempty" json:"congestionThresholdMode,omitempty" vim:"5.1"` + CongestionThresholdMode string `xml:"congestionThresholdMode,omitempty" json:"congestionThresholdMode,omitempty"` // The latency beyond which the storage array is considered congested. // // If storage I/O resource management is enabled on a datastore, @@ -75783,26 +75419,30 @@ type StorageIORMInfo struct { // // For more information, see // `StorageIORMInfo.congestionThreshold` - PercentOfPeakThroughput int32 `xml:"percentOfPeakThroughput,omitempty" json:"percentOfPeakThroughput,omitempty" vim:"5.1"` + PercentOfPeakThroughput int32 `xml:"percentOfPeakThroughput,omitempty" json:"percentOfPeakThroughput,omitempty"` // Deprecated as of vSphere API 6.5, use `StorageIORMInfo.enabled` instead. // // Flag indicating whether service is running in stats collection mode. - StatsCollectionEnabled *bool `xml:"statsCollectionEnabled" json:"statsCollectionEnabled,omitempty" vim:"5.0"` + StatsCollectionEnabled *bool `xml:"statsCollectionEnabled" json:"statsCollectionEnabled,omitempty"` + // Deprecated as of vSphere8.0 U3, and there is no replacement for it. + // // Flag indicating whether IO reservations support is enabled. - ReservationEnabled *bool `xml:"reservationEnabled" json:"reservationEnabled,omitempty" vim:"6.0"` + ReservationEnabled *bool `xml:"reservationEnabled" json:"reservationEnabled,omitempty"` // Flag indicating whether stats aggregation is disabled. 
- StatsAggregationDisabled *bool `xml:"statsAggregationDisabled" json:"statsAggregationDisabled,omitempty" vim:"5.0"` + StatsAggregationDisabled *bool `xml:"statsAggregationDisabled" json:"statsAggregationDisabled,omitempty"` + // Deprecated as of vSphere8.0 U3, and there is no replacement for it. + // // Storage DRS makes storage migration recommendations // if total IOPs reservation for all VMs running on the // datastore is higher than specified threshold value. // // This value (if present) overrides - ReservableIopsThreshold int32 `xml:"reservableIopsThreshold,omitempty" json:"reservableIopsThreshold,omitempty" vim:"6.0"` + // `vim.StorageResourceManager.PodConfigInfo.reservableIopsThreshold` + ReservableIopsThreshold int32 `xml:"reservableIopsThreshold,omitempty" json:"reservableIopsThreshold,omitempty"` } func init() { t["StorageIORMInfo"] = reflect.TypeOf((*StorageIORMInfo)(nil)).Elem() - minAPIVersionForType["StorageIORMInfo"] = "4.1" } // Describes a single storage migration action. @@ -75851,11 +75491,15 @@ type StorageMigrationAction struct { // Unit: percentage. For example, if set to 70.0, space utilization is 70%. // If not set, the value is not available. SpaceUtilDstAfter float32 `xml:"spaceUtilDstAfter,omitempty" json:"spaceUtilDstAfter,omitempty"` + // Deprecated as of vSphere8.0 U3, and there is no replacement for it. + // // I/O latency on the source datastore before storage migration. // // Unit: millisecond. // If not set, the value is not available. IoLatencySrcBefore float32 `xml:"ioLatencySrcBefore,omitempty" json:"ioLatencySrcBefore,omitempty"` + // Deprecated as of vSphere8.0 U3, and there is no replacement for it. + // // I/O latency on the destination datastore before storage migration. // // Unit: millisecond. @@ -75865,9 +75509,10 @@ type StorageMigrationAction struct { func init() { t["StorageMigrationAction"] = reflect.TypeOf((*StorageMigrationAction)(nil)).Elem() - minAPIVersionForType["StorageMigrationAction"] = "5.0" } +// Deprecated as of vSphere8.0 U3, and there is no replacement for it. +// // Summary statistics for datastore performance // The statistics are reported in aggregated quantiles over a time period type StoragePerformanceSummary struct { @@ -75897,6 +75542,8 @@ type StoragePerformanceSummary struct { DatastoreReadIops []float64 `xml:"datastoreReadIops" json:"datastoreReadIops"` // Aggregated datastore Write IO rate (Writes/second) DatastoreWriteIops []float64 `xml:"datastoreWriteIops" json:"datastoreWriteIops"` + // Deprecated as of vSphere8.0 U3, and there is no replacement for it. + // // Cumulative SIOC activity to satisfy SIOC latency threshold // setting. // @@ -75912,7 +75559,6 @@ type StoragePerformanceSummary struct { func init() { t["StoragePerformanceSummary"] = reflect.TypeOf((*StoragePerformanceSummary)(nil)).Elem() - minAPIVersionForType["StoragePerformanceSummary"] = "5.1" } // Describes a single storage initial placement action for placing a virtual @@ -75944,7 +75590,7 @@ type StoragePlacementAction struct { // Unit: percentage. For example, if set to 70.0, space demand is 70%. This // value include the space demanded by thin provisioned VMs. Hence, it may // be higher than 100%. If not set, the value is not available. - SpaceDemandBefore float32 `xml:"spaceDemandBefore,omitempty" json:"spaceDemandBefore,omitempty" vim:"6.0"` + SpaceDemandBefore float32 `xml:"spaceDemandBefore,omitempty" json:"spaceDemandBefore,omitempty"` // Space utilization on the target datastore after placing the virtual disk. // // Unit: percentage. 
For example, if set to 70.0, space utilization is 70%. @@ -75955,7 +75601,9 @@ type StoragePlacementAction struct { // Unit: percentage. For example, if set to 70.0, space demand is 70%. This // value include the space demanded by thin provisioned VMs. Hence, it may // be higher than 100%. If not set, the value is not available. - SpaceDemandAfter float32 `xml:"spaceDemandAfter,omitempty" json:"spaceDemandAfter,omitempty" vim:"6.0"` + SpaceDemandAfter float32 `xml:"spaceDemandAfter,omitempty" json:"spaceDemandAfter,omitempty"` + // Deprecated as of vSphere8.0 U3, and there is no replacement for it. + // // Current I/O latency on the target datastore. // // Unit: millisecond. @@ -75965,7 +75613,6 @@ type StoragePlacementAction struct { func init() { t["StoragePlacementAction"] = reflect.TypeOf((*StoragePlacementAction)(nil)).Elem() - minAPIVersionForType["StoragePlacementAction"] = "5.0" } // Both `StorageResourceManager.RecommendDatastores` and @@ -75990,7 +75637,6 @@ type StoragePlacementResult struct { func init() { t["StoragePlacementResult"] = reflect.TypeOf((*StoragePlacementResult)(nil)).Elem() - minAPIVersionForType["StoragePlacementResult"] = "5.0" } // StoragePlacementSpec encapsulates all of the inputs passed to the @@ -76044,7 +75690,7 @@ type StoragePlacementSpec struct { // // If unset, default behavior is to allow such // prerequisite moves. - DisallowPrerequisiteMoves *bool `xml:"disallowPrerequisiteMoves" json:"disallowPrerequisiteMoves,omitempty" vim:"5.1"` + DisallowPrerequisiteMoves *bool `xml:"disallowPrerequisiteMoves" json:"disallowPrerequisiteMoves,omitempty"` // Resource lease duration in seconds. // // If the duration is within bounds, @@ -76052,12 +75698,11 @@ type StoragePlacementSpec struct { // generated as part of that call. // Only initial placement recommendations generated by storage DRS can reserve // resources this way. - ResourceLeaseDurationSec int32 `xml:"resourceLeaseDurationSec,omitempty" json:"resourceLeaseDurationSec,omitempty" vim:"5.1"` + ResourceLeaseDurationSec int32 `xml:"resourceLeaseDurationSec,omitempty" json:"resourceLeaseDurationSec,omitempty"` } func init() { t["StoragePlacementSpec"] = reflect.TypeOf((*StoragePlacementSpec)(nil)).Elem() - minAPIVersionForType["StoragePlacementSpec"] = "5.0" } // The `StoragePodSummary` data object @@ -76083,7 +75728,6 @@ type StoragePodSummary struct { func init() { t["StoragePodSummary"] = reflect.TypeOf((*StoragePodSummary)(nil)).Elem() - minAPIVersionForType["StoragePodSummary"] = "5.0" } // The `StorageProfile` data object represents the host storage configuration. 
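These hunks strip the same version metadata from the SIOC and Storage DRS placement types; StoragePlacementSpec and StoragePlacementResult are the input and output of the StorageResourceManager.RecommendDatastores call named in the comments. Below is a hedged sketch of how such a placement request might be assembled for initial placement on a datastore cluster. The managed object reference values, the VM config, and the StoragePlacementSpecPlacementTypeCreate constant are illustrative assumptions from the broader govmomi API surface rather than anything shown in this diff.

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	// Placeholder inventory references; a real client would resolve these
	// through the finder or the property collector.
	pod := types.ManagedObjectReference{Type: "StoragePod", Value: "group-p1234"}
	pool := types.ManagedObjectReference{Type: "ResourcePool", Value: "resgroup-42"}
	folder := types.ManagedObjectReference{Type: "Folder", Value: "group-v3"}

	spec := types.StoragePlacementSpec{
		// Initial placement of a new VM on the datastore cluster.
		Type:             string(types.StoragePlacementSpecPlacementTypeCreate),
		PodSelectionSpec: types.StorageDrsPodSelectionSpec{StoragePod: &pod},
		ResourcePool:     &pool,
		Folder:           &folder,
		ConfigSpec: &types.VirtualMachineConfigSpec{
			Name:    "demo-vm",
			GuestId: "otherGuest64",
		},
		// DisallowPrerequisiteMoves is left unset, so Storage DRS may make
		// prerequisite moves; hold the recommendation's resources for 5 minutes.
		ResourceLeaseDurationSec: 300,
	}
	fmt.Printf("%+v\n", spec)
}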
@@ -76103,7 +75747,6 @@ type StorageProfile struct { func init() { t["StorageProfile"] = reflect.TypeOf((*StorageProfile)(nil)).Elem() - minAPIVersionForType["StorageProfile"] = "4.0" } // Describes the storage requirement to perform a consolidation @@ -76121,7 +75764,6 @@ type StorageRequirement struct { func init() { t["StorageRequirement"] = reflect.TypeOf((*StorageRequirement)(nil)).Elem() - minAPIVersionForType["StorageRequirement"] = "5.0" } // A data object to report aggregate storage statistics by storage @@ -76144,7 +75786,6 @@ type StorageResourceManagerStorageProfileStatistics struct { func init() { t["StorageResourceManagerStorageProfileStatistics"] = reflect.TypeOf((*StorageResourceManagerStorageProfileStatistics)(nil)).Elem() - minAPIVersionForType["StorageResourceManagerStorageProfileStatistics"] = "6.0" } // An operation on a powered-on virtual machine requests a change of storage @@ -76155,7 +75796,6 @@ type StorageVMotionNotSupported struct { func init() { t["StorageVMotionNotSupported"] = reflect.TypeOf((*StorageVMotionNotSupported)(nil)).Elem() - minAPIVersionForType["StorageVMotionNotSupported"] = "4.0" } type StorageVMotionNotSupportedFault StorageVMotionNotSupported @@ -76177,7 +75817,6 @@ type StorageVmotionIncompatible struct { func init() { t["StorageVmotionIncompatible"] = reflect.TypeOf((*StorageVmotionIncompatible)(nil)).Elem() - minAPIVersionForType["StorageVmotionIncompatible"] = "5.0" } type StorageVmotionIncompatibleFault StorageVmotionIncompatible @@ -76197,7 +75836,6 @@ type StringExpression struct { func init() { t["StringExpression"] = reflect.TypeOf((*StringExpression)(nil)).Elem() - minAPIVersionForType["StringExpression"] = "5.5" } // The StringOption data object type is used to define an open-ended @@ -76229,7 +75867,6 @@ type StringPolicy struct { func init() { t["StringPolicy"] = reflect.TypeOf((*StringPolicy)(nil)).Elem() - minAPIVersionForType["StringPolicy"] = "4.0" } // Implementation of `HostProfilesEntityCustomizations` @@ -76260,7 +75897,6 @@ type StructuredCustomizations struct { func init() { t["StructuredCustomizations"] = reflect.TypeOf((*StructuredCustomizations)(nil)).Elem() - minAPIVersionForType["StructuredCustomizations"] = "6.5" } type SuspendVAppRequestType struct { @@ -76331,7 +75967,6 @@ type SwapDatastoreNotWritableOnHost struct { func init() { t["SwapDatastoreNotWritableOnHost"] = reflect.TypeOf((*SwapDatastoreNotWritableOnHost)(nil)).Elem() - minAPIVersionForType["SwapDatastoreNotWritableOnHost"] = "2.5" } type SwapDatastoreNotWritableOnHostFault SwapDatastoreNotWritableOnHost @@ -76356,7 +75991,6 @@ type SwapDatastoreUnset struct { func init() { t["SwapDatastoreUnset"] = reflect.TypeOf((*SwapDatastoreUnset)(nil)).Elem() - minAPIVersionForType["SwapDatastoreUnset"] = "2.5" } type SwapDatastoreUnsetFault SwapDatastoreUnset @@ -76373,7 +76007,6 @@ type SwapPlacementOverrideNotSupported struct { func init() { t["SwapPlacementOverrideNotSupported"] = reflect.TypeOf((*SwapPlacementOverrideNotSupported)(nil)).Elem() - minAPIVersionForType["SwapPlacementOverrideNotSupported"] = "2.5" } type SwapPlacementOverrideNotSupportedFault SwapPlacementOverrideNotSupported @@ -76393,7 +76026,6 @@ type SwitchIpUnset struct { func init() { t["SwitchIpUnset"] = reflect.TypeOf((*SwitchIpUnset)(nil)).Elem() - minAPIVersionForType["SwitchIpUnset"] = "5.1" } type SwitchIpUnsetFault SwitchIpUnset @@ -76410,7 +76042,6 @@ type SwitchNotInUpgradeMode struct { func init() { t["SwitchNotInUpgradeMode"] = 
reflect.TypeOf((*SwitchNotInUpgradeMode)(nil)).Elem() - minAPIVersionForType["SwitchNotInUpgradeMode"] = "4.0" } type SwitchNotInUpgradeModeFault SwitchNotInUpgradeMode @@ -76471,7 +76102,6 @@ type SystemEventInfo struct { func init() { t["SystemEventInfo"] = reflect.TypeOf((*SystemEventInfo)(nil)).Elem() - minAPIVersionForType["SystemEventInfo"] = "6.5" } // Defines a tag that can be associated with a managed entity. @@ -76484,7 +76114,6 @@ type Tag struct { func init() { t["Tag"] = reflect.TypeOf((*Tag)(nil)).Elem() - minAPIVersionForType["Tag"] = "4.0" } // Static strings for task objects. @@ -76547,7 +76176,7 @@ type TaskFilterSpec struct { UserName *TaskFilterSpecByUsername `xml:"userName,omitempty" json:"userName,omitempty"` // This property, if provided, limits the set of collected tasks to those // associated with the specified activation Ids. - ActivationId []string `xml:"activationId,omitempty" json:"activationId,omitempty" vim:"6.0"` + ActivationId []string `xml:"activationId,omitempty" json:"activationId,omitempty"` // This property, if provided, limits the set of collected tasks by their states. // // Task states are enumerated in `State`. @@ -76574,7 +76203,7 @@ type TaskFilterSpec struct { // tasks not with the given `TaskInfo.eventChainId` will be // filtered out. If the property is not set, tasks' chain ID is disregarded // for filtering purposes. - EventChainId []int32 `xml:"eventChainId,omitempty" json:"eventChainId,omitempty" vim:"4.0"` + EventChainId []int32 `xml:"eventChainId,omitempty" json:"eventChainId,omitempty"` // The filter specification for retrieving tasks by // `tag`. // @@ -76582,21 +76211,21 @@ type TaskFilterSpec struct { // will be filtered out. If the property is not set, tasks' tag is disregarded for // filtering purposes. If it is set, and includes an empty string, tasks without a // tag will be returned. - Tag []string `xml:"tag,omitempty" json:"tag,omitempty" vim:"4.0"` + Tag []string `xml:"tag,omitempty" json:"tag,omitempty"` // The filter specification for retrieving tasks by // `TaskInfo.parentTaskKey`. // // If it is set, tasks not with the // given parentTaskKey(s) will be filtered out. If the property is not set, // tasks' parentTaskKey is disregarded for filtering purposes. - ParentTaskKey []string `xml:"parentTaskKey,omitempty" json:"parentTaskKey,omitempty" vim:"4.0"` + ParentTaskKey []string `xml:"parentTaskKey,omitempty" json:"parentTaskKey,omitempty"` // The filter specification for retrieving tasks by // `TaskInfo.rootTaskKey`. // // If it is set, tasks not with the // given rootTaskKey(s) will be filtered out. If the property is not set, // tasks' rootTaskKey is disregarded for filtering purposes. - RootTaskKey []string `xml:"rootTaskKey,omitempty" json:"rootTaskKey,omitempty" vim:"4.0"` + RootTaskKey []string `xml:"rootTaskKey,omitempty" json:"rootTaskKey,omitempty"` } func init() { @@ -76706,7 +76335,7 @@ type TaskInfo struct { // activity, this will be fixed and unchanging. // For tasks that have various substeps, this field will change // as the task progresses from one phase to another. - Description *LocalizableMessage `xml:"description,omitempty" json:"description,omitempty" vim:"4.0"` + Description *LocalizableMessage `xml:"description,omitempty" json:"description,omitempty"` // The name of the operation that created the task. // // This is not set @@ -76747,7 +76376,7 @@ type TaskInfo struct { // // If this property is not set, then the command does not report progress. 
Progress int32 `xml:"progress,omitempty" json:"progress,omitempty"` - ProgressDetails []KeyAnyValue `xml:"progressDetails,omitempty" json:"progressDetails,omitempty"` + ProgressDetails []KeyAnyValue `xml:"progressDetails,omitempty" json:"progressDetails,omitempty" vim:"8.0.1.0"` // Kind of entity responsible for creating this task. Reason BaseTaskReason `xml:"reason,typeattr" json:"reason"` // Time stamp when the task was created. @@ -76759,26 +76388,86 @@ type TaskInfo struct { // Event chain ID that leads to the corresponding events. EventChainId int32 `xml:"eventChainId" json:"eventChainId"` // The user entered tag to identify the operations and their side effects - ChangeTag string `xml:"changeTag,omitempty" json:"changeTag,omitempty" vim:"4.0"` + ChangeTag string `xml:"changeTag,omitempty" json:"changeTag,omitempty"` // Tasks can be created by another task. // // This shows `TaskInfo.key` of the task spun off this task. This is to // track causality between tasks. - ParentTaskKey string `xml:"parentTaskKey,omitempty" json:"parentTaskKey,omitempty" vim:"4.0"` + ParentTaskKey string `xml:"parentTaskKey,omitempty" json:"parentTaskKey,omitempty"` // Tasks can be created by another task and such creation can go on for // multiple levels. // // This is the `TaskInfo.key` of the task // that started the chain of tasks. - RootTaskKey string `xml:"rootTaskKey,omitempty" json:"rootTaskKey,omitempty" vim:"4.0"` + RootTaskKey string `xml:"rootTaskKey,omitempty" json:"rootTaskKey,omitempty"` // The activation Id is a client-provided token to link an API call with a task. - ActivationId string `xml:"activationId,omitempty" json:"activationId,omitempty" vim:"6.0"` + ActivationId string `xml:"activationId,omitempty" json:"activationId,omitempty"` } func init() { t["TaskInfo"] = reflect.TypeOf((*TaskInfo)(nil)).Elem() } +// This data object type defines the specification for the filter used +// to include or exclude various information from the tasks while retrieving +// from the history collector database. +// +// The client creates a task history +// collector with `TaskFilterSpec` along with this optional +// spec, then retrieves the tasks from the task history collector. +type TaskInfoFilterSpec struct { + DynamicData + + // The filter specification for filtering out tasks' results. + // + // If it is set, then the + // results information will be included or excluded based on the supplied parameters. If it is + // not set, then results information of all tasks will be included. + FilterTaskResults *TaskInfoFilterSpecFilterTaskResults `xml:"filterTaskResults,omitempty" json:"filterTaskResults,omitempty"` +} + +func init() { + t["TaskInfoFilterSpec"] = reflect.TypeOf((*TaskInfoFilterSpec)(nil)).Elem() + minAPIVersionForType["TaskInfoFilterSpec"] = "8.0.3.0" +} + +// This data object type enables to filter the results information for +// all or the specified tasks. +// +// 1\. If removeAll=true, the results information of all tasks will be excluded. +// 2\. If removeAll=false/unset: +// a. If descriptionIds is empty, the results information of all tasks will be included. +// b. If descriptionIds is non-empty: +// i. If filterIn=true, the results information of all tasks will be included. +// ii. If filterIn=false/unset, the results information of all tasks will be excluded. +type TaskInfoFilterSpecFilterTaskResults struct { + DynamicData + + // Excludes results information of all tasks. + // + // If set to true, the results information of all tasks will be excluded. 
+ RemoveAll *bool `xml:"removeAll" json:"removeAll,omitempty"` + // The description IDs of tasks that have to be filtered out. + // + // The `TaskInfoFilterSpecFilterTaskResults.filterIn` + // option can switch the behavior to filter in. + DescriptionIds []string `xml:"descriptionIds,omitempty" json:"descriptionIds,omitempty"` + // Boolean Flag to invert the filter semantics to filter in the results instead of + // filtering out. + // + // If set to true, then the results of only the tasks specified by the + // `TaskInfoFilterSpecFilterTaskResults.descriptionIds` will be included. + // If unset or set to false, then the results of only the tasks specified by the + // `TaskInfoFilterSpecFilterTaskResults.descriptionIds` will be excluded. + // This boolean flag will only be considered if descriptionsIds is non-empty and if removeAll is false. + FilterIn *bool `xml:"filterIn" json:"filterIn,omitempty"` +} + +func init() { + t["TaskInfoFilterSpecFilterTaskResults"] = reflect.TypeOf((*TaskInfoFilterSpecFilterTaskResults)(nil)).Elem() + minAPIVersionForType["TaskInfoFilterSpecFilterTaskResults"] = "8.0.3.0" +} + // Base type for all task reasons. // // Task reasons represent the kind of entity responsible for a task's creation. @@ -76905,7 +76594,6 @@ type TaskTimeoutEvent struct { func init() { t["TaskTimeoutEvent"] = reflect.TypeOf((*TaskTimeoutEvent)(nil)).Elem() - minAPIVersionForType["TaskTimeoutEvent"] = "2.5" } // The teaming configuration of the uplink ports in the DVS matches @@ -76916,7 +76604,6 @@ type TeamingMatchEvent struct { func init() { t["TeamingMatchEvent"] = reflect.TypeOf((*TeamingMatchEvent)(nil)).Elem() - minAPIVersionForType["TeamingMatchEvent"] = "5.1" } // The teaming configuration of the uplink ports in the DVS @@ -76927,7 +76614,6 @@ type TeamingMisMatchEvent struct { func init() { t["TeamingMisMatchEvent"] = reflect.TypeOf((*TeamingMisMatchEvent)(nil)).Elem() - minAPIVersionForType["TeamingMisMatchEvent"] = "5.1" } // This event records the start of a template upgrade. @@ -77126,7 +76812,6 @@ type ThirdPartyLicenseAssignmentFailed struct { func init() { t["ThirdPartyLicenseAssignmentFailed"] = reflect.TypeOf((*ThirdPartyLicenseAssignmentFailed)(nil)).Elem() - minAPIVersionForType["ThirdPartyLicenseAssignmentFailed"] = "5.0" } type ThirdPartyLicenseAssignmentFailedFault ThirdPartyLicenseAssignmentFailed @@ -77156,7 +76841,6 @@ type TicketedSessionAuthentication struct { func init() { t["TicketedSessionAuthentication"] = reflect.TypeOf((*TicketedSessionAuthentication)(nil)).Elem() - minAPIVersionForType["TicketedSessionAuthentication"] = "5.0" } // This event indicates that an operation performed on the host timed out. 
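The main addition in the hunk above is the 8.0.3.0 TaskInfoFilterSpec / TaskInfoFilterSpecFilterTaskResults pair, which lets a task history collector keep or drop task result payloads. A small sketch, assuming the usual vim25/types import, of a filter that keeps results only for a couple of task description IDs (the IDs shown are illustrative); per the added comments, this optional spec is supplied alongside TaskFilterSpec when the collector is created.

package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	// Keep task result payloads only for the listed task description IDs;
	// per the FilterIn comment in the diff, filterIn=true turns
	// descriptionIds from an exclude list into an include list.
	infoFilter := types.TaskInfoFilterSpec{
		FilterTaskResults: &types.TaskInfoFilterSpecFilterTaskResults{
			DescriptionIds: []string{"VirtualMachine.clone", "Datastore.destroy"},
			FilterIn:       types.NewBool(true),
		},
	}
	fmt.Printf("%+v\n", infoFilter)
}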
@@ -77196,7 +76880,6 @@ type TooManyConcurrentNativeClones struct { func init() { t["TooManyConcurrentNativeClones"] = reflect.TypeOf((*TooManyConcurrentNativeClones)(nil)).Elem() - minAPIVersionForType["TooManyConcurrentNativeClones"] = "5.0" } type TooManyConcurrentNativeClonesFault TooManyConcurrentNativeClones @@ -77224,7 +76907,6 @@ type TooManyConsecutiveOverrides struct { func init() { t["TooManyConsecutiveOverrides"] = reflect.TypeOf((*TooManyConsecutiveOverrides)(nil)).Elem() - minAPIVersionForType["TooManyConsecutiveOverrides"] = "2.5" } type TooManyConsecutiveOverridesFault TooManyConsecutiveOverrides @@ -77268,7 +76950,6 @@ type TooManyDisksOnLegacyHost struct { func init() { t["TooManyDisksOnLegacyHost"] = reflect.TypeOf((*TooManyDisksOnLegacyHost)(nil)).Elem() - minAPIVersionForType["TooManyDisksOnLegacyHost"] = "2.5" } type TooManyDisksOnLegacyHostFault TooManyDisksOnLegacyHost @@ -77290,7 +76971,6 @@ type TooManyGuestLogons struct { func init() { t["TooManyGuestLogons"] = reflect.TypeOf((*TooManyGuestLogons)(nil)).Elem() - minAPIVersionForType["TooManyGuestLogons"] = "5.0" } type TooManyGuestLogonsFault TooManyGuestLogons @@ -77325,7 +77005,6 @@ type TooManyNativeCloneLevels struct { func init() { t["TooManyNativeCloneLevels"] = reflect.TypeOf((*TooManyNativeCloneLevels)(nil)).Elem() - minAPIVersionForType["TooManyNativeCloneLevels"] = "5.0" } type TooManyNativeCloneLevelsFault TooManyNativeCloneLevels @@ -77342,7 +77021,6 @@ type TooManyNativeClonesOnFile struct { func init() { t["TooManyNativeClonesOnFile"] = reflect.TypeOf((*TooManyNativeClonesOnFile)(nil)).Elem() - minAPIVersionForType["TooManyNativeClonesOnFile"] = "5.0" } type TooManyNativeClonesOnFileFault TooManyNativeClonesOnFile @@ -77375,7 +77053,6 @@ type ToolsAlreadyUpgraded struct { func init() { t["ToolsAlreadyUpgraded"] = reflect.TypeOf((*ToolsAlreadyUpgraded)(nil)).Elem() - minAPIVersionForType["ToolsAlreadyUpgraded"] = "4.0" } type ToolsAlreadyUpgradedFault ToolsAlreadyUpgraded @@ -77392,7 +77069,6 @@ type ToolsAutoUpgradeNotSupported struct { func init() { t["ToolsAutoUpgradeNotSupported"] = reflect.TypeOf((*ToolsAutoUpgradeNotSupported)(nil)).Elem() - minAPIVersionForType["ToolsAutoUpgradeNotSupported"] = "4.0" } type ToolsAutoUpgradeNotSupportedFault ToolsAutoUpgradeNotSupported @@ -77412,7 +77088,7 @@ type ToolsConfigInfo struct { // // The set of possible values is described in // `VirtualMachineToolsInstallType_enum` - ToolsInstallType string `xml:"toolsInstallType,omitempty" json:"toolsInstallType,omitempty" vim:"6.5"` + ToolsInstallType string `xml:"toolsInstallType,omitempty" json:"toolsInstallType,omitempty"` // Flag to specify whether or not scripts should run // after the virtual machine powers on. AfterPowerOn *bool `xml:"afterPowerOn" json:"afterPowerOn,omitempty"` @@ -77431,14 +77107,14 @@ type ToolsConfigInfo struct { // Tools upgrade policy setting for the virtual machine. // // See also `UpgradePolicy_enum`. - ToolsUpgradePolicy string `xml:"toolsUpgradePolicy,omitempty" json:"toolsUpgradePolicy,omitempty" vim:"2.5"` + ToolsUpgradePolicy string `xml:"toolsUpgradePolicy,omitempty" json:"toolsUpgradePolicy,omitempty"` // When set, this indicates that a customization operation is pending on the VM. // // The value represents the filename of the customization package on the host. 
- PendingCustomization string `xml:"pendingCustomization,omitempty" json:"pendingCustomization,omitempty" vim:"2.5"` + PendingCustomization string `xml:"pendingCustomization,omitempty" json:"pendingCustomization,omitempty"` // When set, provides the id of the key used to encrypt the customization // package attached to the VM. - CustomizationKeyId *CryptoKeyId `xml:"customizationKeyId,omitempty" json:"customizationKeyId,omitempty" vim:"6.5"` + CustomizationKeyId *CryptoKeyId `xml:"customizationKeyId,omitempty" json:"customizationKeyId,omitempty"` // Indicates whether or not the tools program is allowed to synchronize // guest time with host time. // @@ -77452,11 +77128,11 @@ type ToolsConfigInfo struct { // Periodical synchronization is // only allowed if `ToolsConfigInfo.syncTimeWithHostAllowed` // is not set to false. - SyncTimeWithHost *bool `xml:"syncTimeWithHost" json:"syncTimeWithHost,omitempty" vim:"2.5"` + SyncTimeWithHost *bool `xml:"syncTimeWithHost" json:"syncTimeWithHost,omitempty"` // Information about the last tools upgrade attempt if applicable. // // This information is maintained by the server and is ignored if set by the client. - LastInstallInfo *ToolsConfigInfoToolsLastInstallInfo `xml:"lastInstallInfo,omitempty" json:"lastInstallInfo,omitempty" vim:"5.0"` + LastInstallInfo *ToolsConfigInfoToolsLastInstallInfo `xml:"lastInstallInfo,omitempty" json:"lastInstallInfo,omitempty"` } func init() { @@ -77476,7 +77152,6 @@ type ToolsConfigInfoToolsLastInstallInfo struct { func init() { t["ToolsConfigInfoToolsLastInstallInfo"] = reflect.TypeOf((*ToolsConfigInfoToolsLastInstallInfo)(nil)).Elem() - minAPIVersionForType["ToolsConfigInfoToolsLastInstallInfo"] = "5.0" } // Thrown when the tools image couldn't be copied to the guest @@ -77487,7 +77162,6 @@ type ToolsImageCopyFailed struct { func init() { t["ToolsImageCopyFailed"] = reflect.TypeOf((*ToolsImageCopyFailed)(nil)).Elem() - minAPIVersionForType["ToolsImageCopyFailed"] = "5.1" } type ToolsImageCopyFailedFault ToolsImageCopyFailed @@ -77504,7 +77178,6 @@ type ToolsImageNotAvailable struct { func init() { t["ToolsImageNotAvailable"] = reflect.TypeOf((*ToolsImageNotAvailable)(nil)).Elem() - minAPIVersionForType["ToolsImageNotAvailable"] = "4.0" } type ToolsImageNotAvailableFault ToolsImageNotAvailable @@ -77521,7 +77194,6 @@ type ToolsImageSignatureCheckFailed struct { func init() { t["ToolsImageSignatureCheckFailed"] = reflect.TypeOf((*ToolsImageSignatureCheckFailed)(nil)).Elem() - minAPIVersionForType["ToolsImageSignatureCheckFailed"] = "4.0" } type ToolsImageSignatureCheckFailedFault ToolsImageSignatureCheckFailed @@ -77538,7 +77210,6 @@ type ToolsInstallationInProgress struct { func init() { t["ToolsInstallationInProgress"] = reflect.TypeOf((*ToolsInstallationInProgress)(nil)).Elem() - minAPIVersionForType["ToolsInstallationInProgress"] = "4.0" } type ToolsInstallationInProgressFault ToolsInstallationInProgress @@ -77572,7 +77243,6 @@ type ToolsUpgradeCancelled struct { func init() { t["ToolsUpgradeCancelled"] = reflect.TypeOf((*ToolsUpgradeCancelled)(nil)).Elem() - minAPIVersionForType["ToolsUpgradeCancelled"] = "4.0" } type ToolsUpgradeCancelledFault ToolsUpgradeCancelled @@ -77694,7 +77364,6 @@ type UnSupportedDatastoreForVFlash struct { func init() { t["UnSupportedDatastoreForVFlash"] = reflect.TypeOf((*UnSupportedDatastoreForVFlash)(nil)).Elem() - minAPIVersionForType["UnSupportedDatastoreForVFlash"] = "5.5" } type UnSupportedDatastoreForVFlashFault UnSupportedDatastoreForVFlash @@ -77780,7 +77449,6 @@ type 
UnconfiguredPropertyValue struct { func init() { t["UnconfiguredPropertyValue"] = reflect.TypeOf((*UnconfiguredPropertyValue)(nil)).Elem() - minAPIVersionForType["UnconfiguredPropertyValue"] = "4.0" } type UnconfiguredPropertyValueFault UnconfiguredPropertyValue @@ -77930,7 +77598,6 @@ type UnlicensedVirtualMachinesEvent struct { func init() { t["UnlicensedVirtualMachinesEvent"] = reflect.TypeOf((*UnlicensedVirtualMachinesEvent)(nil)).Elem() - minAPIVersionForType["UnlicensedVirtualMachinesEvent"] = "2.5" } // This event records that we discovered unlicensed virtual machines on @@ -77949,7 +77616,6 @@ type UnlicensedVirtualMachinesFoundEvent struct { func init() { t["UnlicensedVirtualMachinesFoundEvent"] = reflect.TypeOf((*UnlicensedVirtualMachinesFoundEvent)(nil)).Elem() - minAPIVersionForType["UnlicensedVirtualMachinesFoundEvent"] = "2.5" } // The parameters of `HostStorageSystem.UnmapVmfsVolumeEx_Task`. @@ -78129,7 +77795,6 @@ type UnrecognizedHost struct { func init() { t["UnrecognizedHost"] = reflect.TypeOf((*UnrecognizedHost)(nil)).Elem() - minAPIVersionForType["UnrecognizedHost"] = "2.5" } type UnrecognizedHostFault UnrecognizedHost @@ -78242,7 +77907,6 @@ type UnsharedSwapVMotionNotSupported struct { func init() { t["UnsharedSwapVMotionNotSupported"] = reflect.TypeOf((*UnsharedSwapVMotionNotSupported)(nil)).Elem() - minAPIVersionForType["UnsharedSwapVMotionNotSupported"] = "2.5" } type UnsharedSwapVMotionNotSupportedFault UnsharedSwapVMotionNotSupported @@ -78305,7 +77969,6 @@ type UnsupportedVimApiVersion struct { func init() { t["UnsupportedVimApiVersion"] = reflect.TypeOf((*UnsupportedVimApiVersion)(nil)).Elem() - minAPIVersionForType["UnsupportedVimApiVersion"] = "4.0" } type UnsupportedVimApiVersionFault UnsupportedVimApiVersion @@ -78334,6 +77997,18 @@ func init() { t["UnsupportedVmxLocationFault"] = reflect.TypeOf((*UnsupportedVmxLocationFault)(nil)).Elem() } +// Specifies SSL policy for untrusted SSL certificate. +// +// This option allows to explicitly disable SSL certificate verification. +type UntrustedCertificate struct { + IoFilterManagerSslTrust +} + +func init() { + t["UntrustedCertificate"] = reflect.TypeOf((*UntrustedCertificate)(nil)).Elem() + minAPIVersionForType["UntrustedCertificate"] = "8.0.3.0" +} + // The unused disk blocks of the specified virtual disk have not been // scrubbed on the file system. // @@ -78347,7 +78022,6 @@ type UnusedVirtualDiskBlocksNotScrubbed struct { func init() { t["UnusedVirtualDiskBlocksNotScrubbed"] = reflect.TypeOf((*UnusedVirtualDiskBlocksNotScrubbed)(nil)).Elem() - minAPIVersionForType["UnusedVirtualDiskBlocksNotScrubbed"] = "4.0" } type UnusedVirtualDiskBlocksNotScrubbedFault UnusedVirtualDiskBlocksNotScrubbed @@ -78977,7 +78651,7 @@ type UpdateInternetScsiAuthenticationPropertiesRequestType struct { // The set the targets to configure. Optional, // when obmitted will configura the authentication properties // for the adapter instead. - TargetSet *HostInternetScsiHbaTargetSet `xml:"targetSet,omitempty" json:"targetSet,omitempty" vim:"4.0"` + TargetSet *HostInternetScsiHbaTargetSet `xml:"targetSet,omitempty" json:"targetSet,omitempty"` } func init() { @@ -79192,6 +78866,10 @@ func init() { type UpdateKmipServerRequestType struct { This ManagedObjectReference `xml:"_this" json:"-"` // \[in\] KMIP server connection information. + // When update a KMIP server settings, changes to + // `KmipServerSpec#defaultKeyType` and + // `KmipServerSpec#wrappingKeySpec` + // will apply to all servers. 
Server KmipServerSpec `xml:"server" json:"server"` } @@ -79741,7 +79419,7 @@ type UpdateSet struct { // change calculations. The version may be passed to `PropertyCollector.CheckForUpdates`, `PropertyCollector.WaitForUpdates`, or `PropertyCollector.WaitForUpdatesEx` more than once. Re-using a version allows a client // to recover a change sequence after a transient failure on a previous // call. - Truncated *bool `xml:"truncated" json:"truncated,omitempty" vim:"4.1"` + Truncated *bool `xml:"truncated" json:"truncated,omitempty"` } func init() { @@ -79995,7 +79673,6 @@ type UpdateVirtualMachineFilesResult struct { func init() { t["UpdateVirtualMachineFilesResult"] = reflect.TypeOf((*UpdateVirtualMachineFilesResult)(nil)).Elem() - minAPIVersionForType["UpdateVirtualMachineFilesResult"] = "4.1" } type UpdateVirtualMachineFilesResultFailedVmFileInfo struct { @@ -80136,7 +79813,6 @@ type UpdatedAgentBeingRestartedEvent struct { func init() { t["UpdatedAgentBeingRestartedEvent"] = reflect.TypeOf((*UpdatedAgentBeingRestartedEvent)(nil)).Elem() - minAPIVersionForType["UpdatedAgentBeingRestartedEvent"] = "2.5" } // These event types represent events converted from VirtualCenter 1.x. @@ -80165,6 +79841,10 @@ type UpgradeIoFilterRequestType struct { CompRes ManagedObjectReference `xml:"compRes" json:"compRes"` // The URL that points to the new IO Filter VIB package. VibUrl string `xml:"vibUrl" json:"vibUrl"` + // This specifies SSL trust policy `IoFilterManagerSslTrust` + // for the given VIB URL. If unset, the server certificate is + // validated against the trusted root certificates. + VibSslTrust BaseIoFilterManagerSslTrust `xml:"vibSslTrust,omitempty,typeattr" json:"vibSslTrust,omitempty" vim:"8.0.3.0"` } func init() { @@ -80294,7 +79974,6 @@ type UplinkPortMtuNotSupportEvent struct { func init() { t["UplinkPortMtuNotSupportEvent"] = reflect.TypeOf((*UplinkPortMtuNotSupportEvent)(nil)).Elem() - minAPIVersionForType["UplinkPortMtuNotSupportEvent"] = "5.1" } // Mtu health check status of an uplink port is changed, and in the latest mtu health check, @@ -80306,7 +79985,6 @@ type UplinkPortMtuSupportEvent struct { func init() { t["UplinkPortMtuSupportEvent"] = reflect.TypeOf((*UplinkPortMtuSupportEvent)(nil)).Elem() - minAPIVersionForType["UplinkPortMtuSupportEvent"] = "5.1" } // Vlans health check status of an uplink port is changed, and in the latest vlan health check, @@ -80317,7 +79995,6 @@ type UplinkPortVlanTrunkedEvent struct { func init() { t["UplinkPortVlanTrunkedEvent"] = reflect.TypeOf((*UplinkPortVlanTrunkedEvent)(nil)).Elem() - minAPIVersionForType["UplinkPortVlanTrunkedEvent"] = "5.1" } // Vlans health check status of an uplink port is changed, and in the latest vlan health check, @@ -80328,7 +80005,6 @@ type UplinkPortVlanUntrunkedEvent struct { func init() { t["UplinkPortVlanUntrunkedEvent"] = reflect.TypeOf((*UplinkPortVlanUntrunkedEvent)(nil)).Elem() - minAPIVersionForType["UplinkPortVlanUntrunkedEvent"] = "5.1" } type UploadClientCert UploadClientCertRequestType @@ -80387,7 +80063,6 @@ type UsbScanCodeSpec struct { func init() { t["UsbScanCodeSpec"] = reflect.TypeOf((*UsbScanCodeSpec)(nil)).Elem() - minAPIVersionForType["UsbScanCodeSpec"] = "6.5" } type UsbScanCodeSpecKeyEvent struct { @@ -80427,7 +80102,6 @@ type UsbScanCodeSpecModifierType struct { func init() { t["UsbScanCodeSpecModifierType"] = reflect.TypeOf((*UsbScanCodeSpecModifierType)(nil)).Elem() - minAPIVersionForType["UsbScanCodeSpecModifierType"] = "6.5" } // This event records that a user account membership was added 
to a group. @@ -80456,7 +80130,6 @@ type UserGroupProfile struct { func init() { t["UserGroupProfile"] = reflect.TypeOf((*UserGroupProfile)(nil)).Elem() - minAPIVersionForType["UserGroupProfile"] = "4.0" } // The `UserInputRequiredParameterMetadata` data object represents policy option metadata @@ -80475,7 +80148,6 @@ type UserInputRequiredParameterMetadata struct { func init() { t["UserInputRequiredParameterMetadata"] = reflect.TypeOf((*UserInputRequiredParameterMetadata)(nil)).Elem() - minAPIVersionForType["UserInputRequiredParameterMetadata"] = "4.0" } // This event records a user logon. @@ -80489,7 +80161,7 @@ type UserLoginSessionEvent struct { // proxy if the binding uses a protocol that supports proxies, such as HTTP. IpAddress string `xml:"ipAddress" json:"ipAddress"` // The user agent or application - UserAgent string `xml:"userAgent,omitempty" json:"userAgent,omitempty" vim:"5.1"` + UserAgent string `xml:"userAgent,omitempty" json:"userAgent,omitempty"` // The locale of the session. Locale string `xml:"locale" json:"locale"` // The unique identifier for the session. @@ -80505,15 +80177,15 @@ type UserLogoutSessionEvent struct { SessionEvent // The IP address of client - IpAddress string `xml:"ipAddress,omitempty" json:"ipAddress,omitempty" vim:"5.1"` + IpAddress string `xml:"ipAddress,omitempty" json:"ipAddress,omitempty"` // The user agent or application - UserAgent string `xml:"userAgent,omitempty" json:"userAgent,omitempty" vim:"5.1"` + UserAgent string `xml:"userAgent,omitempty" json:"userAgent,omitempty"` // Number of API invocations made by the session - CallCount int64 `xml:"callCount,omitempty" json:"callCount,omitempty" vim:"5.1"` + CallCount int64 `xml:"callCount,omitempty" json:"callCount,omitempty"` // The unique identifier for the session. - SessionId string `xml:"sessionId,omitempty" json:"sessionId,omitempty" vim:"5.1"` + SessionId string `xml:"sessionId,omitempty" json:"sessionId,omitempty"` // Timestamp when the user logged on for this session. - LoginTime *time.Time `xml:"loginTime" json:"loginTime,omitempty" vim:"5.1"` + LoginTime *time.Time `xml:"loginTime" json:"loginTime,omitempty"` } func init() { @@ -80571,7 +80243,6 @@ type UserPrivilegeResult struct { func init() { t["UserPrivilegeResult"] = reflect.TypeOf((*UserPrivilegeResult)(nil)).Elem() - minAPIVersionForType["UserPrivilegeResult"] = "6.5" } // The `UserProfile` data object represents a user. @@ -80588,7 +80259,6 @@ type UserProfile struct { func init() { t["UserProfile"] = reflect.TypeOf((*UserProfile)(nil)).Elem() - minAPIVersionForType["UserProfile"] = "4.0" } // When searching for users, the search results in @@ -80641,16 +80311,16 @@ type UserSession struct { // the server determines this locale. MessageLocale string `xml:"messageLocale" json:"messageLocale"` // Whether or not this session belongs to a VC Extension. - ExtensionSession *bool `xml:"extensionSession" json:"extensionSession,omitempty" vim:"5.0"` + ExtensionSession *bool `xml:"extensionSession" json:"extensionSession,omitempty"` // The client identity. 
// // It could be IP address, or pipe name depended // on client binding - IpAddress string `xml:"ipAddress,omitempty" json:"ipAddress,omitempty" vim:"5.1"` + IpAddress string `xml:"ipAddress,omitempty" json:"ipAddress,omitempty"` // The name of user agent or application - UserAgent string `xml:"userAgent,omitempty" json:"userAgent,omitempty" vim:"5.1"` + UserAgent string `xml:"userAgent,omitempty" json:"userAgent,omitempty"` // Number of API invocations since the session started - CallCount int64 `xml:"callCount,omitempty" json:"callCount,omitempty" vim:"5.1"` + CallCount int64 `xml:"callCount,omitempty" json:"callCount,omitempty"` } func init() { @@ -80697,7 +80367,6 @@ type VASAStorageArray struct { func init() { t["VASAStorageArray"] = reflect.TypeOf((*VASAStorageArray)(nil)).Elem() - minAPIVersionForType["VASAStorageArray"] = "6.0" } // Discovery service information of the array with FC @@ -80772,12 +80441,12 @@ type VAppCloneSpec struct { // // This is often not a required // parameter. If not specified, the behavior is as follows: - // - If the target pool represents a stand-alone host, that host is used. - // - If the target pool represents a DRS-enabled cluster, a host selected - // by DRS is used. - // - If the target pool represents a cluster without DRS enabled or a - // DRS-enabled cluster in manual mode, an InvalidArgument exception is - // thrown. + // - If the target pool represents a stand-alone host, that host is used. + // - If the target pool represents a DRS-enabled cluster, a host selected + // by DRS is used. + // - If the target pool represents a cluster without DRS enabled or a + // DRS-enabled cluster in manual mode, an InvalidArgument exception is + // thrown. // // Refers instance of `HostSystem`. Host *ManagedObjectReference `xml:"host,omitempty" json:"host,omitempty"` @@ -80794,14 +80463,13 @@ type VAppCloneSpec struct { // A set of property values to override. Property []KeyValue `xml:"property,omitempty" json:"property,omitempty"` // The resource configuration for the cloned vApp. - ResourceMapping []VAppCloneSpecResourceMap `xml:"resourceMapping,omitempty" json:"resourceMapping,omitempty" vim:"4.1"` + ResourceMapping []VAppCloneSpecResourceMap `xml:"resourceMapping,omitempty" json:"resourceMapping,omitempty"` // Specify how the VMs in the vApp should be provisioned. - Provisioning string `xml:"provisioning,omitempty" json:"provisioning,omitempty" vim:"4.1"` + Provisioning string `xml:"provisioning,omitempty" json:"provisioning,omitempty"` } func init() { t["VAppCloneSpec"] = reflect.TypeOf((*VAppCloneSpec)(nil)).Elem() - minAPIVersionForType["VAppCloneSpec"] = "4.0" } // Maps one network to another as part of the clone process. 
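// A minimal sketch of how a client might fill in the VAppCloneSpec fields
// shown above. Only the field names come from the struct in this diff; the
// host moref, the property key/value and the provisioning string are
// placeholder assumptions (Provisioning is expected to be a
// VAppCloneSpecProvisioningType value).
package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	spec := types.VAppCloneSpec{
		// Omitting Host lets DRS pick one when the target pool is a
		// DRS-enabled cluster, per the doc comment above.
		Host:     &types.ManagedObjectReference{Type: "HostSystem", Value: "host-42"}, // placeholder moref
		Property: []types.KeyValue{{Key: "guestinfo.hostname", Value: "clone-01"}},    // placeholder override
		// Controls how the cloned VMs' disks are provisioned; assumed enum value.
		Provisioning: "thin",
	}
	fmt.Printf("%+v\n", spec)
}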
@@ -80822,7 +80490,6 @@ type VAppCloneSpecNetworkMappingPair struct { func init() { t["VAppCloneSpecNetworkMappingPair"] = reflect.TypeOf((*VAppCloneSpecNetworkMappingPair)(nil)).Elem() - minAPIVersionForType["VAppCloneSpecNetworkMappingPair"] = "4.0" } // Maps source child entities to destination resource pools @@ -80863,7 +80530,6 @@ type VAppCloneSpecResourceMap struct { func init() { t["VAppCloneSpecResourceMap"] = reflect.TypeOf((*VAppCloneSpecResourceMap)(nil)).Elem() - minAPIVersionForType["VAppCloneSpecResourceMap"] = "4.1" } // Base for configuration / environment issues that can be thrown when powering on or @@ -80874,7 +80540,6 @@ type VAppConfigFault struct { func init() { t["VAppConfigFault"] = reflect.TypeOf((*VAppConfigFault)(nil)).Elem() - minAPIVersionForType["VAppConfigFault"] = "4.0" } type VAppConfigFaultFault BaseVAppConfigFault @@ -80896,18 +80561,17 @@ type VAppConfigInfo struct { // // This identifier is used by vCenter to uniquely identify all // vApp instances. - InstanceUuid string `xml:"instanceUuid,omitempty" json:"instanceUuid,omitempty" vim:"4.1"` + InstanceUuid string `xml:"instanceUuid,omitempty" json:"instanceUuid,omitempty"` // Specifies that this vApp is managed by a VC Extension. // // See the // `managedBy` property in the // VAppConfigSpec for more details. - ManagedBy *ManagedByInfo `xml:"managedBy,omitempty" json:"managedBy,omitempty" vim:"5.0"` + ManagedBy *ManagedByInfo `xml:"managedBy,omitempty" json:"managedBy,omitempty"` } func init() { t["VAppConfigInfo"] = reflect.TypeOf((*VAppConfigInfo)(nil)).Elem() - minAPIVersionForType["VAppConfigInfo"] = "4.0" } // Configuration of a vApp @@ -80933,7 +80597,7 @@ type VAppConfigSpec struct { // VirtualCenter detects an identifier conflict between vApps. // // Reconfigure privilege: VApp.ApplicationConfig - InstanceUuid string `xml:"instanceUuid,omitempty" json:"instanceUuid,omitempty" vim:"4.1"` + InstanceUuid string `xml:"instanceUuid,omitempty" json:"instanceUuid,omitempty"` // Specifies that this vApp is managed by a VC Extension. // // This information is primarily used in the Client to show a custom icon for @@ -80944,12 +80608,11 @@ type VAppConfigSpec struct { // extension, the default vApp icon is used, and no description is shown. // // Reconfigure privilege: VApp.ApplicationConfig - ManagedBy *ManagedByInfo `xml:"managedBy,omitempty" json:"managedBy,omitempty" vim:"5.0"` + ManagedBy *ManagedByInfo `xml:"managedBy,omitempty" json:"managedBy,omitempty"` } func init() { t["VAppConfigSpec"] = reflect.TypeOf((*VAppConfigSpec)(nil)).Elem() - minAPIVersionForType["VAppConfigSpec"] = "4.0" } // This object type describes the behavior of an entity (virtual machine or @@ -81044,12 +80707,11 @@ type VAppEntityConfigInfo struct { // This is only set for linked children. 
// // Reconfigure privilege: VApp.ApplicationConfig - DestroyWithParent *bool `xml:"destroyWithParent" json:"destroyWithParent,omitempty" vim:"4.1"` + DestroyWithParent *bool `xml:"destroyWithParent" json:"destroyWithParent,omitempty"` } func init() { t["VAppEntityConfigInfo"] = reflect.TypeOf((*VAppEntityConfigInfo)(nil)).Elem() - minAPIVersionForType["VAppEntityConfigInfo"] = "4.0" } // The IPAssignmentInfo class specifies how the guest software gets @@ -81126,7 +80788,6 @@ type VAppIPAssignmentInfo struct { func init() { t["VAppIPAssignmentInfo"] = reflect.TypeOf((*VAppIPAssignmentInfo)(nil)).Elem() - minAPIVersionForType["VAppIPAssignmentInfo"] = "4.0" } // A virtual machine in a vApp cannot be powered on unless the @@ -81137,7 +80798,6 @@ type VAppNotRunning struct { func init() { t["VAppNotRunning"] = reflect.TypeOf((*VAppNotRunning)(nil)).Elem() - minAPIVersionForType["VAppNotRunning"] = "4.0" } type VAppNotRunningFault VAppNotRunning @@ -81157,7 +80817,6 @@ type VAppOperationInProgress struct { func init() { t["VAppOperationInProgress"] = reflect.TypeOf((*VAppOperationInProgress)(nil)).Elem() - minAPIVersionForType["VAppOperationInProgress"] = "5.0" } type VAppOperationInProgressFault VAppOperationInProgress @@ -81193,7 +80852,6 @@ type VAppOvfSectionInfo struct { func init() { t["VAppOvfSectionInfo"] = reflect.TypeOf((*VAppOvfSectionInfo)(nil)).Elem() - minAPIVersionForType["VAppOvfSectionInfo"] = "4.0" } // An incremental update to the OvfSection list. @@ -81205,7 +80863,6 @@ type VAppOvfSectionSpec struct { func init() { t["VAppOvfSectionSpec"] = reflect.TypeOf((*VAppOvfSectionSpec)(nil)).Elem() - minAPIVersionForType["VAppOvfSectionSpec"] = "4.0" } // Information that describes what product a vApp contains, for example, @@ -81247,7 +80904,6 @@ type VAppProductInfo struct { func init() { t["VAppProductInfo"] = reflect.TypeOf((*VAppProductInfo)(nil)).Elem() - minAPIVersionForType["VAppProductInfo"] = "4.0" } // An incremental update to the Product information list. @@ -81259,7 +80915,6 @@ type VAppProductSpec struct { func init() { t["VAppProductSpec"] = reflect.TypeOf((*VAppProductSpec)(nil)).Elem() - minAPIVersionForType["VAppProductSpec"] = "4.0" } // The base fault for all vApp property configuration issues @@ -81281,7 +80936,6 @@ type VAppPropertyFault struct { func init() { t["VAppPropertyFault"] = reflect.TypeOf((*VAppPropertyFault)(nil)).Elem() - minAPIVersionForType["VAppPropertyFault"] = "4.0" } type VAppPropertyFaultFault BaseVAppPropertyFault @@ -81334,41 +80988,41 @@ type VAppPropertyInfo struct { // Describes the valid format of the property. // // A type must be one of: - // - string : A generic string. Max length 65535 (64k). - // - string(x..) : A string with minimum character length x. - // - string(..y) : A string with maximum character length y. - // - string(x..y) : A string with minimum character length x and maximum - // character length y. - // - string\["choice1", "choice2", "choice3"\] : A set of choices. " inside a choice - // must be either \\" or ' e.g "start\\"middle\\"end" or "start'middle'end" and - // a \\ inside a string choice must be encoded as \\\\ e.g. "start\\\\end". - // - int : An integer value. Is semantically equivalent to - // int(-2147483648..2147483647) e.g. signed int32. - // - int(x..y): An integer value with a minimum size x and a maximum size y. - // For example int(0..255) is a number between 0 and 255 both incl. This is - // also a way to specify that the number must be a uint8. There is always a lower - // and lower bound. 
Max number of digits is 100 including any sign. If exported to OVF the - // value will be truncated to max of uint64 or int64. - // - real : IEEE 8-byte floating-point value. - // - real(x..y) : IEEE 8-byte floating-point value with a minimum size x and a - // maximum size y. For example real(-1.5..1.5) must be a number between -1.5 and 1.5. - // Because of the nature of float some conversions can truncate the value. - // Real must be encoded according to CIM: - // RealValue = \[ "+" | "-" } \*decimalDigit "." 1\*decimalDigit - // \[ ("e" | "E" ) \[ "+" | "-" \] 1\*decimalDigit \] \] - // - boolean : A boolean. The value can be True or False - // - password : A generic string. Max length 65535 (64k). - // - password(x..) : A string with minimum character length x. - // - password(..y) : A string with maximum character length y. - // - password(x..y) : A string with minimum character length x and maximum - // character length y. - // - ip : An IPv4 address in dot-decimal notation or an IPv6 address in - // colon-hexadecimal notation. - // - ip:network : An IP address in dot-notation (IPv4) and colon-hexadecimal (IPv6) - // on a particular network. The behavior of this type depends on the - // ipAllocationPolicy. See below. - // - expression: The default value specifies an expression that is calculated - // by the system. + // - string : A generic string. Max length 65535 (64k). + // - string(x..) : A string with minimum character length x. + // - string(..y) : A string with maximum character length y. + // - string(x..y) : A string with minimum character length x and maximum + // character length y. + // - string\["choice1", "choice2", "choice3"\] : A set of choices. " inside a choice + // must be either \\" or ' e.g "start\\"middle\\"end" or "start'middle'end" and + // a \\ inside a string choice must be encoded as \\\\ e.g. "start\\\\end". + // - int : An integer value. Is semantically equivalent to + // int(-2147483648..2147483647) e.g. signed int32. + // - int(x..y): An integer value with a minimum size x and a maximum size y. + // For example int(0..255) is a number between 0 and 255 both incl. This is + // also a way to specify that the number must be a uint8. There is always a lower + // and lower bound. Max number of digits is 100 including any sign. If exported to OVF the + // value will be truncated to max of uint64 or int64. + // - real : IEEE 8-byte floating-point value. + // - real(x..y) : IEEE 8-byte floating-point value with a minimum size x and a + // maximum size y. For example real(-1.5..1.5) must be a number between -1.5 and 1.5. + // Because of the nature of float some conversions can truncate the value. + // Real must be encoded according to CIM: + // RealValue = \[ "+" | "-" } \*decimalDigit "." 1\*decimalDigit + // \[ ("e" | "E" ) \[ "+" | "-" \] 1\*decimalDigit \] \] + // - boolean : A boolean. The value can be True or False + // - password : A generic string. Max length 65535 (64k). + // - password(x..) : A string with minimum character length x. + // - password(..y) : A string with maximum character length y. + // - password(x..y) : A string with minimum character length x and maximum + // character length y. + // - ip : An IPv4 address in dot-decimal notation or an IPv6 address in + // colon-hexadecimal notation. + // - ip:network : An IP address in dot-notation (IPv4) and colon-hexadecimal (IPv6) + // on a particular network. The behavior of this type depends on the + // ipAllocationPolicy. See below. 
+ // - expression: The default value specifies an expression that is calculated + // by the system. // // For properties of type 'password', the value field and default value field will // always be returned as an empty string when queried. Thus, it is a write-only property. @@ -81377,29 +81031,29 @@ type VAppPropertyInfo struct { // // An expression follows the general patterns of either ${arg} or ${cmd:arg}. The // list of supported expressions are listed below: - // - ${<name>} : This expression evaluates to the same value as the named - // property in the parent vApp. A parent vApp is the - // first vApp in the ancestry chain (resource pools are - // skipped). If no parent vApp exists or the property is - // not defined on the parent vApp, the expression - // evaluates to the empty value. - // - ${subnet:<network>} : The subnet value of the given network. - // - ${netmask:<network>} : The netmask value of the given network. - // - ${gateway:<network>} : The gateway value of the given network. - // - ${autoIp:<network>} : An auto-assigned network address on the given - // network - // - ${net:<network>} : The name of the network - // - ${domainName:<network>} : The DNS domain name, e.g., vmware.com, of - // the given network. - // - ${searchPath:<network>} : The DNS search path, e.g., - // eng.vmware.com;vmware.com, of the given - // network. - // - ${hostPrefix:<network>}: The host prefix on a given network, e.g., - // "voe-" - // - ${dns:network}: A comma-separated string of configured network addresses - // - ${httpProxy:network}: The hostname:port for a proxy on the network - // - ${vimIp:} : The IP address of the VIM API provider server. This would - // typical be an ESX Server or VirtualCenter Server. + // - ${<name>} : This expression evaluates to the same value as the named + // property in the parent vApp. A parent vApp is the + // first vApp in the ancestry chain (resource pools are + // skipped). If no parent vApp exists or the property is + // not defined on the parent vApp, the expression + // evaluates to the empty value. + // - ${subnet:<network>} : The subnet value of the given network. + // - ${netmask:<network>} : The netmask value of the given network. + // - ${gateway:<network>} : The gateway value of the given network. + // - ${autoIp:<network>} : An auto-assigned network address on the given + // network + // - ${net:<network>} : The name of the network + // - ${domainName:<network>} : The DNS domain name, e.g., vmware.com, of + // the given network. + // - ${searchPath:<network>} : The DNS search path, e.g., + // eng.vmware.com;vmware.com, of the given + // network. + // - ${hostPrefix:<network>}: The host prefix on a given network, e.g., + // "voe-" + // - ${dns:network}: A comma-separated string of configured network addresses + // - ${httpProxy:network}: The hostname:port for a proxy on the network + // - ${vimIp:} : The IP address of the VIM API provider server. This would + // typical be an ESX Server or VirtualCenter Server. // // A vApp will fail to start if any of the properties cannot be computed. For // example, if a property reference a gateway on a network, for which is has not @@ -81408,9 +81062,9 @@ type VAppPropertyInfo struct { // the vApp or virtual machine is not-running. // // The system provides three ways of specifying IP addresses: - // - ip, - // - ip:network type, - // - ${ip:network} expression. + // - ip, + // - ip:network type, + // - ${ip:network} expression. 
// // The _ip_ types are typically used to specify an IP addressed to an // external system. Thus, these are not used by a virtual ethernet adapter @@ -81459,7 +81113,7 @@ type VAppPropertyInfo struct { // For types that // refer to network names the type reference is the managed object reference // of the network. - TypeReference string `xml:"typeReference,omitempty" json:"typeReference,omitempty" vim:"5.1"` + TypeReference string `xml:"typeReference,omitempty" json:"typeReference,omitempty"` // Whether the property is user-configurable or a system property. // // This is not used @@ -81487,7 +81141,6 @@ type VAppPropertyInfo struct { func init() { t["VAppPropertyInfo"] = reflect.TypeOf((*VAppPropertyInfo)(nil)).Elem() - minAPIVersionForType["VAppPropertyInfo"] = "4.0" } // An incremental update to the Property list. @@ -81499,7 +81152,6 @@ type VAppPropertySpec struct { func init() { t["VAppPropertySpec"] = reflect.TypeOf((*VAppPropertySpec)(nil)).Elem() - minAPIVersionForType["VAppPropertySpec"] = "4.0" } // A specialized TaskInProgress when an operation is performed @@ -81515,7 +81167,6 @@ type VAppTaskInProgress struct { func init() { t["VAppTaskInProgress"] = reflect.TypeOf((*VAppTaskInProgress)(nil)).Elem() - minAPIVersionForType["VAppTaskInProgress"] = "4.0" } type VAppTaskInProgressFault VAppTaskInProgress @@ -81542,6 +81193,7 @@ type VCenterUpdateVStorageObjectMetadataExRequestType struct { func init() { t["VCenterUpdateVStorageObjectMetadataExRequestType"] = reflect.TypeOf((*VCenterUpdateVStorageObjectMetadataExRequestType)(nil)).Elem() + minAPIVersionForType["VCenterUpdateVStorageObjectMetadataExRequestType"] = "7.0.2.0" } type VCenterUpdateVStorageObjectMetadataEx_Task VCenterUpdateVStorageObjectMetadataExRequestType @@ -81561,7 +81213,6 @@ type VFlashCacheHotConfigNotSupported struct { func init() { t["VFlashCacheHotConfigNotSupported"] = reflect.TypeOf((*VFlashCacheHotConfigNotSupported)(nil)).Elem() - minAPIVersionForType["VFlashCacheHotConfigNotSupported"] = "6.0" } type VFlashCacheHotConfigNotSupportedFault VFlashCacheHotConfigNotSupported @@ -81587,7 +81238,6 @@ type VFlashModuleNotSupported struct { func init() { t["VFlashModuleNotSupported"] = reflect.TypeOf((*VFlashModuleNotSupported)(nil)).Elem() - minAPIVersionForType["VFlashModuleNotSupported"] = "5.5" } type VFlashModuleNotSupportedFault VFlashModuleNotSupported @@ -81614,7 +81264,6 @@ type VFlashModuleVersionIncompatible struct { func init() { t["VFlashModuleVersionIncompatible"] = reflect.TypeOf((*VFlashModuleVersionIncompatible)(nil)).Elem() - minAPIVersionForType["VFlashModuleVersionIncompatible"] = "5.5" } type VFlashModuleVersionIncompatibleFault VFlashModuleVersionIncompatible @@ -81630,7 +81279,7 @@ type VMFSDatastoreCreatedEvent struct { // The associated datastore. Datastore DatastoreEventArgument `xml:"datastore" json:"datastore"` // Url of the associated datastore. - DatastoreUrl string `xml:"datastoreUrl,omitempty" json:"datastoreUrl,omitempty" vim:"6.5"` + DatastoreUrl string `xml:"datastoreUrl,omitempty" json:"datastoreUrl,omitempty"` } func init() { @@ -81647,7 +81296,6 @@ type VMFSDatastoreExpandedEvent struct { func init() { t["VMFSDatastoreExpandedEvent"] = reflect.TypeOf((*VMFSDatastoreExpandedEvent)(nil)).Elem() - minAPIVersionForType["VMFSDatastoreExpandedEvent"] = "4.0" } // This event records when a datastore is extended. 
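// A minimal sketch of an OVF property that uses the type grammar and ${...}
// expressions documented above. Key, Id, Label, DefaultValue and
// UserConfigurable are VAppPropertyInfo fields elided from the hunks here;
// the concrete values, the network name and the ArrayUpdateSpec wiring are
// assumptions.
package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	prop := types.VAppPropertyInfo{
		Key:   1,
		Id:    "mgmt_ip",       // placeholder property id
		Label: "Management IP", // placeholder label
		// "ip:<network>" type: an address on the named network, allocated
		// according to the vApp's ipAllocationPolicy.
		Type: "ip:VM Network", // placeholder network name
		// Expression default computed by the system, here the VIM endpoint IP.
		DefaultValue:     "${vimIp:}",
		UserConfigurable: types.NewBool(true),
	}
	// Wrapped in the incremental-update spec described above (assumed wiring).
	spec := types.VAppPropertySpec{
		ArrayUpdateSpec: types.ArrayUpdateSpec{Operation: types.ArrayUpdateOperationAdd},
		Info:            &prop,
	}
	fmt.Printf("%+v\n", spec)
}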
@@ -81660,7 +81308,6 @@ type VMFSDatastoreExtendedEvent struct { func init() { t["VMFSDatastoreExtendedEvent"] = reflect.TypeOf((*VMFSDatastoreExtendedEvent)(nil)).Elem() - minAPIVersionForType["VMFSDatastoreExtendedEvent"] = "4.0" } // The virtual machine is configured to use a VMI ROM, which is not @@ -81671,7 +81318,6 @@ type VMINotSupported struct { func init() { t["VMINotSupported"] = reflect.TypeOf((*VMINotSupported)(nil)).Elem() - minAPIVersionForType["VMINotSupported"] = "2.5" } type VMINotSupportedFault VMINotSupported @@ -81690,7 +81336,6 @@ type VMOnConflictDVPort struct { func init() { t["VMOnConflictDVPort"] = reflect.TypeOf((*VMOnConflictDVPort)(nil)).Elem() - minAPIVersionForType["VMOnConflictDVPort"] = "4.0" } type VMOnConflictDVPortFault VMOnConflictDVPort @@ -81727,7 +81372,6 @@ type VMotionAcrossNetworkNotSupported struct { func init() { t["VMotionAcrossNetworkNotSupported"] = reflect.TypeOf((*VMotionAcrossNetworkNotSupported)(nil)).Elem() - minAPIVersionForType["VMotionAcrossNetworkNotSupported"] = "5.5" } type VMotionAcrossNetworkNotSupportedFault VMotionAcrossNetworkNotSupported @@ -81751,7 +81395,7 @@ type VMotionInterfaceIssue struct { // The host with the bad interface. // // Refers instance of `HostSystem`. - FailedHostEntity *ManagedObjectReference `xml:"failedHostEntity,omitempty" json:"failedHostEntity,omitempty" vim:"2.5"` + FailedHostEntity *ManagedObjectReference `xml:"failedHostEntity,omitempty" json:"failedHostEntity,omitempty"` } func init() { @@ -81897,7 +81541,7 @@ type VMwareDVSConfigInfo struct { DVSConfigInfo // The Distributed Port Mirroring sessions in the switch. - VspanSession []VMwareVspanSession `xml:"vspanSession,omitempty" json:"vspanSession,omitempty" vim:"5.0"` + VspanSession []VMwareVspanSession `xml:"vspanSession,omitempty" json:"vspanSession,omitempty"` // The PVLAN configured in the switch. PvlanConfig []VMwareDVSPvlanMapEntry `xml:"pvlanConfig,omitempty" json:"pvlanConfig,omitempty"` // The maximum MTU in the switch. @@ -81911,26 +81555,31 @@ type VMwareDVSConfigInfo struct { // portgroup or port of the switch. // // See also `VMwareDVSPortSetting.ipfixEnabled`. - IpfixConfig *VMwareIpfixConfig `xml:"ipfixConfig,omitempty" json:"ipfixConfig,omitempty" vim:"5.0"` + IpfixConfig *VMwareIpfixConfig `xml:"ipfixConfig,omitempty" json:"ipfixConfig,omitempty"` // The Link Aggregation Control Protocol groups in the switch. - LacpGroupConfig []VMwareDvsLacpGroupConfig `xml:"lacpGroupConfig,omitempty" json:"lacpGroupConfig,omitempty" vim:"5.5"` + LacpGroupConfig []VMwareDvsLacpGroupConfig `xml:"lacpGroupConfig,omitempty" json:"lacpGroupConfig,omitempty"` // The Link Aggregation Control Protocol group version in the switch. // // See `VMwareDvsLacpApiVersion_enum` for valid values. - LacpApiVersion string `xml:"lacpApiVersion,omitempty" json:"lacpApiVersion,omitempty" vim:"5.5"` + LacpApiVersion string `xml:"lacpApiVersion,omitempty" json:"lacpApiVersion,omitempty"` // The Multicast Filtering mode in the switch. // // See `VMwareDvsMulticastFilteringMode_enum` for valid values. - MulticastFilteringMode string `xml:"multicastFilteringMode,omitempty" json:"multicastFilteringMode,omitempty" vim:"6.0"` + MulticastFilteringMode string `xml:"multicastFilteringMode,omitempty" json:"multicastFilteringMode,omitempty"` // Indicate the ID of NetworkOffloadSpec used in the switch. // // ID "None" means that network offload is not allowed in the switch. 
NetworkOffloadSpecId string `xml:"networkOffloadSpecId,omitempty" json:"networkOffloadSpecId,omitempty" vim:"8.0.0.1"` + // The network offload specific configuration of the switch. + // + // It is only set when network offload is allowed + // (`VMwareDVSConfigInfo.networkOffloadSpecId` + // is not "None"). + NetworkOffloadConfig *VmwareDistributedVirtualSwitchNetworkOffloadConfig `xml:"networkOffloadConfig,omitempty" json:"networkOffloadConfig,omitempty" vim:"8.0.3.0"` } func init() { t["VMwareDVSConfigInfo"] = reflect.TypeOf((*VMwareDVSConfigInfo)(nil)).Elem() - minAPIVersionForType["VMwareDVSConfigInfo"] = "4.0" } // This class defines the VMware specific configuration for @@ -81969,7 +81618,7 @@ type VMwareDVSConfigSpec struct { // // The VSPAN // sessions in the array cannot be of the same key. - VspanConfigSpec []VMwareDVSVspanConfigSpec `xml:"vspanConfigSpec,omitempty" json:"vspanConfigSpec,omitempty" vim:"5.0"` + VspanConfigSpec []VMwareDVSVspanConfigSpec `xml:"vspanConfigSpec,omitempty" json:"vspanConfigSpec,omitempty"` // The maximum MTU in the switch. MaxMtu int32 `xml:"maxMtu,omitempty" json:"maxMtu,omitempty"` // See `LinkDiscoveryProtocolConfig`. @@ -81981,25 +81630,30 @@ type VMwareDVSConfigSpec struct { // portgroup or port of the switch. // // See also `VMwareDVSPortSetting.ipfixEnabled`. - IpfixConfig *VMwareIpfixConfig `xml:"ipfixConfig,omitempty" json:"ipfixConfig,omitempty" vim:"5.0"` + IpfixConfig *VMwareIpfixConfig `xml:"ipfixConfig,omitempty" json:"ipfixConfig,omitempty"` // The Link Aggregation Control Protocol group version in the switch. // // See `VMwareDvsLacpApiVersion_enum` for valid values. - LacpApiVersion string `xml:"lacpApiVersion,omitempty" json:"lacpApiVersion,omitempty" vim:"5.5"` + LacpApiVersion string `xml:"lacpApiVersion,omitempty" json:"lacpApiVersion,omitempty"` // The Multicast Filtering mode in the switch. // // See `VMwareDvsMulticastFilteringMode_enum` for valid values. - MulticastFilteringMode string `xml:"multicastFilteringMode,omitempty" json:"multicastFilteringMode,omitempty" vim:"6.0"` + MulticastFilteringMode string `xml:"multicastFilteringMode,omitempty" json:"multicastFilteringMode,omitempty"` // Indicate the ID of NetworkOffloadSpec used in the switch. // // Unset it when network offload is not allowed when creating a switch. // Use ID "None" to change network offload from allowed to not allowed. NetworkOffloadSpecId string `xml:"networkOffloadSpecId,omitempty" json:"networkOffloadSpecId,omitempty" vim:"8.0.0.1"` + // The network offload specific configuration of the switch. + // + // This can be set only when network offload is allowed + // (`VMwareDVSConfigInfo.networkOffloadSpecId` + // is not "None"). + NetworkOffloadConfig *VmwareDistributedVirtualSwitchNetworkOffloadConfig `xml:"networkOffloadConfig,omitempty" json:"networkOffloadConfig,omitempty" vim:"8.0.3.0"` } func init() { t["VMwareDVSConfigSpec"] = reflect.TypeOf((*VMwareDVSConfigSpec)(nil)).Elem() - minAPIVersionForType["VMwareDVSConfigSpec"] = "4.0" } // Indicators of support for version-specific DVS features that are only @@ -82011,44 +81665,43 @@ type VMwareDVSFeatureCapability struct { // vSphere Distributed Switch. // // Distributed Port Mirroring is supported in vSphere Distributed Switch Version 5.0 or later. 
- VspanSupported *bool `xml:"vspanSupported" json:"vspanSupported,omitempty" vim:"5.0"` + VspanSupported *bool `xml:"vspanSupported" json:"vspanSupported,omitempty"` // Flag to indicate whether LLDP(Link Layer Discovery Protocol) is supported on the // vSphere Distributed Switch. // // LLDP is supported in vSphere Distributed Switch Version 5.0 or later. - LldpSupported *bool `xml:"lldpSupported" json:"lldpSupported,omitempty" vim:"5.0"` + LldpSupported *bool `xml:"lldpSupported" json:"lldpSupported,omitempty"` // Deprecated as of vSphere API 6.0, use `VMwareDvsIpfixCapability`. // // Flag to indicate whether IPFIX(NetFlow) is supported on the // vSphere Distributed Switch. // // IPFIX is supported in vSphere Distributed Switch Version 5.0 or later. - IpfixSupported *bool `xml:"ipfixSupported" json:"ipfixSupported,omitempty" vim:"5.0"` + IpfixSupported *bool `xml:"ipfixSupported" json:"ipfixSupported,omitempty"` // The support for version-specific IPFIX(NetFlow). - IpfixCapability *VMwareDvsIpfixCapability `xml:"ipfixCapability,omitempty" json:"ipfixCapability,omitempty" vim:"6.0"` + IpfixCapability *VMwareDvsIpfixCapability `xml:"ipfixCapability,omitempty" json:"ipfixCapability,omitempty"` // Flag to indicate whether multicast snooping(IGMP/MLD Snooping) // is supported on the vSphere Distributed Switch. // // IGMP/MLD Snooping is supported in vSphere Distributed Switch Version 6.0 or later. - MulticastSnoopingSupported *bool `xml:"multicastSnoopingSupported" json:"multicastSnoopingSupported,omitempty" vim:"6.0"` + MulticastSnoopingSupported *bool `xml:"multicastSnoopingSupported" json:"multicastSnoopingSupported,omitempty"` // The support for version-specific Distributed Port Mirroring sessions. - VspanCapability *VMwareDVSVspanCapability `xml:"vspanCapability,omitempty" json:"vspanCapability,omitempty" vim:"5.1"` + VspanCapability *VMwareDVSVspanCapability `xml:"vspanCapability,omitempty" json:"vspanCapability,omitempty"` // The support for version-specific Link Aggregation Control Protocol. - LacpCapability *VMwareDvsLacpCapability `xml:"lacpCapability,omitempty" json:"lacpCapability,omitempty" vim:"5.1"` + LacpCapability *VMwareDvsLacpCapability `xml:"lacpCapability,omitempty" json:"lacpCapability,omitempty"` // The support for version-specific DPU(SmartNic). DpuCapability *VMwareDvsDpuCapability `xml:"dpuCapability,omitempty" json:"dpuCapability,omitempty" vim:"8.0.0.1"` // Flag to indicate whether NSX is supported on the // vSphere Distributed Switch. // // NSX is supported in vSphere Distributed Switch Version 7.0 or later. - NsxSupported *bool `xml:"nsxSupported" json:"nsxSupported,omitempty" vim:"7.0"` + NsxSupported *bool `xml:"nsxSupported" json:"nsxSupported,omitempty"` // The support for version-specific supported MTU. 
MtuCapability *VMwareDvsMtuCapability `xml:"mtuCapability,omitempty" json:"mtuCapability,omitempty" vim:"7.0.2.0"` } func init() { t["VMwareDVSFeatureCapability"] = reflect.TypeOf((*VMwareDVSFeatureCapability)(nil)).Elem() - minAPIVersionForType["VMwareDVSFeatureCapability"] = "4.1" } // The feature capabilities of health check supported by the @@ -82066,7 +81719,6 @@ type VMwareDVSHealthCheckCapability struct { func init() { t["VMwareDVSHealthCheckCapability"] = reflect.TypeOf((*VMwareDVSHealthCheckCapability)(nil)).Elem() - minAPIVersionForType["VMwareDVSHealthCheckCapability"] = "5.1" } // This class defines health check configuration for @@ -82077,7 +81729,6 @@ type VMwareDVSHealthCheckConfig struct { func init() { t["VMwareDVSHealthCheckConfig"] = reflect.TypeOf((*VMwareDVSHealthCheckConfig)(nil)).Elem() - minAPIVersionForType["VMwareDVSHealthCheckConfig"] = "5.1" } // This class defines MTU health check result of an uplink port @@ -82108,7 +81759,6 @@ type VMwareDVSMtuHealthCheckResult struct { func init() { t["VMwareDVSMtuHealthCheckResult"] = reflect.TypeOf((*VMwareDVSMtuHealthCheckResult)(nil)).Elem() - minAPIVersionForType["VMwareDVSMtuHealthCheckResult"] = "5.1" } // This class defines the VMware specific configuration for @@ -82141,7 +81791,7 @@ type VMwareDVSPortSetting struct { // and an appropriately populated // *ipfix configuration* // that specifies a collector IP address and port. - IpfixEnabled *BoolPolicy `xml:"ipfixEnabled,omitempty" json:"ipfixEnabled,omitempty" vim:"5.0"` + IpfixEnabled *BoolPolicy `xml:"ipfixEnabled,omitempty" json:"ipfixEnabled,omitempty"` // If true, a copy of packets sent to the switch will always be forwarded to // an uplink in addition to the regular packet forwarded done by the switch. TxUplink *BoolPolicy `xml:"txUplink,omitempty" json:"txUplink,omitempty"` @@ -82154,17 +81804,16 @@ type VMwareDVSPortSetting struct { // // This policy is ignored on non-uplink portgroups. // Setting this policy at port level is not supported. - LacpPolicy *VMwareUplinkLacpPolicy `xml:"lacpPolicy,omitempty" json:"lacpPolicy,omitempty" vim:"5.1"` + LacpPolicy *VMwareUplinkLacpPolicy `xml:"lacpPolicy,omitempty" json:"lacpPolicy,omitempty"` // The MAC learning policy. - MacManagementPolicy *DVSMacManagementPolicy `xml:"macManagementPolicy,omitempty" json:"macManagementPolicy,omitempty" vim:"6.7"` + MacManagementPolicy *DVSMacManagementPolicy `xml:"macManagementPolicy,omitempty" json:"macManagementPolicy,omitempty"` // The VNI number of overlay logical switch, which is used by // NSX portgroup. - VNI *IntPolicy `xml:"VNI,omitempty" json:"VNI,omitempty" vim:"7.0"` + VNI *IntPolicy `xml:"VNI,omitempty" json:"VNI,omitempty"` } func init() { t["VMwareDVSPortSetting"] = reflect.TypeOf((*VMwareDVSPortSetting)(nil)).Elem() - minAPIVersionForType["VMwareDVSPortSetting"] = "4.0" } // This class defines the VMware specific configuration for @@ -82200,18 +81849,17 @@ type VMwareDVSPortgroupPolicy struct { // for an individual port to override the setting in // `DVPortgroupConfigInfo.defaultPortConfig` of // a portgroup. - IpfixOverrideAllowed *bool `xml:"ipfixOverrideAllowed" json:"ipfixOverrideAllowed,omitempty" vim:"5.0"` + IpfixOverrideAllowed *bool `xml:"ipfixOverrideAllowed" json:"ipfixOverrideAllowed,omitempty"` // Allow the setting of // `VMwareDVSPortSetting.macManagementPolicy` // for an individual port to override the setting in // `DVPortgroupConfigInfo.defaultPortConfig` of // a portgroup. 
- MacManagementOverrideAllowed *bool `xml:"macManagementOverrideAllowed" json:"macManagementOverrideAllowed,omitempty" vim:"6.7.1"` + MacManagementOverrideAllowed *bool `xml:"macManagementOverrideAllowed" json:"macManagementOverrideAllowed,omitempty"` } func init() { t["VMwareDVSPortgroupPolicy"] = reflect.TypeOf((*VMwareDVSPortgroupPolicy)(nil)).Elem() - minAPIVersionForType["VMwareDVSPortgroupPolicy"] = "4.0" } // This class defines the configuration of a PVLAN map entry @@ -82230,7 +81878,6 @@ type VMwareDVSPvlanConfigSpec struct { func init() { t["VMwareDVSPvlanConfigSpec"] = reflect.TypeOf((*VMwareDVSPvlanConfigSpec)(nil)).Elem() - minAPIVersionForType["VMwareDVSPvlanConfigSpec"] = "4.0" } // The class represents a PVLAN id. @@ -82256,7 +81903,6 @@ type VMwareDVSPvlanMapEntry struct { func init() { t["VMwareDVSPvlanMapEntry"] = reflect.TypeOf((*VMwareDVSPvlanMapEntry)(nil)).Elem() - minAPIVersionForType["VMwareDVSPvlanMapEntry"] = "4.0" } // This class defines the teaming health check configuration. @@ -82269,7 +81915,6 @@ type VMwareDVSTeamingHealthCheckConfig struct { func init() { t["VMwareDVSTeamingHealthCheckConfig"] = reflect.TypeOf((*VMwareDVSTeamingHealthCheckConfig)(nil)).Elem() - minAPIVersionForType["VMwareDVSTeamingHealthCheckConfig"] = "5.1" } // This class defines teaming health check result of a host that @@ -82286,7 +81931,6 @@ type VMwareDVSTeamingHealthCheckResult struct { func init() { t["VMwareDVSTeamingHealthCheckResult"] = reflect.TypeOf((*VMwareDVSTeamingHealthCheckResult)(nil)).Elem() - minAPIVersionForType["VMwareDVSTeamingHealthCheckResult"] = "5.1" } // This class defines Vlan health check result of an uplink port @@ -82308,7 +81952,6 @@ type VMwareDVSVlanHealthCheckResult struct { func init() { t["VMwareDVSVlanHealthCheckResult"] = reflect.TypeOf((*VMwareDVSVlanHealthCheckResult)(nil)).Elem() - minAPIVersionForType["VMwareDVSVlanHealthCheckResult"] = "5.1" } // This class defines the vlan and mtu health check configuration. @@ -82323,7 +81966,6 @@ type VMwareDVSVlanMtuHealthCheckConfig struct { func init() { t["VMwareDVSVlanMtuHealthCheckConfig"] = reflect.TypeOf((*VMwareDVSVlanMtuHealthCheckConfig)(nil)).Elem() - minAPIVersionForType["VMwareDVSVlanMtuHealthCheckConfig"] = "5.1" } // Indicators of support for version-specific Distributed Port Mirroring sessions. @@ -82347,15 +81989,14 @@ type VMwareDVSVspanCapability struct { EncapRemoteSourceSupported bool `xml:"encapRemoteSourceSupported" json:"encapRemoteSourceSupported"` // Flag to indicate whether ERSPAN protocol encapsulation is supported // on the vSphere Distributed Switch. - ErspanProtocolSupported *bool `xml:"erspanProtocolSupported" json:"erspanProtocolSupported,omitempty" vim:"6.5"` + ErspanProtocolSupported *bool `xml:"erspanProtocolSupported" json:"erspanProtocolSupported,omitempty"` // Flag to indicate whether dvport mirror can be configured to use a // dedicated network stack instance. - MirrorNetstackSupported *bool `xml:"mirrorNetstackSupported" json:"mirrorNetstackSupported,omitempty" vim:"6.7"` + MirrorNetstackSupported *bool `xml:"mirrorNetstackSupported" json:"mirrorNetstackSupported,omitempty"` } func init() { t["VMwareDVSVspanCapability"] = reflect.TypeOf((*VMwareDVSVspanCapability)(nil)).Elem() - minAPIVersionForType["VMwareDVSVspanCapability"] = "5.1" } // This class defines the configuration of a Distributed Port Mirroring session. 
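// A minimal sketch combining two of the structs above: enable IPFIX in a
// portgroup's default port setting and allow individual ports to override it.
// How these objects are then applied (for example through a portgroup
// reconfigure spec) is outside this sketch and assumed.
package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	// Default port setting: turn NetFlow/IPFIX monitoring on.
	setting := types.VMwareDVSPortSetting{
		IpfixEnabled: &types.BoolPolicy{Value: types.NewBool(true)},
	}
	// Portgroup policy: permit per-port override of ipfixEnabled, matching
	// the IpfixOverrideAllowed semantics documented above.
	policy := types.VMwareDVSPortgroupPolicy{
		IpfixOverrideAllowed: types.NewBool(true),
	}
	fmt.Printf("%+v\n%+v\n", setting, policy)
}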
@@ -82373,7 +82014,6 @@ type VMwareDVSVspanConfigSpec struct { func init() { t["VMwareDVSVspanConfigSpec"] = reflect.TypeOf((*VMwareDVSVspanConfigSpec)(nil)).Elem() - minAPIVersionForType["VMwareDVSVspanConfigSpec"] = "5.0" } // The feature capabilities of Dpu Features supported by the @@ -82384,6 +82024,12 @@ type VMwareDvsDpuCapability struct { // Flag to indicate whether network offloading is supported on the // vSphere Distributed Switch. NetworkOffloadSupported *bool `xml:"networkOffloadSupported" json:"networkOffloadSupported,omitempty"` + // Flag to indicate whether the vSphere Distributed Switch supports + // connecting two DPUs to an offloading VDS and operating in an + // active-standby mode. + // + // If not set, the feature is not supported. + ActiveStandbyModeSupported *bool `xml:"activeStandbyModeSupported" json:"activeStandbyModeSupported,omitempty" vim:"8.0.3.0"` } func init() { @@ -82414,7 +82060,6 @@ type VMwareDvsIpfixCapability struct { func init() { t["VMwareDvsIpfixCapability"] = reflect.TypeOf((*VMwareDvsIpfixCapability)(nil)).Elem() - minAPIVersionForType["VMwareDvsIpfixCapability"] = "6.0" } // The feature capabilities of Link Aggregation Control Protocol supported by the @@ -82429,7 +82074,7 @@ type VMwareDvsLacpCapability struct { // than one Link Aggregation Control Protocol group to be configured. // // It is suppported in vSphere Distributed Switch Version 5.5 or later. - MultiLacpGroupSupported *bool `xml:"multiLacpGroupSupported" json:"multiLacpGroupSupported,omitempty" vim:"5.5"` + MultiLacpGroupSupported *bool `xml:"multiLacpGroupSupported" json:"multiLacpGroupSupported,omitempty"` // Flag to indicate whether LACP Fast Mode is supported on the // vSphere Distributed Switch. // @@ -82439,7 +82084,6 @@ type VMwareDvsLacpCapability struct { func init() { t["VMwareDvsLacpCapability"] = reflect.TypeOf((*VMwareDvsLacpCapability)(nil)).Elem() - minAPIVersionForType["VMwareDvsLacpCapability"] = "5.1" } // This class defines VMware specific multiple IEEE 802.3ad @@ -82482,7 +82126,6 @@ type VMwareDvsLacpGroupConfig struct { func init() { t["VMwareDvsLacpGroupConfig"] = reflect.TypeOf((*VMwareDvsLacpGroupConfig)(nil)).Elem() - minAPIVersionForType["VMwareDvsLacpGroupConfig"] = "5.5" } // This class defines the configuration of a Link Aggregation @@ -82499,7 +82142,6 @@ type VMwareDvsLacpGroupSpec struct { func init() { t["VMwareDvsLacpGroupSpec"] = reflect.TypeOf((*VMwareDvsLacpGroupSpec)(nil)).Elem() - minAPIVersionForType["VMwareDvsLacpGroupSpec"] = "5.5" } // This class defines the ipfix configuration of the Link Aggregation @@ -82521,7 +82163,6 @@ type VMwareDvsLagIpfixConfig struct { func init() { t["VMwareDvsLagIpfixConfig"] = reflect.TypeOf((*VMwareDvsLagIpfixConfig)(nil)).Elem() - minAPIVersionForType["VMwareDvsLagIpfixConfig"] = "5.5" } // This class defines the vlan configuration of the Link Aggregation @@ -82545,7 +82186,6 @@ type VMwareDvsLagVlanConfig struct { func init() { t["VMwareDvsLagVlanConfig"] = reflect.TypeOf((*VMwareDvsLagVlanConfig)(nil)).Elem() - minAPIVersionForType["VMwareDvsLagVlanConfig"] = "5.5" } // Indicators of support for version-specific supported MTU. @@ -82587,7 +82227,7 @@ type VMwareIpfixConfig struct { // Observation Domain Id is supported // in vSphere Distributed Switch Version 6.0 or later. 
// Legal value range is 0-((2^32)-1) - ObservationDomainId int64 `xml:"observationDomainId,omitempty" json:"observationDomainId,omitempty" vim:"6.0"` + ObservationDomainId int64 `xml:"observationDomainId,omitempty" json:"observationDomainId,omitempty"` // The number of seconds after which "active" flows are forced to be // exported to the collector. // @@ -82613,7 +82253,6 @@ type VMwareIpfixConfig struct { func init() { t["VMwareIpfixConfig"] = reflect.TypeOf((*VMwareIpfixConfig)(nil)).Elem() - minAPIVersionForType["VMwareIpfixConfig"] = "5.0" } // Deprecated as of vSphere API 5.5. @@ -82639,7 +82278,6 @@ type VMwareUplinkLacpPolicy struct { func init() { t["VMwareUplinkLacpPolicy"] = reflect.TypeOf((*VMwareUplinkLacpPolicy)(nil)).Elem() - minAPIVersionForType["VMwareUplinkLacpPolicy"] = "5.1" } // This data object type describes uplink port ordering policy for a @@ -82658,7 +82296,6 @@ type VMwareUplinkPortOrderPolicy struct { func init() { t["VMwareUplinkPortOrderPolicy"] = reflect.TypeOf((*VMwareUplinkPortOrderPolicy)(nil)).Elem() - minAPIVersionForType["VMwareUplinkPortOrderPolicy"] = "4.0" } // This class defines the ports, uplink ports name, vlans and IP addresses participating in a @@ -82685,7 +82322,7 @@ type VMwareVspanPort struct { WildcardPortConnecteeType []string `xml:"wildcardPortConnecteeType,omitempty" json:"wildcardPortConnecteeType,omitempty"` // Vlan Ids for ingress source of Remote Mirror destination // session. - Vlans []int32 `xml:"vlans,omitempty" json:"vlans,omitempty" vim:"5.1"` + Vlans []int32 `xml:"vlans,omitempty" json:"vlans,omitempty"` // IP address for the destination of encapsulated remote mirror source session, // IPv4 address is specified using dotted decimal notation. // @@ -82694,12 +82331,11 @@ type VMwareVspanPort struct { // of up to four hexadecimal digits. // A colon separates each field (:). For example, // 2001:DB8:101::230:6eff:fe04:d9ff. - IpAddress []string `xml:"ipAddress,omitempty" json:"ipAddress,omitempty" vim:"5.1"` + IpAddress []string `xml:"ipAddress,omitempty" json:"ipAddress,omitempty"` } func init() { t["VMwareVspanPort"] = reflect.TypeOf((*VMwareVspanPort)(nil)).Elem() - minAPIVersionForType["VMwareVspanPort"] = "5.0" } // The `VMwareVspanSession` data object @@ -82805,13 +82441,13 @@ type VMwareVspanSession struct { // `VMwareDVSVspanSessionType_enum` // for valid values. // Default value is mixedDestMirror if unspecified in a VSPAN create operation. - SessionType string `xml:"sessionType,omitempty" json:"sessionType,omitempty" vim:"5.1"` + SessionType string `xml:"sessionType,omitempty" json:"sessionType,omitempty"` // Sampling rate of the session. // // If its value is n, one of every n // packets is mirrored. // Valid values are between 1 to 65535, and default value is 1. - SamplingRate int32 `xml:"samplingRate,omitempty" json:"samplingRate,omitempty" vim:"5.1"` + SamplingRate int32 `xml:"samplingRate,omitempty" json:"samplingRate,omitempty"` // Encapsulation type of the session. // // See @@ -82819,35 +82455,34 @@ type VMwareVspanSession struct { // for valid values. // Default value is encapProtocolGRE if unspecified in a // VSPAN create operation. - EncapType string `xml:"encapType,omitempty" json:"encapType,omitempty" vim:"6.5"` + EncapType string `xml:"encapType,omitempty" json:"encapType,omitempty"` // ERSPAN ID of the session. // // Valid values are between 0 to 0x3ff, and default value is 0. 
// This value is applicable only if encaptType is // `erspan2` or // `erspan3` - ErspanId int32 `xml:"erspanId,omitempty" json:"erspanId,omitempty" vim:"6.5"` + ErspanId int32 `xml:"erspanId,omitempty" json:"erspanId,omitempty"` // Class of Service of the monitored frame. // // Valid values are between 0 to 7, and default value is 0. // This value is applicable only if encaptType is // `erspan2` or // `erspan3` - ErspanCOS int32 `xml:"erspanCOS,omitempty" json:"erspanCOS,omitempty" vim:"6.5"` + ErspanCOS int32 `xml:"erspanCOS,omitempty" json:"erspanCOS,omitempty"` // Timestamp Granularity. // // If the value is false, timestamp-granularity will be microsecond. // Otherwise the timestamp-granularity will be nanosecond // This value is applicable only if encaptType is // `erspan3` - ErspanGraNanosec *bool `xml:"erspanGraNanosec" json:"erspanGraNanosec,omitempty" vim:"6.5"` + ErspanGraNanosec *bool `xml:"erspanGraNanosec" json:"erspanGraNanosec,omitempty"` // Netstack instance of the session. - Netstack string `xml:"netstack,omitempty" json:"netstack,omitempty" vim:"6.7"` + Netstack string `xml:"netstack,omitempty" json:"netstack,omitempty"` } func init() { t["VMwareVspanSession"] = reflect.TypeOf((*VMwareVspanSession)(nil)).Elem() - minAPIVersionForType["VMwareVspanSession"] = "5.0" } // This data object type describes a virtual storage object. @@ -82860,7 +82495,6 @@ type VStorageObject struct { func init() { t["VStorageObject"] = reflect.TypeOf((*VStorageObject)(nil)).Elem() - minAPIVersionForType["VStorageObject"] = "6.5" } // This data object is a key-value pair whose key is the virtual storage @@ -82878,7 +82512,6 @@ type VStorageObjectAssociations struct { func init() { t["VStorageObjectAssociations"] = reflect.TypeOf((*VStorageObjectAssociations)(nil)).Elem() - minAPIVersionForType["VStorageObjectAssociations"] = "6.7" } // This data object contains infomation of a VM Disk associations. @@ -82893,7 +82526,6 @@ type VStorageObjectAssociationsVmDiskAssociations struct { func init() { t["VStorageObjectAssociationsVmDiskAssociations"] = reflect.TypeOf((*VStorageObjectAssociationsVmDiskAssociations)(nil)).Elem() - minAPIVersionForType["VStorageObjectAssociationsVmDiskAssociations"] = "6.7" } // Data object specifies Virtual storage object configuration @@ -82916,7 +82548,6 @@ type VStorageObjectConfigInfo struct { func init() { t["VStorageObjectConfigInfo"] = reflect.TypeOf((*VStorageObjectConfigInfo)(nil)).Elem() - minAPIVersionForType["VStorageObjectConfigInfo"] = "6.5" } // The parameters of `VStorageObjectManagerBase.VStorageObjectCreateSnapshotEx_Task`. 
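// A minimal sketch of a VMwareVspanSession built from the fields documented
// above, staying inside the stated value ranges. The session type string is
// assumed to be one of the VMwareDVSVspanSessionType_enum values; source and
// destination port selections are omitted.
package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	session := types.VMwareVspanSession{
		SessionType:      "encapsulatedRemoteMirrorSource", // assumed enum value
		SamplingRate:     1,                                // mirror every packet (valid range 1..65535)
		EncapType:        "erspan3",
		ErspanId:         100,                 // valid range 0..0x3ff
		ErspanCOS:        0,                   // valid range 0..7
		ErspanGraNanosec: types.NewBool(true), // nanosecond timestamp granularity (erspan3 only)
	}
	fmt.Printf("%+v\n", session)
}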
@@ -82935,6 +82566,7 @@ type VStorageObjectCreateSnapshotExRequestType struct { func init() { t["VStorageObjectCreateSnapshotExRequestType"] = reflect.TypeOf((*VStorageObjectCreateSnapshotExRequestType)(nil)).Elem() + minAPIVersionForType["VStorageObjectCreateSnapshotExRequestType"] = "8.0.2.0" } type VStorageObjectCreateSnapshotEx_Task VStorageObjectCreateSnapshotExRequestType @@ -82991,6 +82623,7 @@ type VStorageObjectDeleteSnapshotExRequestType struct { func init() { t["VStorageObjectDeleteSnapshotExRequestType"] = reflect.TypeOf((*VStorageObjectDeleteSnapshotExRequestType)(nil)).Elem() + minAPIVersionForType["VStorageObjectDeleteSnapshotExRequestType"] = "8.0.2.0" } type VStorageObjectDeleteSnapshotEx_Task VStorageObjectDeleteSnapshotExRequestType @@ -83018,6 +82651,7 @@ type VStorageObjectExtendDiskExRequestType struct { func init() { t["VStorageObjectExtendDiskExRequestType"] = reflect.TypeOf((*VStorageObjectExtendDiskExRequestType)(nil)).Elem() + minAPIVersionForType["VStorageObjectExtendDiskExRequestType"] = "8.0.2.0" } type VStorageObjectExtendDiskEx_Task VStorageObjectExtendDiskExRequestType @@ -83057,7 +82691,6 @@ type VStorageObjectSnapshotDetails struct { func init() { t["VStorageObjectSnapshotDetails"] = reflect.TypeOf((*VStorageObjectSnapshotDetails)(nil)).Elem() - minAPIVersionForType["VStorageObjectSnapshotDetails"] = "6.7" } // This data object type contains the brief information of a @@ -83071,7 +82704,6 @@ type VStorageObjectSnapshotInfo struct { func init() { t["VStorageObjectSnapshotInfo"] = reflect.TypeOf((*VStorageObjectSnapshotInfo)(nil)).Elem() - minAPIVersionForType["VStorageObjectSnapshotInfo"] = "6.7" } type VStorageObjectSnapshotInfoVStorageObjectSnapshot struct { @@ -83110,7 +82742,6 @@ type VStorageObjectStateInfo struct { func init() { t["VStorageObjectStateInfo"] = reflect.TypeOf((*VStorageObjectStateInfo)(nil)).Elem() - minAPIVersionForType["VStorageObjectStateInfo"] = "6.5" } type VVolHostPE struct { @@ -83143,7 +82774,6 @@ type VVolVmConfigFileUpdateResult struct { func init() { t["VVolVmConfigFileUpdateResult"] = reflect.TypeOf((*VVolVmConfigFileUpdateResult)(nil)).Elem() - minAPIVersionForType["VVolVmConfigFileUpdateResult"] = "6.5" } // Information of the failed update on the virtual machine config @@ -83154,14 +82784,13 @@ type VVolVmConfigFileUpdateResultFailedVmConfigFileInfo struct { // The target virtual machine config VVol ID TargetConfigVVolId string `xml:"targetConfigVVolId" json:"targetConfigVVolId"` // Datastore path for the virtual machine that failed to recover - DsPath string `xml:"dsPath,omitempty" json:"dsPath,omitempty" vim:"7.0"` + DsPath string `xml:"dsPath,omitempty" json:"dsPath,omitempty"` // The reason why the update failed. 
Fault LocalizedMethodFault `xml:"fault" json:"fault"` } func init() { t["VVolVmConfigFileUpdateResultFailedVmConfigFileInfo"] = reflect.TypeOf((*VVolVmConfigFileUpdateResultFailedVmConfigFileInfo)(nil)).Elem() - minAPIVersionForType["VVolVmConfigFileUpdateResultFailedVmConfigFileInfo"] = "6.5" } type ValidateCredentialsInGuest ValidateCredentialsInGuestRequestType @@ -83410,11 +83039,12 @@ type VasaProviderContainerSpec struct { ScId string `xml:"scId" json:"scId"` // Indicates whether the container got deleted Deleted bool `xml:"deleted" json:"deleted"` + // Indicates whether container is stretched + Stretched *bool `xml:"stretched" json:"stretched,omitempty" vim:"8.0.3.0"` } func init() { t["VasaProviderContainerSpec"] = reflect.TypeOf((*VasaProviderContainerSpec)(nil)).Elem() - minAPIVersionForType["VasaProviderContainerSpec"] = "6.0" } // This event records when the VirtualCenter agent on a host failed to uninstall. @@ -83429,7 +83059,6 @@ type VcAgentUninstallFailedEvent struct { func init() { t["VcAgentUninstallFailedEvent"] = reflect.TypeOf((*VcAgentUninstallFailedEvent)(nil)).Elem() - minAPIVersionForType["VcAgentUninstallFailedEvent"] = "4.0" } // This event records when the VirtualCenter agent on a host is uninstalled. @@ -83439,7 +83068,6 @@ type VcAgentUninstalledEvent struct { func init() { t["VcAgentUninstalledEvent"] = reflect.TypeOf((*VcAgentUninstalledEvent)(nil)).Elem() - minAPIVersionForType["VcAgentUninstalledEvent"] = "4.0" } // This event records when the VirtualCenter agent on a host failed to upgrade. @@ -83449,7 +83077,7 @@ type VcAgentUpgradeFailedEvent struct { // The reason why the upgrade failed, if known. // // See `AgentInstallFailedReason_enum` - Reason string `xml:"reason,omitempty" json:"reason,omitempty" vim:"4.0"` + Reason string `xml:"reason,omitempty" json:"reason,omitempty"` } func init() { @@ -83488,7 +83116,6 @@ type VchaClusterConfigInfo struct { func init() { t["VchaClusterConfigInfo"] = reflect.TypeOf((*VchaClusterConfigInfo)(nil)).Elem() - minAPIVersionForType["VchaClusterConfigInfo"] = "6.5" } // The VchaClusterConfigSpec class contains IP addresses of @@ -83509,7 +83136,6 @@ type VchaClusterConfigSpec struct { func init() { t["VchaClusterConfigSpec"] = reflect.TypeOf((*VchaClusterConfigSpec)(nil)).Elem() - minAPIVersionForType["VchaClusterConfigSpec"] = "6.5" } // The VchaClusterDeploymentSpec class contains @@ -83534,7 +83160,6 @@ type VchaClusterDeploymentSpec struct { func init() { t["VchaClusterDeploymentSpec"] = reflect.TypeOf((*VchaClusterDeploymentSpec)(nil)).Elem() - minAPIVersionForType["VchaClusterDeploymentSpec"] = "6.5" } // The VchaClusterHealth class describes the overall @@ -83559,7 +83184,6 @@ type VchaClusterHealth struct { func init() { t["VchaClusterHealth"] = reflect.TypeOf((*VchaClusterHealth)(nil)).Elem() - minAPIVersionForType["VchaClusterHealth"] = "6.5" } // The VchaClusterNetworkSpec class contains network @@ -83575,7 +83199,6 @@ type VchaClusterNetworkSpec struct { func init() { t["VchaClusterNetworkSpec"] = reflect.TypeOf((*VchaClusterNetworkSpec)(nil)).Elem() - minAPIVersionForType["VchaClusterNetworkSpec"] = "6.5" } // The VchaClusterRuntimeInfo class describes the @@ -83601,7 +83224,6 @@ type VchaClusterRuntimeInfo struct { func init() { t["VchaClusterRuntimeInfo"] = reflect.TypeOf((*VchaClusterRuntimeInfo)(nil)).Elem() - minAPIVersionForType["VchaClusterRuntimeInfo"] = "6.5" } // The VchaNodeRuntimeInfo class describes a node's @@ -83625,7 +83247,6 @@ type VchaNodeRuntimeInfo struct { func init() { 
t["VchaNodeRuntimeInfo"] = reflect.TypeOf((*VchaNodeRuntimeInfo)(nil)).Elem() - minAPIVersionForType["VchaNodeRuntimeInfo"] = "6.5" } // Password for the Vim account user on the host has been changed. @@ -83637,7 +83258,6 @@ type VimAccountPasswordChangedEvent struct { func init() { t["VimAccountPasswordChangedEvent"] = reflect.TypeOf((*VimAccountPasswordChangedEvent)(nil)).Elem() - minAPIVersionForType["VimAccountPasswordChangedEvent"] = "2.5" } // The common base type for all virtual infrastructure management @@ -83668,7 +83288,7 @@ type VimVasaProvider struct { // helps in preventing a regeneration of duplicate VASA Provider within // vvold when a user attempts to register the same VP using different names // or alternative urls. - Uid string `xml:"uid,omitempty" json:"uid,omitempty" vim:"6.7"` + Uid string `xml:"uid,omitempty" json:"uid,omitempty"` // VASA Provider URL. // // In VirtualHost based MultiVC setup, @@ -83696,7 +83316,6 @@ type VimVasaProvider struct { func init() { t["VimVasaProvider"] = reflect.TypeOf((*VimVasaProvider)(nil)).Elem() - minAPIVersionForType["VimVasaProvider"] = "6.0" } // Data object representing VASA Provider information. @@ -83711,7 +83330,6 @@ type VimVasaProviderInfo struct { func init() { t["VimVasaProviderInfo"] = reflect.TypeOf((*VimVasaProviderInfo)(nil)).Elem() - minAPIVersionForType["VimVasaProviderInfo"] = "6.0" } // Per Storage Array VP status. @@ -83729,7 +83347,6 @@ type VimVasaProviderStatePerArray struct { func init() { t["VimVasaProviderStatePerArray"] = reflect.TypeOf((*VimVasaProviderStatePerArray)(nil)).Elem() - minAPIVersionForType["VimVasaProviderStatePerArray"] = "6.0" } // Holds VirtualHost configuration information when VASA 5.0 or greater VVOL VASA Provider @@ -83764,7 +83381,6 @@ type VirtualAHCIController struct { func init() { t["VirtualAHCIController"] = reflect.TypeOf((*VirtualAHCIController)(nil)).Elem() - minAPIVersionForType["VirtualAHCIController"] = "5.5" } // VirtualAHCIControllerOption is the data object that contains @@ -83775,7 +83391,6 @@ type VirtualAHCIControllerOption struct { func init() { t["VirtualAHCIControllerOption"] = reflect.TypeOf((*VirtualAHCIControllerOption)(nil)).Elem() - minAPIVersionForType["VirtualAHCIControllerOption"] = "5.5" } // A VAppImportSpec is used by `ResourcePool.importVApp` when importing vApps (single VM or multi-VM). @@ -83805,7 +83420,6 @@ type VirtualAppImportSpec struct { func init() { t["VirtualAppImportSpec"] = reflect.TypeOf((*VirtualAppImportSpec)(nil)).Elem() - minAPIVersionForType["VirtualAppImportSpec"] = "4.0" } // Deprecated as of vSphere API 5.1. @@ -83824,7 +83438,6 @@ type VirtualAppLinkInfo struct { func init() { t["VirtualAppLinkInfo"] = reflect.TypeOf((*VirtualAppLinkInfo)(nil)).Elem() - minAPIVersionForType["VirtualAppLinkInfo"] = "4.1" } // This data object type encapsulates a typical set of resource @@ -83843,9 +83456,9 @@ type VirtualAppSummary struct { // // A stopped vApp is marked as suspended // under the following conditions: - // - All child virtual machines are either suspended or powered-off. - // - There is at least one suspended virtual machine for which the - // stop action is not "suspend". + // - All child virtual machines are either suspended or powered-off. + // - There is at least one suspended virtual machine for which the + // stop action is not "suspend". // // If the vAppState property is "stopped", the value is set to true if the vApp is // suspended (according the the above definition). 
@@ -83855,17 +83468,16 @@ type VirtualAppSummary struct { // from a suspended state, respectively. // // If the vAppState property is "started", then the suspend flag is set to false. - Suspended *bool `xml:"suspended" json:"suspended,omitempty" vim:"4.1"` + Suspended *bool `xml:"suspended" json:"suspended,omitempty"` // Whether one or more VMs in this vApp require a reboot to finish // installation. InstallBootRequired *bool `xml:"installBootRequired" json:"installBootRequired,omitempty"` // vCenter-specific UUID of the vApp - InstanceUuid string `xml:"instanceUuid,omitempty" json:"instanceUuid,omitempty" vim:"4.1"` + InstanceUuid string `xml:"instanceUuid,omitempty" json:"instanceUuid,omitempty"` } func init() { t["VirtualAppSummary"] = reflect.TypeOf((*VirtualAppSummary)(nil)).Elem() - minAPIVersionForType["VirtualAppSummary"] = "4.0" } // VirtualBusLogicController is the data object that represents @@ -84118,7 +83730,7 @@ type VirtualDevice struct { // this property is null. Connectable *VirtualDeviceConnectInfo `xml:"connectable,omitempty" json:"connectable,omitempty"` // Information about the bus slot of a device in a virtual machine. - SlotInfo BaseVirtualDeviceBusSlotInfo `xml:"slotInfo,omitempty,typeattr" json:"slotInfo,omitempty" vim:"5.1"` + SlotInfo BaseVirtualDeviceBusSlotInfo `xml:"slotInfo,omitempty,typeattr" json:"slotInfo,omitempty"` // Object key for the controller object for this device. // // This property contains the key property value of the controller device @@ -84197,7 +83809,6 @@ type VirtualDeviceBusSlotInfo struct { func init() { t["VirtualDeviceBusSlotInfo"] = reflect.TypeOf((*VirtualDeviceBusSlotInfo)(nil)).Elem() - minAPIVersionForType["VirtualDeviceBusSlotInfo"] = "5.1" } // The `VirtualDeviceBusSlotOption` data class @@ -84212,7 +83823,6 @@ type VirtualDeviceBusSlotOption struct { func init() { t["VirtualDeviceBusSlotOption"] = reflect.TypeOf((*VirtualDeviceBusSlotOption)(nil)).Elem() - minAPIVersionForType["VirtualDeviceBusSlotOption"] = "5.1" } // The VirtualDeviceSpec data object type encapsulates change @@ -84247,13 +83857,13 @@ type VirtualDeviceConfigSpec struct { // interact with SPBM service. // This is an optional parameter and if user doesn't specify profile, // the default behavior will apply. - Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr" json:"profile,omitempty" vim:"5.5"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr" json:"profile,omitempty"` // BackingInfo configuration options. // // Each BackingSpec corresponds to a BackingInfo object. The member // `VirtualDeviceConfigSpec.backing` refers to the // `VirtualDeviceConfigSpec.device*.*VirtualDevice.backing`. - Backing *VirtualDeviceConfigSpecBackingSpec `xml:"backing,omitempty" json:"backing,omitempty" vim:"6.5"` + Backing *VirtualDeviceConfigSpecBackingSpec `xml:"backing,omitempty" json:"backing,omitempty"` // List of independent filters `VirtualMachineIndependentFilterSpec` // to configure on the virtual device. 
FilterSpec []BaseVirtualMachineBaseIndependentFilterSpec `xml:"filterSpec,omitempty,typeattr" json:"filterSpec,omitempty" vim:"7.0.2.1"` @@ -84283,7 +83893,6 @@ type VirtualDeviceConfigSpecBackingSpec struct { func init() { t["VirtualDeviceConfigSpecBackingSpec"] = reflect.TypeOf((*VirtualDeviceConfigSpecBackingSpec)(nil)).Elem() - minAPIVersionForType["VirtualDeviceConfigSpecBackingSpec"] = "6.5" } // The `VirtualDeviceConnectInfo` data object type @@ -84308,7 +83917,7 @@ type VirtualDeviceConnectInfo struct { // reaches the desired connection state. // The set of possible values are described in // `VirtualDeviceConnectInfoMigrateConnectOp_enum`. - MigrateConnect string `xml:"migrateConnect,omitempty" json:"migrateConnect,omitempty" vim:"6.7"` + MigrateConnect string `xml:"migrateConnect,omitempty" json:"migrateConnect,omitempty"` // Specifies whether or not to connect the device // when the virtual machine starts. StartConnected bool `xml:"startConnected" json:"startConnected"` @@ -84324,7 +83933,7 @@ type VirtualDeviceConnectInfo struct { // Valid only while the // virtual machine is running. The set of possible values is described in // `VirtualDeviceConnectInfoStatus_enum` - Status string `xml:"status,omitempty" json:"status,omitempty" vim:"4.0"` + Status string `xml:"status,omitempty" json:"status,omitempty"` } func init() { @@ -84361,7 +83970,7 @@ type VirtualDeviceDeviceBackingInfo struct { // // If this value is set to TRUE, // deviceName is ignored. - UseAutoDetect *bool `xml:"useAutoDetect" json:"useAutoDetect,omitempty" vim:"2.5"` + UseAutoDetect *bool `xml:"useAutoDetect" json:"useAutoDetect,omitempty"` } func init() { @@ -84375,7 +83984,7 @@ type VirtualDeviceDeviceBackingOption struct { // Flag to indicate whether the specific instance of this device can // be auto-detected on the host instead of having to specify a // particular physical device. - AutoDetectAvailable BoolOption `xml:"autoDetectAvailable" json:"autoDetectAvailable" vim:"2.5"` + AutoDetectAvailable BoolOption `xml:"autoDetectAvailable" json:"autoDetectAvailable"` } func init() { @@ -84420,7 +84029,7 @@ type VirtualDeviceFileBackingInfo struct { // Backing object's durable and unmutable identifier. // // Each backing object has a unique identifier which is not settable. - BackingObjectId string `xml:"backingObjectId,omitempty" json:"backingObjectId,omitempty" vim:"5.5"` + BackingObjectId string `xml:"backingObjectId,omitempty" json:"backingObjectId,omitempty"` } func init() { @@ -84462,7 +84071,7 @@ type VirtualDeviceOption struct { ConnectOption *VirtualDeviceConnectOption `xml:"connectOption,omitempty" json:"connectOption,omitempty"` // If the device can use a bus slot configuration, then the busSlotOption // describes the bus slot options. - BusSlotOption *VirtualDeviceBusSlotOption `xml:"busSlotOption,omitempty" json:"busSlotOption,omitempty" vim:"5.1"` + BusSlotOption *VirtualDeviceBusSlotOption `xml:"busSlotOption,omitempty" json:"busSlotOption,omitempty"` // Data object type that denotes the controller option object that is // valid for controlling this device. ControllerType string `xml:"controllerType,omitempty" json:"controllerType,omitempty"` @@ -84498,8 +84107,8 @@ type VirtualDeviceOption struct { PlugAndPlay bool `xml:"plugAndPlay" json:"plugAndPlay"` // Indicates if this type of device can be hot-removed from the virtual machine // via a reconfigure operation when the virtual machine is powered on. 
- HotRemoveSupported *bool `xml:"hotRemoveSupported" json:"hotRemoveSupported,omitempty" vim:"2.5 U2"` - NumaSupported *bool `xml:"numaSupported" json:"numaSupported,omitempty"` + HotRemoveSupported *bool `xml:"hotRemoveSupported" json:"hotRemoveSupported,omitempty"` + NumaSupported *bool `xml:"numaSupported" json:"numaSupported,omitempty" vim:"8.0.0.1"` } func init() { @@ -84528,7 +84137,6 @@ type VirtualDevicePciBusSlotInfo struct { func init() { t["VirtualDevicePciBusSlotInfo"] = reflect.TypeOf((*VirtualDevicePciBusSlotInfo)(nil)).Elem() - minAPIVersionForType["VirtualDevicePciBusSlotInfo"] = "5.1" } // The `VirtualDevicePipeBackingInfo` data object type @@ -84572,7 +84180,7 @@ type VirtualDeviceRemoteDeviceBackingInfo struct { // // If this value is set to TRUE, // deviceName is ignored. - UseAutoDetect *bool `xml:"useAutoDetect" json:"useAutoDetect,omitempty" vim:"2.5"` + UseAutoDetect *bool `xml:"useAutoDetect" json:"useAutoDetect,omitempty"` } func init() { @@ -84592,7 +84200,7 @@ type VirtualDeviceRemoteDeviceBackingOption struct { // Flag to indicate whether the specific instance of this device can // be auto-detected on the host instead of having to specify a // particular physical device. - AutoDetectAvailable BoolOption `xml:"autoDetectAvailable" json:"autoDetectAvailable" vim:"2.5"` + AutoDetectAvailable BoolOption `xml:"autoDetectAvailable" json:"autoDetectAvailable"` } func init() { @@ -84606,12 +84214,12 @@ type VirtualDeviceURIBackingInfo struct { // Identifies the local host or a system on the network, // depending on the value of `VirtualDeviceURIBackingInfo.direction`. - // - If you use the virtual machine as a server, the URI identifies - // the host on which the virtual machine runs. In this case, - // the host name part of the URI should be empty, or it should - // specify the address of the local host. - // - If you use the virtual machine as a client, the URI identifies - // the remote system on the network. + // - If you use the virtual machine as a server, the URI identifies + // the host on which the virtual machine runs. In this case, + // the host name part of the URI should be empty, or it should + // specify the address of the local host. + // - If you use the virtual machine as a client, the URI identifies + // the remote system on the network. ServiceURI string `xml:"serviceURI" json:"serviceURI"` // The direction of the connection. // @@ -84629,7 +84237,6 @@ type VirtualDeviceURIBackingInfo struct { func init() { t["VirtualDeviceURIBackingInfo"] = reflect.TypeOf((*VirtualDeviceURIBackingInfo)(nil)).Elem() - minAPIVersionForType["VirtualDeviceURIBackingInfo"] = "4.1" } // The `VirtualDeviceURIBackingOption` data object type describes network communication @@ -84645,14 +84252,13 @@ type VirtualDeviceURIBackingOption struct { // List of possible directions. // // Valid directions are: - // - `server` - // - `client` + // - `server` + // - `client` Directions ChoiceOption `xml:"directions" json:"directions"` } func init() { t["VirtualDeviceURIBackingOption"] = reflect.TypeOf((*VirtualDeviceURIBackingOption)(nil)).Elem() - minAPIVersionForType["VirtualDeviceURIBackingOption"] = "4.1" } // This data object type contains information about a disk in a virtual machine. @@ -84701,7 +84307,7 @@ type VirtualDisk struct { // capacity of an existing virtual disk, but can omit it otherwise. // If the disk is on a Virtual Volume datastore the disk size must be a multiple // of a megabyte. 
- CapacityInBytes int64 `xml:"capacityInBytes,omitempty" json:"capacityInBytes,omitempty" vim:"5.5"` + CapacityInBytes int64 `xml:"capacityInBytes,omitempty" json:"capacityInBytes,omitempty"` // Deprecated as of vSphere API 4.1, use // `StorageIOAllocationInfo.shares`. // @@ -84710,7 +84316,7 @@ type VirtualDisk struct { // Deprecated as of vSphere API 6.5, use. // // Resource allocation for storage I/O. - StorageIOAllocation *StorageIOAllocationInfo `xml:"storageIOAllocation,omitempty" json:"storageIOAllocation,omitempty" vim:"4.1"` + StorageIOAllocation *StorageIOAllocationInfo `xml:"storageIOAllocation,omitempty" json:"storageIOAllocation,omitempty"` // Deprecated as of vSphere API 6.5, use `VirtualDisk.vDiskId`. // // Virtual disk durable and unmutable identifier. @@ -84719,25 +84325,25 @@ type VirtualDisk struct { // VirtualDiskManager APIs. // This identifier is a universally unique identifier which is not settable. // VirtualDisk can remain in existence even if it is not associated with VM. - DiskObjectId string `xml:"diskObjectId,omitempty" json:"diskObjectId,omitempty" vim:"5.5"` + DiskObjectId string `xml:"diskObjectId,omitempty" json:"diskObjectId,omitempty"` // Deprecated since vSphere 7.0 because vFlash Read Cache // end of availability. // // vFlash cache configuration supported on this virtual disk. - VFlashCacheConfigInfo *VirtualDiskVFlashCacheConfigInfo `xml:"vFlashCacheConfigInfo,omitempty" json:"vFlashCacheConfigInfo,omitempty" vim:"5.5"` + VFlashCacheConfigInfo *VirtualDiskVFlashCacheConfigInfo `xml:"vFlashCacheConfigInfo,omitempty" json:"vFlashCacheConfigInfo,omitempty"` // IDs of the IO Filters associated with the virtual disk. // // See `IoFilterInfo.id`. This information // is provided when retrieving configuration information for // an existing virtual machine. The client cannot modify this information // on a virtual machine. - Iofilter []string `xml:"iofilter,omitempty" json:"iofilter,omitempty" vim:"6.0"` + Iofilter []string `xml:"iofilter,omitempty" json:"iofilter,omitempty"` // ID of the virtual disk object as the first class entity. // // See `ID` // The ID is a universally unique identifier for the disk lifecycle, // even if the virtual disk is not associated with VM. - VDiskId *ID `xml:"vDiskId,omitempty" json:"vDiskId,omitempty" vim:"6.5"` + VDiskId *ID `xml:"vDiskId,omitempty" json:"vDiskId,omitempty"` // Disk descriptor version of the virtual disk. VDiskVersion int32 `xml:"vDiskVersion,omitempty" json:"vDiskVersion,omitempty" vim:"8.0.1.0"` // Indicates whether a disk with @@ -84745,7 +84351,7 @@ type VirtualDisk struct { // clone from an unmanaged delta disk and hence the // `VirtualDiskFlatVer2BackingInfo.parent` chain to // this delta disk will not be available. - NativeUnmanagedLinkedClone *bool `xml:"nativeUnmanagedLinkedClone" json:"nativeUnmanagedLinkedClone,omitempty" vim:"6.7"` + NativeUnmanagedLinkedClone *bool `xml:"nativeUnmanagedLinkedClone" json:"nativeUnmanagedLinkedClone,omitempty"` // The IDs of the independent filters associated with the virtual disk. 
// // This information is provided when retrieving configuration information for @@ -84774,7 +84380,6 @@ type VirtualDiskAntiAffinityRuleSpec struct { func init() { t["VirtualDiskAntiAffinityRuleSpec"] = reflect.TypeOf((*VirtualDiskAntiAffinityRuleSpec)(nil)).Elem() - minAPIVersionForType["VirtualDiskAntiAffinityRuleSpec"] = "5.0" } // The disk blocks of the specified virtual disk have not been fully @@ -84790,7 +84395,6 @@ type VirtualDiskBlocksNotFullyProvisioned struct { func init() { t["VirtualDiskBlocksNotFullyProvisioned"] = reflect.TypeOf((*VirtualDiskBlocksNotFullyProvisioned)(nil)).Elem() - minAPIVersionForType["VirtualDiskBlocksNotFullyProvisioned"] = "4.0" } type VirtualDiskBlocksNotFullyProvisionedFault VirtualDiskBlocksNotFullyProvisioned @@ -84816,7 +84420,7 @@ type VirtualDiskConfigSpec struct { // // If left unset then `moveAllDiskBackingsAndDisallowSharing` // is assumed. - DiskMoveType string `xml:"diskMoveType,omitempty" json:"diskMoveType,omitempty" vim:"6.0"` + DiskMoveType string `xml:"diskMoveType,omitempty" json:"diskMoveType,omitempty"` // Deprecated since vSphere 7.0 because vFlash Read Cache // end of availability. // @@ -84841,7 +84445,6 @@ type VirtualDiskConfigSpec struct { func init() { t["VirtualDiskConfigSpec"] = reflect.TypeOf((*VirtualDiskConfigSpec)(nil)).Elem() - minAPIVersionForType["VirtualDiskConfigSpec"] = "5.5" } // Delta disk format supported for each datastore type. @@ -84853,14 +84456,13 @@ type VirtualDiskDeltaDiskFormatsSupported struct { // Delta disk formats supported. // // Valid values are: - // - `redoLogFormat` - // - `nativeFormat` + // - `redoLogFormat` + // - `nativeFormat` DeltaDiskFormat ChoiceOption `xml:"deltaDiskFormat" json:"deltaDiskFormat"` } func init() { t["VirtualDiskDeltaDiskFormatsSupported"] = reflect.TypeOf((*VirtualDiskDeltaDiskFormatsSupported)(nil)).Elem() - minAPIVersionForType["VirtualDiskDeltaDiskFormatsSupported"] = "5.1" } // This data object type contains information about backing a virtual disk by @@ -84875,9 +84477,9 @@ type VirtualDiskFlatVer1BackingInfo struct { // The disk persistence mode. // // Valid modes are: - // - `persistent` - // - `nonpersistent` - // - `undoable` + // - `persistent` + // - `nonpersistent` + // - `undoable` // // See also `VirtualDiskMode_enum`. DiskMode string `xml:"diskMode" json:"diskMode"` @@ -84899,7 +84501,7 @@ type VirtualDiskFlatVer1BackingInfo struct { // The guarantee provided by the content ID is that if two disk backings have the // same content ID and are not currently being written to, then reads issued from // the guest operating system to those disk backings will return the same data. - ContentId string `xml:"contentId,omitempty" json:"contentId,omitempty" vim:"4.0"` + ContentId string `xml:"contentId,omitempty" json:"contentId,omitempty"` // The parent of this virtual disk file, if this is a delta disk backing. // // This will be unset if this is not a delta disk backing. @@ -84933,7 +84535,7 @@ type VirtualDiskFlatVer1BackingInfo struct { // This property may only be set if // `deltaDiskBackingsSupported` // is true. - Parent *VirtualDiskFlatVer1BackingInfo `xml:"parent,omitempty" json:"parent,omitempty" vim:"4.0"` + Parent *VirtualDiskFlatVer1BackingInfo `xml:"parent,omitempty" json:"parent,omitempty"` } func init() { @@ -84950,12 +84552,12 @@ type VirtualDiskFlatVer1BackingOption struct { // The disk mode. 
// // Valid disk modes are: - // - `persistent` - // - `nonpersistent` - // - `undoable` - // - `independent_persistent` - // - `independent_nonpersistent` - // - `append` + // - `persistent` + // - `nonpersistent` + // - `undoable` + // - `independent_persistent` + // - `independent_nonpersistent` + // - `append` // // See also `VirtualDiskMode_enum`. DiskMode ChoiceOption `xml:"diskMode" json:"diskMode"` @@ -84989,12 +84591,12 @@ type VirtualDiskFlatVer2BackingInfo struct { // The disk persistence mode. // // Valid modes are: - // - `persistent` - // - `independent_persistent` - // - `independent_nonpersistent` - // - `nonpersistent` - // - `undoable` - // - `append` + // - `persistent` + // - `independent_persistent` + // - `independent_nonpersistent` + // - `nonpersistent` + // - `undoable` + // - `append` // // See also `VirtualDiskMode_enum`. DiskMode string `xml:"diskMode" json:"diskMode"` @@ -85040,9 +84642,9 @@ type VirtualDiskFlatVer2BackingInfo struct { // it is ignored. // When returned as part of a `VirtualMachineConfigInfo`, this // property may be unset if its value is unknown. - EagerlyScrub *bool `xml:"eagerlyScrub" json:"eagerlyScrub,omitempty" vim:"4.0"` + EagerlyScrub *bool `xml:"eagerlyScrub" json:"eagerlyScrub,omitempty"` // Disk UUID for the virtual disk, if available. - Uuid string `xml:"uuid,omitempty" json:"uuid,omitempty" vim:"2.5"` + Uuid string `xml:"uuid,omitempty" json:"uuid,omitempty"` // Content ID of the virtual disk file, if available. // // A content ID indicates the logical contents of the disk backing and its parents. @@ -85054,14 +84656,14 @@ type VirtualDiskFlatVer2BackingInfo struct { // The guarantee provided by the content ID is that if two disk backings have the // same content ID and are not currently being written to, then reads issued from // the guest operating system to those disk backings will return the same data. - ContentId string `xml:"contentId,omitempty" json:"contentId,omitempty" vim:"4.0"` + ContentId string `xml:"contentId,omitempty" json:"contentId,omitempty"` // The change ID of the virtual disk for the corresponding // snapshot or virtual machine. // // This can be used to track // incremental changes to a virtual disk. See // `VirtualMachine.QueryChangedDiskAreas`. - ChangeId string `xml:"changeId,omitempty" json:"changeId,omitempty" vim:"4.0"` + ChangeId string `xml:"changeId,omitempty" json:"changeId,omitempty"` // The parent of this virtual disk file, if this is a delta disk backing. // // This will be unset if this is not a delta disk backing. @@ -85095,7 +84697,7 @@ type VirtualDiskFlatVer2BackingInfo struct { // This property may only be set if // `deltaDiskBackingsSupported` // is true. - Parent *VirtualDiskFlatVer2BackingInfo `xml:"parent,omitempty" json:"parent,omitempty" vim:"4.0"` + Parent *VirtualDiskFlatVer2BackingInfo `xml:"parent,omitempty" json:"parent,omitempty"` // The format of the delta disk. // // This field is valid only for a delta disk. @@ -85113,9 +84715,9 @@ type VirtualDiskFlatVer2BackingInfo struct { // vSphere server does not support relocation of virtual machines with // `nativeFormat`. // An exception is thrown for such requests. - DeltaDiskFormat string `xml:"deltaDiskFormat,omitempty" json:"deltaDiskFormat,omitempty" vim:"5.0"` + DeltaDiskFormat string `xml:"deltaDiskFormat,omitempty" json:"deltaDiskFormat,omitempty"` // Indicates whether the disk backing has digest file enabled. 
- DigestEnabled *bool `xml:"digestEnabled" json:"digestEnabled,omitempty" vim:"5.0"` + DigestEnabled *bool `xml:"digestEnabled" json:"digestEnabled,omitempty"` // Grain size in kB for a delta disk of format type seSparseFormat. // // The default @@ -85125,7 +84727,7 @@ type VirtualDiskFlatVer2BackingInfo struct { // when the base disk is of type FlatVer2BackingInfo. // The `DeltaDiskFormat` must also // be set to seSparseFormat. - DeltaGrainSize int32 `xml:"deltaGrainSize,omitempty" json:"deltaGrainSize,omitempty" vim:"5.1"` + DeltaGrainSize int32 `xml:"deltaGrainSize,omitempty" json:"deltaGrainSize,omitempty"` // The delta disk format variant, if applicable. // // This field is valid only for a delta disk and may specify more detailed @@ -85142,18 +84744,18 @@ type VirtualDiskFlatVer2BackingInfo struct { // `vmfsSparseVariant`. // // For other delta disk formats, this currently remains unspecified. - DeltaDiskFormatVariant string `xml:"deltaDiskFormatVariant,omitempty" json:"deltaDiskFormatVariant,omitempty" vim:"6.0"` + DeltaDiskFormatVariant string `xml:"deltaDiskFormatVariant,omitempty" json:"deltaDiskFormatVariant,omitempty"` // The sharing mode of the virtual disk. // // See `VirtualDiskSharing_enum`. The default value is // no sharing. - Sharing string `xml:"sharing,omitempty" json:"sharing,omitempty" vim:"6.0"` + Sharing string `xml:"sharing,omitempty" json:"sharing,omitempty"` // Virtual Disk Backing encryption options. // // On modification operations the value is ignored, use the specification // `VirtualDeviceConfigSpecBackingSpec.crypto` in // `VirtualDeviceConfigSpec.backing`. - KeyId *CryptoKeyId `xml:"keyId,omitempty" json:"keyId,omitempty" vim:"6.5"` + KeyId *CryptoKeyId `xml:"keyId,omitempty" json:"keyId,omitempty"` } func init() { @@ -85171,9 +84773,9 @@ type VirtualDiskFlatVer2BackingOption struct { // The disk mode. // // Valid disk modes are: - // - `persistent` - // - `independent_persistent` - // - `independent_nonpersistent` + // - `persistent` + // - `independent_persistent` + // - `independent_nonpersistent` // // See also `VirtualDiskMode_enum`. DiskMode ChoiceOption `xml:"diskMode" json:"diskMode"` @@ -85202,9 +84804,9 @@ type VirtualDiskFlatVer2BackingOption struct { // with a `VirtualDisk.capacityInKB` value greater // than its current value will grow the disk to the newly specified size // while the virtual machine is powered on. - HotGrowable bool `xml:"hotGrowable" json:"hotGrowable" vim:"2.5"` + HotGrowable bool `xml:"hotGrowable" json:"hotGrowable"` // Flag to indicate whether this backing supports disk UUID property. - Uuid bool `xml:"uuid" json:"uuid" vim:"2.5"` + Uuid bool `xml:"uuid" json:"uuid"` // Flag to indicate if this backing supports thin-provisioned disks. // // When creating a thin-provisioned disk (or converting an existing disk to @@ -85212,20 +84814,20 @@ type VirtualDiskFlatVer2BackingOption struct { // host accessing it must support thin-provisioning. This flag indicates only // the host capability. See `DatastoreCapability.perFileThinProvisioningSupported` // for datastore capability. - ThinProvisioned *BoolOption `xml:"thinProvisioned,omitempty" json:"thinProvisioned,omitempty" vim:"4.0"` + ThinProvisioned *BoolOption `xml:"thinProvisioned,omitempty" json:"thinProvisioned,omitempty"` // Flag to indicate if this backing supports eager scrubbing. 
- EagerlyScrub *BoolOption `xml:"eagerlyScrub,omitempty" json:"eagerlyScrub,omitempty" vim:"4.0"` + EagerlyScrub *BoolOption `xml:"eagerlyScrub,omitempty" json:"eagerlyScrub,omitempty"` // Deprecated as of vSphere API 5.1, please use // `VirtualDiskFlatVer2BackingOption.deltaDiskFormatsSupported`. // // Delta disk formats supported. // // Valid values are: - // - `redoLogFormat` - // - `nativeFormat` - DeltaDiskFormat *ChoiceOption `xml:"deltaDiskFormat,omitempty" json:"deltaDiskFormat,omitempty" vim:"5.0"` + // - `redoLogFormat` + // - `nativeFormat` + DeltaDiskFormat *ChoiceOption `xml:"deltaDiskFormat,omitempty" json:"deltaDiskFormat,omitempty"` // Delta disk formats supported for each datastore type. - DeltaDiskFormatsSupported []VirtualDiskDeltaDiskFormatsSupported `xml:"deltaDiskFormatsSupported,omitempty" json:"deltaDiskFormatsSupported,omitempty" vim:"5.1"` + DeltaDiskFormatsSupported []VirtualDiskDeltaDiskFormatsSupported `xml:"deltaDiskFormatsSupported,omitempty" json:"deltaDiskFormatsSupported,omitempty"` } func init() { @@ -85246,7 +84848,6 @@ type VirtualDiskId struct { func init() { t["VirtualDiskId"] = reflect.TypeOf((*VirtualDiskId)(nil)).Elem() - minAPIVersionForType["VirtualDiskId"] = "5.0" } // This data object type contains information about backing a virtual disk @@ -85287,7 +84888,6 @@ type VirtualDiskLocalPMemBackingInfo struct { func init() { t["VirtualDiskLocalPMemBackingInfo"] = reflect.TypeOf((*VirtualDiskLocalPMemBackingInfo)(nil)).Elem() - minAPIVersionForType["VirtualDiskLocalPMemBackingInfo"] = "6.7" } // This data object type contains the available options when backing @@ -85321,7 +84921,6 @@ type VirtualDiskLocalPMemBackingOption struct { func init() { t["VirtualDiskLocalPMemBackingOption"] = reflect.TypeOf((*VirtualDiskLocalPMemBackingOption)(nil)).Elem() - minAPIVersionForType["VirtualDiskLocalPMemBackingOption"] = "6.7" } // The disk mode of the specified virtual disk is not supported. @@ -85339,7 +84938,6 @@ type VirtualDiskModeNotSupported struct { func init() { t["VirtualDiskModeNotSupported"] = reflect.TypeOf((*VirtualDiskModeNotSupported)(nil)).Elem() - minAPIVersionForType["VirtualDiskModeNotSupported"] = "4.1" } type VirtualDiskModeNotSupportedFault VirtualDiskModeNotSupported @@ -85355,15 +84953,17 @@ type VirtualDiskOption struct { // Minimum, maximum, and default capacity of the disk. CapacityInKB LongOption `xml:"capacityInKB" json:"capacityInKB"` + // Deprecated as of vSphere8.0 U3, and there is no replacement for it. + // // Minimum, maximum, and default values for Storage I/O allocation. // // See also `StorageIOAllocationInfo`. - IoAllocationOption *StorageIOAllocationOption `xml:"ioAllocationOption,omitempty" json:"ioAllocationOption,omitempty" vim:"4.1"` + IoAllocationOption *StorageIOAllocationOption `xml:"ioAllocationOption,omitempty" json:"ioAllocationOption,omitempty"` // Deprecated since vSphere 7.0 because vFlash Read Cache // end of availability. // // vFlash cache configuration on the disk. 
- VFlashCacheConfigOption *VirtualDiskOptionVFlashCacheConfigOption `xml:"vFlashCacheConfigOption,omitempty" json:"vFlashCacheConfigOption,omitempty" vim:"5.5"` + VFlashCacheConfigOption *VirtualDiskOptionVFlashCacheConfigOption `xml:"vFlashCacheConfigOption,omitempty" json:"vFlashCacheConfigOption,omitempty"` } func init() { @@ -85392,7 +84992,6 @@ type VirtualDiskOptionVFlashCacheConfigOption struct { func init() { t["VirtualDiskOptionVFlashCacheConfigOption"] = reflect.TypeOf((*VirtualDiskOptionVFlashCacheConfigOption)(nil)).Elem() - minAPIVersionForType["VirtualDiskOptionVFlashCacheConfigOption"] = "5.5" } // This data object type contains information about backing a virtual disk @@ -85469,12 +85068,12 @@ type VirtualDiskRawDiskMappingVer1BackingInfo struct { // The disk mode. // // Valid values are: - // - `persistent` - // - `independent_persistent` - // - `independent_nonpersistent` - // - `nonpersistent` - // - `undoable` - // - `append` + // - `persistent` + // - `independent_persistent` + // - `independent_nonpersistent` + // - `nonpersistent` + // - `undoable` + // - `append` // // Disk modes are only supported when the raw disk mapping is using virtual // compatibility mode. @@ -85485,7 +85084,7 @@ type VirtualDiskRawDiskMappingVer1BackingInfo struct { // // Disk UUID is not available if // the raw disk mapping is in physical compatibility mode. - Uuid string `xml:"uuid,omitempty" json:"uuid,omitempty" vim:"2.5"` + Uuid string `xml:"uuid,omitempty" json:"uuid,omitempty"` // Content ID of the virtual disk file, if available. // // A content ID indicates the logical contents of the disk backing and its parents. @@ -85497,14 +85096,14 @@ type VirtualDiskRawDiskMappingVer1BackingInfo struct { // The guarantee provided by the content ID is that if two disk backings have the // same content ID and are not currently being written to, then reads issued from // the guest operating system to those disk backings will return the same data. - ContentId string `xml:"contentId,omitempty" json:"contentId,omitempty" vim:"4.0"` + ContentId string `xml:"contentId,omitempty" json:"contentId,omitempty"` // The change ID of the virtual disk for the corresponding // snapshot or virtual machine. // // This can be used to track // incremental changes to a virtual disk. See // `VirtualMachine.QueryChangedDiskAreas`. - ChangeId string `xml:"changeId,omitempty" json:"changeId,omitempty" vim:"4.0"` + ChangeId string `xml:"changeId,omitempty" json:"changeId,omitempty"` // The parent of this virtual disk file, if this is a delta disk backing. // // This will be unset if this is not a delta disk backing. @@ -85540,7 +85139,7 @@ type VirtualDiskRawDiskMappingVer1BackingInfo struct { // This property may only be set if // `deltaDiskBackingsSupported` // is true. - Parent *VirtualDiskRawDiskMappingVer1BackingInfo `xml:"parent,omitempty" json:"parent,omitempty" vim:"4.0"` + Parent *VirtualDiskRawDiskMappingVer1BackingInfo `xml:"parent,omitempty" json:"parent,omitempty"` // The format of the delta disk. // // This field is valid only for a delta disk. @@ -85555,7 +85154,7 @@ type VirtualDiskRawDiskMappingVer1BackingInfo struct { // // `nativeFormat` is not // supported for bask disk of type RawDiskMappingVer1BackingInfo. - DeltaDiskFormat string `xml:"deltaDiskFormat,omitempty" json:"deltaDiskFormat,omitempty" vim:"6.7"` + DeltaDiskFormat string `xml:"deltaDiskFormat,omitempty" json:"deltaDiskFormat,omitempty"` // Grain size in kB for a delta disk of format type seSparseFormat. 
// // The default @@ -85565,12 +85164,12 @@ type VirtualDiskRawDiskMappingVer1BackingInfo struct { // when the base disk is of type RawDiskMappingVer1BackingInfo. // The `DeltaDiskFormat` must also // be set to seSparseFormat. - DeltaGrainSize int32 `xml:"deltaGrainSize,omitempty" json:"deltaGrainSize,omitempty" vim:"6.7"` + DeltaGrainSize int32 `xml:"deltaGrainSize,omitempty" json:"deltaGrainSize,omitempty"` // The sharing mode of the virtual disk. // // See `VirtualDiskSharing_enum`. The default value is // no sharing. - Sharing string `xml:"sharing,omitempty" json:"sharing,omitempty" vim:"6.0"` + Sharing string `xml:"sharing,omitempty" json:"sharing,omitempty"` } func init() { @@ -85595,14 +85194,14 @@ type VirtualDiskRawDiskMappingVer1BackingOption struct { // The disk mode. // // Valid values are: - // - `persistent` - // - `independent_persistent` - // - `independent_nonpersistent` + // - `persistent` + // - `independent_persistent` + // - `independent_nonpersistent` // // See also `VirtualDiskMode_enum`. DiskMode ChoiceOption `xml:"diskMode" json:"diskMode"` // Flag to indicate whether this backing supports disk UUID property. - Uuid bool `xml:"uuid" json:"uuid" vim:"2.5"` + Uuid bool `xml:"uuid" json:"uuid"` } func init() { @@ -85617,19 +85216,19 @@ type VirtualDiskRawDiskVer2BackingInfo struct { // The name of the raw disk descriptor file. DescriptorFileName string `xml:"descriptorFileName" json:"descriptorFileName"` // Disk UUID for the virtual disk, if available. - Uuid string `xml:"uuid,omitempty" json:"uuid,omitempty" vim:"2.5"` + Uuid string `xml:"uuid,omitempty" json:"uuid,omitempty"` // The change ID of the virtual disk for the corresponding // snapshot or virtual machine. // // This can be used to track // incremental changes to a virtual disk. See // `VirtualMachine.QueryChangedDiskAreas`. - ChangeId string `xml:"changeId,omitempty" json:"changeId,omitempty" vim:"4.0"` + ChangeId string `xml:"changeId,omitempty" json:"changeId,omitempty"` // The sharing mode of the virtual disk. // // See `VirtualDiskSharing_enum`. The default value is // no sharing. - Sharing string `xml:"sharing,omitempty" json:"sharing,omitempty" vim:"6.0"` + Sharing string `xml:"sharing,omitempty" json:"sharing,omitempty"` } func init() { @@ -85646,7 +85245,7 @@ type VirtualDiskRawDiskVer2BackingOption struct { // file. DescriptorFileNameExtensions ChoiceOption `xml:"descriptorFileNameExtensions" json:"descriptorFileNameExtensions"` // Flag to indicate whether this backing supports disk UUID property. - Uuid bool `xml:"uuid" json:"uuid" vim:"2.5"` + Uuid bool `xml:"uuid" json:"uuid"` } func init() { @@ -85668,7 +85267,6 @@ type VirtualDiskRuleSpec struct { func init() { t["VirtualDiskRuleSpec"] = reflect.TypeOf((*VirtualDiskRuleSpec)(nil)).Elem() - minAPIVersionForType["VirtualDiskRuleSpec"] = "6.7" } // Backing type for virtual disks that use the space efficient @@ -85684,12 +85282,12 @@ type VirtualDiskSeSparseBackingInfo struct { // The disk persistence mode. // // Valid modes are: - // - `persistent` - // - `independent_persistent` - // - `independent_nonpersistent` - // - `nonpersistent` - // - `undoable` - // - `append` + // - `persistent` + // - `independent_persistent` + // - `independent_nonpersistent` + // - `nonpersistent` + // - `undoable` + // - `append` // // See also `VirtualDiskMode_enum`. 
DiskMode string `xml:"diskMode" json:"diskMode"` @@ -85769,12 +85367,11 @@ type VirtualDiskSeSparseBackingInfo struct { // On modification operations the value is ignored, use the specification // `VirtualDeviceConfigSpecBackingSpec.crypto` in // `VirtualDeviceConfigSpec.backing`. - KeyId *CryptoKeyId `xml:"keyId,omitempty" json:"keyId,omitempty" vim:"6.5"` + KeyId *CryptoKeyId `xml:"keyId,omitempty" json:"keyId,omitempty"` } func init() { t["VirtualDiskSeSparseBackingInfo"] = reflect.TypeOf((*VirtualDiskSeSparseBackingInfo)(nil)).Elem() - minAPIVersionForType["VirtualDiskSeSparseBackingInfo"] = "5.1" } // Backing options for virtual disks that use the space @@ -85789,9 +85386,9 @@ type VirtualDiskSeSparseBackingOption struct { // The disk mode. // // Valid disk modes are: - // - `persistent` - // - `independent_persistent` - // - `independent_nonpersistent` + // - `persistent` + // - `independent_persistent` + // - `independent_nonpersistent` // // See also `VirtualDiskMode_enum`. DiskMode ChoiceOption `xml:"diskMode" json:"diskMode"` @@ -85825,7 +85422,6 @@ type VirtualDiskSeSparseBackingOption struct { func init() { t["VirtualDiskSeSparseBackingOption"] = reflect.TypeOf((*VirtualDiskSeSparseBackingOption)(nil)).Elem() - minAPIVersionForType["VirtualDiskSeSparseBackingOption"] = "5.1" } // This data object type contains information about backing a virtual disk by @@ -85837,12 +85433,12 @@ type VirtualDiskSparseVer1BackingInfo struct { // The disk persistence mode. // // Valid values are: - // - `persistent` - // - `nonpersistent` - // - `undoable` - // - `independent_persistent` - // - `independent_nonpersistent` - // - `append` + // - `persistent` + // - `nonpersistent` + // - `undoable` + // - `independent_persistent` + // - `independent_nonpersistent` + // - `append` // // See also `VirtualDiskMode_enum`. DiskMode string `xml:"diskMode" json:"diskMode"` @@ -85871,7 +85467,7 @@ type VirtualDiskSparseVer1BackingInfo struct { // The guarantee provided by the content ID is that if two disk backings have the // same content ID and are not currently being written to, then reads issued from // the guest operating system to those disk backings will return the same data. - ContentId string `xml:"contentId,omitempty" json:"contentId,omitempty" vim:"4.0"` + ContentId string `xml:"contentId,omitempty" json:"contentId,omitempty"` // The parent of this virtual disk file, if this is a delta disk backing. // // This will be unset if this is not a delta disk backing. @@ -85905,7 +85501,7 @@ type VirtualDiskSparseVer1BackingInfo struct { // This property may only be set if // `deltaDiskBackingsSupported` // is true. - Parent *VirtualDiskSparseVer1BackingInfo `xml:"parent,omitempty" json:"parent,omitempty" vim:"4.0"` + Parent *VirtualDiskSparseVer1BackingInfo `xml:"parent,omitempty" json:"parent,omitempty"` } func init() { @@ -85920,12 +85516,12 @@ type VirtualDiskSparseVer1BackingOption struct { // The disk mode. // // Valid disk modes are: - // - `persistent` - // - `nonpersistent` - // - `undoable` - // - `independent_persistent` - // - `independent_nonpersistent` - // - `append` + // - `persistent` + // - `nonpersistent` + // - `undoable` + // - `independent_persistent` + // - `independent_nonpersistent` + // - `append` // // See also `VirtualDiskMode_enum`. DiskModes ChoiceOption `xml:"diskModes" json:"diskModes"` @@ -85956,9 +85552,9 @@ type VirtualDiskSparseVer2BackingInfo struct { // The disk persistence mode. 
// // Valid modes are: - // - `persistent` - // - `independent_persistent` - // - `independent_nonpersistent` + // - `persistent` + // - `independent_persistent` + // - `independent_nonpersistent` // // See also `VirtualDiskMode_enum`. DiskMode string `xml:"diskMode" json:"diskMode"` @@ -85977,7 +85573,7 @@ type VirtualDiskSparseVer2BackingInfo struct { // on a virtual machine. SpaceUsedInKB int64 `xml:"spaceUsedInKB,omitempty" json:"spaceUsedInKB,omitempty"` // Disk UUID for the virtual disk, if available. - Uuid string `xml:"uuid,omitempty" json:"uuid,omitempty" vim:"2.5"` + Uuid string `xml:"uuid,omitempty" json:"uuid,omitempty"` // Content ID of the virtual disk file, if available. // // A content ID indicates the logical contents of the disk backing and its parents. @@ -85989,14 +85585,14 @@ type VirtualDiskSparseVer2BackingInfo struct { // The guarantee provided by the content ID is that if two disk backings have the // same content ID and are not currently being written to, then reads issued from // the guest operating system to those disk backings will return the same data. - ContentId string `xml:"contentId,omitempty" json:"contentId,omitempty" vim:"4.0"` + ContentId string `xml:"contentId,omitempty" json:"contentId,omitempty"` // The change ID of the virtual disk for the corresponding // snapshot or virtual machine. // // This can be used to track // incremental changes to a virtual disk. See // `VirtualMachine.QueryChangedDiskAreas`. - ChangeId string `xml:"changeId,omitempty" json:"changeId,omitempty" vim:"4.0"` + ChangeId string `xml:"changeId,omitempty" json:"changeId,omitempty"` // The parent of this virtual disk file, if this is a delta disk backing. // // This will be unset if this is not a delta disk backing. @@ -86030,13 +85626,13 @@ type VirtualDiskSparseVer2BackingInfo struct { // This property may only be set if // `deltaDiskBackingsSupported` // is true. - Parent *VirtualDiskSparseVer2BackingInfo `xml:"parent,omitempty" json:"parent,omitempty" vim:"4.0"` + Parent *VirtualDiskSparseVer2BackingInfo `xml:"parent,omitempty" json:"parent,omitempty"` // Virtual Disk Backing encryption options. // // On modification operations the value is ignored, use the specification // `VirtualDeviceConfigSpecBackingSpec.crypto` in // `VirtualDeviceConfigSpec.backing`. - KeyId *CryptoKeyId `xml:"keyId,omitempty" json:"keyId,omitempty" vim:"6.5"` + KeyId *CryptoKeyId `xml:"keyId,omitempty" json:"keyId,omitempty"` } func init() { @@ -86051,12 +85647,12 @@ type VirtualDiskSparseVer2BackingOption struct { // The disk mode. // // Valid disk modes are: - // - `persistent` - // - `nonpersistent` - // - `undoable` - // - `independent_persistent` - // - `independent_nonpersistent` - // - `append` + // - `persistent` + // - `nonpersistent` + // - `undoable` + // - `independent_persistent` + // - `independent_nonpersistent` + // - `append` // // See also `VirtualDiskMode_enum`. DiskMode ChoiceOption `xml:"diskMode" json:"diskMode"` @@ -86085,9 +85681,9 @@ type VirtualDiskSparseVer2BackingOption struct { // with a `VirtualDisk.capacityInKB` value greater // than its current value will grow the disk to the newly specified size // while the virtual machine is powered on. - HotGrowable bool `xml:"hotGrowable" json:"hotGrowable" vim:"2.5"` + HotGrowable bool `xml:"hotGrowable" json:"hotGrowable"` // Flag to indicate whether this backing supports disk UUID property. 
- Uuid bool `xml:"uuid" json:"uuid" vim:"2.5"` + Uuid bool `xml:"uuid" json:"uuid"` } func init() { @@ -86110,7 +85706,6 @@ type VirtualDiskSpec struct { func init() { t["VirtualDiskSpec"] = reflect.TypeOf((*VirtualDiskSpec)(nil)).Elem() - minAPIVersionForType["VirtualDiskSpec"] = "2.5" } // Data object describes the vFlash cache configuration on this virtual disk. @@ -86165,7 +85760,6 @@ type VirtualDiskVFlashCacheConfigInfo struct { func init() { t["VirtualDiskVFlashCacheConfigInfo"] = reflect.TypeOf((*VirtualDiskVFlashCacheConfigInfo)(nil)).Elem() - minAPIVersionForType["VirtualDiskVFlashCacheConfigInfo"] = "5.5" } // The VirtualE1000 data object type represents an instance @@ -86196,7 +85790,6 @@ type VirtualE1000e struct { func init() { t["VirtualE1000e"] = reflect.TypeOf((*VirtualE1000e)(nil)).Elem() - minAPIVersionForType["VirtualE1000e"] = "5.0" } // The VirtualE1000e option data object type contains the options for the @@ -86207,7 +85800,6 @@ type VirtualE1000eOption struct { func init() { t["VirtualE1000eOption"] = reflect.TypeOf((*VirtualE1000eOption)(nil)).Elem() - minAPIVersionForType["VirtualE1000eOption"] = "5.0" } // The VirtualEnsoniq1371 data object type represents an Ensoniq 1371 @@ -86260,7 +85852,7 @@ type VirtualEthernetCard struct { // can set this property to selectively enable or disable wake-on-LAN. WakeOnLanEnabled *bool `xml:"wakeOnLanEnabled" json:"wakeOnLanEnabled,omitempty"` // Resource requirements of the virtual network adapter - ResourceAllocation *VirtualEthernetCardResourceAllocation `xml:"resourceAllocation,omitempty" json:"resourceAllocation,omitempty" vim:"6.0"` + ResourceAllocation *VirtualEthernetCardResourceAllocation `xml:"resourceAllocation,omitempty" json:"resourceAllocation,omitempty"` // An ID assigned to the virtual network adapter by external management plane or // controller. // @@ -86269,7 +85861,7 @@ type VirtualEthernetCard struct { // also up to external management plane or controller to set, unset or maintain // this property. Setting this property with an empty string value will unset the // property. - ExternalId string `xml:"externalId,omitempty" json:"externalId,omitempty" vim:"6.0"` + ExternalId string `xml:"externalId,omitempty" json:"externalId,omitempty"` // Deprecated as of vSphere API 8.0. VMDirectPath Gen 2 is no longer supported and // there is no replacement. // @@ -86279,7 +85871,7 @@ type VirtualEthernetCard struct { // UPT is only compatible for Vmxnet3 adapter. // Clients can set this property enabled or disabled if ethernet // virtual device is Vmxnet3. 
- UptCompatibilityEnabled *bool `xml:"uptCompatibilityEnabled" json:"uptCompatibilityEnabled,omitempty" vim:"6.0"` + UptCompatibilityEnabled *bool `xml:"uptCompatibilityEnabled" json:"uptCompatibilityEnabled,omitempty"` } func init() { @@ -86294,7 +85886,6 @@ type VirtualEthernetCardDVPortBackingOption struct { func init() { t["VirtualEthernetCardDVPortBackingOption"] = reflect.TypeOf((*VirtualEthernetCardDVPortBackingOption)(nil)).Elem() - minAPIVersionForType["VirtualEthernetCardDVPortBackingOption"] = "4.0" } // The `VirtualEthernetCardDistributedVirtualPortBackingInfo` @@ -86319,7 +85910,6 @@ type VirtualEthernetCardDistributedVirtualPortBackingInfo struct { func init() { t["VirtualEthernetCardDistributedVirtualPortBackingInfo"] = reflect.TypeOf((*VirtualEthernetCardDistributedVirtualPortBackingInfo)(nil)).Elem() - minAPIVersionForType["VirtualEthernetCardDistributedVirtualPortBackingInfo"] = "4.0" } // The `VirtualEthernetCardLegacyNetworkBackingInfo` data object @@ -86357,7 +85947,7 @@ type VirtualEthernetCardNetworkBackingInfo struct { //  . // //   - InPassthroughMode *bool `xml:"inPassthroughMode" json:"inPassthroughMode,omitempty" vim:"2.5 U2"` + InPassthroughMode *bool `xml:"inPassthroughMode" json:"inPassthroughMode,omitempty"` } func init() { @@ -86381,7 +85971,6 @@ type VirtualEthernetCardNotSupported struct { func init() { t["VirtualEthernetCardNotSupported"] = reflect.TypeOf((*VirtualEthernetCardNotSupported)(nil)).Elem() - minAPIVersionForType["VirtualEthernetCardNotSupported"] = "2.5" } type VirtualEthernetCardNotSupportedFault VirtualEthernetCardNotSupported @@ -86403,7 +85992,6 @@ type VirtualEthernetCardOpaqueNetworkBackingInfo struct { func init() { t["VirtualEthernetCardOpaqueNetworkBackingInfo"] = reflect.TypeOf((*VirtualEthernetCardOpaqueNetworkBackingInfo)(nil)).Elem() - minAPIVersionForType["VirtualEthernetCardOpaqueNetworkBackingInfo"] = "5.5" } // This data object type contains the options for @@ -86414,7 +86002,6 @@ type VirtualEthernetCardOpaqueNetworkBackingOption struct { func init() { t["VirtualEthernetCardOpaqueNetworkBackingOption"] = reflect.TypeOf((*VirtualEthernetCardOpaqueNetworkBackingOption)(nil)).Elem() - minAPIVersionForType["VirtualEthernetCardOpaqueNetworkBackingOption"] = "5.5" } // This data object type contains the options for the @@ -86438,12 +86025,12 @@ type VirtualEthernetCardOption struct { // there is no replacement. // // Flag to indicate whether VMDirectPath Gen 2 is available on this device. - VmDirectPathGen2Supported *bool `xml:"vmDirectPathGen2Supported" json:"vmDirectPathGen2Supported,omitempty" vim:"4.1"` + VmDirectPathGen2Supported *bool `xml:"vmDirectPathGen2Supported" json:"vmDirectPathGen2Supported,omitempty"` // Deprecated as of vSphere API 8.0. VMDirectPath Gen 2 is no longer supported and // there is no replacement. // // Flag to indicate whether Universal Pass-through(UPT) is settable on this device. 
- UptCompatibilityEnabled *BoolOption `xml:"uptCompatibilityEnabled,omitempty" json:"uptCompatibilityEnabled,omitempty" vim:"6.0"` + UptCompatibilityEnabled *BoolOption `xml:"uptCompatibilityEnabled,omitempty" json:"uptCompatibilityEnabled,omitempty"` } func init() { @@ -86480,7 +86067,6 @@ type VirtualEthernetCardResourceAllocation struct { func init() { t["VirtualEthernetCardResourceAllocation"] = reflect.TypeOf((*VirtualEthernetCardResourceAllocation)(nil)).Elem() - minAPIVersionForType["VirtualEthernetCardResourceAllocation"] = "6.0" } // The VirtualFloppy data object type contains information about a floppy drive @@ -86577,15 +86163,15 @@ type VirtualHardware struct { // implies numCoresPerSocket is 1. // In other cases, this field represents the actual virtual socket // size seen by the virtual machine. - NumCoresPerSocket int32 `xml:"numCoresPerSocket,omitempty" json:"numCoresPerSocket,omitempty" vim:"5.0"` + NumCoresPerSocket int32 `xml:"numCoresPerSocket,omitempty" json:"numCoresPerSocket,omitempty"` // Cores per socket is automatically determined. AutoCoresPerSocket *bool `xml:"autoCoresPerSocket" json:"autoCoresPerSocket,omitempty" vim:"8.0.0.1"` // Memory size, in MB. MemoryMB int32 `xml:"memoryMB" json:"memoryMB"` // Does this virtual machine have Virtual Intel I/O Controller Hub 7 - VirtualICH7MPresent *bool `xml:"virtualICH7MPresent" json:"virtualICH7MPresent,omitempty" vim:"5.0"` + VirtualICH7MPresent *bool `xml:"virtualICH7MPresent" json:"virtualICH7MPresent,omitempty"` // Does this virtual machine have System Management Controller - VirtualSMCPresent *bool `xml:"virtualSMCPresent" json:"virtualSMCPresent,omitempty" vim:"5.0"` + VirtualSMCPresent *bool `xml:"virtualSMCPresent" json:"virtualSMCPresent,omitempty"` // The set of virtual devices belonging to the virtual machine. // // This list is unordered. @@ -86652,7 +86238,7 @@ type VirtualHardwareOption struct { NumCPU []int32 `xml:"numCPU" json:"numCPU"` // The minimum, maximum and default number of cores per socket that // can be used when distributing virtual CPUs. - NumCoresPerSocket *IntOption `xml:"numCoresPerSocket,omitempty" json:"numCoresPerSocket,omitempty" vim:"5.0"` + NumCoresPerSocket *IntOption `xml:"numCoresPerSocket,omitempty" json:"numCoresPerSocket,omitempty"` // Whether auto cores per socket is supported. AutoCoresPerSocket *BoolOption `xml:"autoCoresPerSocket,omitempty" json:"autoCoresPerSocket,omitempty" vim:"8.0.0.1"` // Can the number of virtual CPUs be changed @@ -86681,7 +86267,7 @@ type VirtualHardwareOption struct { NumUSBControllers IntOption `xml:"numUSBControllers" json:"numUSBControllers"` // The minimum, maximum, and default number of XHCI (USB 3.0) controllers for // this virtual machine configuration. - NumUSBXHCIControllers *IntOption `xml:"numUSBXHCIControllers,omitempty" json:"numUSBXHCIControllers,omitempty" vim:"5.0"` + NumUSBXHCIControllers *IntOption `xml:"numUSBXHCIControllers,omitempty" json:"numUSBXHCIControllers,omitempty"` // The minimum, maximum, and default number of SIO controllers for // this virtual machine configuration. NumSIOControllers IntOption `xml:"numSIOControllers" json:"numSIOControllers"` @@ -86696,24 +86282,24 @@ type VirtualHardwareOption struct { LicensingLimit []string `xml:"licensingLimit,omitempty" json:"licensingLimit,omitempty"` // The minimum, maximum and default number of NPIV WorldWideNode names // supported for this virtual machine configuration. 
- NumSupportedWwnPorts *IntOption `xml:"numSupportedWwnPorts,omitempty" json:"numSupportedWwnPorts,omitempty" vim:"4.0"` + NumSupportedWwnPorts *IntOption `xml:"numSupportedWwnPorts,omitempty" json:"numSupportedWwnPorts,omitempty"` // The minimum, maximum and default number of NPIV WorldWidePort names // supported for this virtual machine configuration. - NumSupportedWwnNodes *IntOption `xml:"numSupportedWwnNodes,omitempty" json:"numSupportedWwnNodes,omitempty" vim:"4.0"` + NumSupportedWwnNodes *IntOption `xml:"numSupportedWwnNodes,omitempty" json:"numSupportedWwnNodes,omitempty"` // Default value and value range for `ResourceConfigOption` - ResourceConfigOption *ResourceConfigOption `xml:"resourceConfigOption,omitempty" json:"resourceConfigOption,omitempty" vim:"4.1"` + ResourceConfigOption *ResourceConfigOption `xml:"resourceConfigOption,omitempty" json:"resourceConfigOption,omitempty"` // The minimum, maximum and default number of virtual NVDIMM controllers // for this virtual machine configuration. - NumNVDIMMControllers *IntOption `xml:"numNVDIMMControllers,omitempty" json:"numNVDIMMControllers,omitempty" vim:"6.7"` + NumNVDIMMControllers *IntOption `xml:"numNVDIMMControllers,omitempty" json:"numNVDIMMControllers,omitempty"` // The minimum, maximum, and default number of virtual TPMs. - NumTPMDevices *IntOption `xml:"numTPMDevices,omitempty" json:"numTPMDevices,omitempty" vim:"6.7"` + NumTPMDevices *IntOption `xml:"numTPMDevices,omitempty" json:"numTPMDevices,omitempty"` // The minimum, maximum, and default number of virtual watchdog timers. - NumWDTDevices *IntOption `xml:"numWDTDevices,omitempty" json:"numWDTDevices,omitempty" vim:"7.0"` + NumWDTDevices *IntOption `xml:"numWDTDevices,omitempty" json:"numWDTDevices,omitempty"` // The minimum, maximum and default number of PrecisionClock devices. - NumPrecisionClockDevices *IntOption `xml:"numPrecisionClockDevices,omitempty" json:"numPrecisionClockDevices,omitempty" vim:"7.0"` + NumPrecisionClockDevices *IntOption `xml:"numPrecisionClockDevices,omitempty" json:"numPrecisionClockDevices,omitempty"` // The minimum, maximum and default value of Intel's Secure Guard Extensions // Enclave Page Cache (EPC) memory. - EpcMemoryMB *LongOption `xml:"epcMemoryMB,omitempty" json:"epcMemoryMB,omitempty" vim:"7.0"` + EpcMemoryMB *LongOption `xml:"epcMemoryMB,omitempty" json:"epcMemoryMB,omitempty"` // Empty for HWv17 & older, \["efi"\] for HWv18. AcpiHostBridgesFirmware []string `xml:"acpiHostBridgesFirmware,omitempty" json:"acpiHostBridgesFirmware,omitempty" vim:"8.0.0.1"` // The minimum, maximum and default number of CPU simultaneous threads. @@ -86738,7 +86324,7 @@ type VirtualHardwareVersionNotSupported struct { // The host. // // Refers instance of `HostSystem`. - Host ManagedObjectReference `xml:"host" json:"host" vim:"2.5"` + Host ManagedObjectReference `xml:"host" json:"host"` } func init() { @@ -86759,7 +86345,6 @@ type VirtualHdAudioCard struct { func init() { t["VirtualHdAudioCard"] = reflect.TypeOf((*VirtualHdAudioCard)(nil)).Elem() - minAPIVersionForType["VirtualHdAudioCard"] = "5.0" } // The VirtualHdAudioCardOption data object type contains the options for a @@ -86770,7 +86355,6 @@ type VirtualHdAudioCardOption struct { func init() { t["VirtualHdAudioCardOption"] = reflect.TypeOf((*VirtualHdAudioCardOption)(nil)).Elem() - minAPIVersionForType["VirtualHdAudioCardOption"] = "5.0" } // The VirtualIDEController data object type specifies a virtual IDE controller. 
@@ -86853,7 +86437,6 @@ type VirtualLsiLogicSASController struct { func init() { t["VirtualLsiLogicSASController"] = reflect.TypeOf((*VirtualLsiLogicSASController)(nil)).Elem() - minAPIVersionForType["VirtualLsiLogicSASController"] = "2.5 U2" } // VirtualLsiLogicSASControllerOption is the data object that contains @@ -86864,7 +86447,6 @@ type VirtualLsiLogicSASControllerOption struct { func init() { t["VirtualLsiLogicSASControllerOption"] = reflect.TypeOf((*VirtualLsiLogicSASControllerOption)(nil)).Elem() - minAPIVersionForType["VirtualLsiLogicSASControllerOption"] = "2.5 U2" } // Specification of scheduling affinity. @@ -86931,7 +86513,7 @@ type VirtualMachineBootOptions struct { // and this flag is set to false error is returned. // \- If this flag is unset and vim.vm.FlagInfo.vbsEnabled is set to // true, the value of this flag is set to true. - EfiSecureBootEnabled *bool `xml:"efiSecureBootEnabled" json:"efiSecureBootEnabled,omitempty" vim:"6.5"` + EfiSecureBootEnabled *bool `xml:"efiSecureBootEnabled" json:"efiSecureBootEnabled,omitempty"` // If set to true, a virtual machine that fails // to boot will try again after the `VirtualMachineBootOptions.bootRetryDelay` // time period has expired. @@ -86939,14 +86521,14 @@ type VirtualMachineBootOptions struct { // When false, // the virtual machine waits indefinitely for you to initiate // boot retry. - BootRetryEnabled *bool `xml:"bootRetryEnabled" json:"bootRetryEnabled,omitempty" vim:"4.1"` + BootRetryEnabled *bool `xml:"bootRetryEnabled" json:"bootRetryEnabled,omitempty"` // Delay in milliseconds before a boot retry. // // The boot retry delay // specifies a time interval between virtual machine boot failure // and the subsequent attempt to boot again. The virtual machine // uses this value only if `VirtualMachineBootOptions.bootRetryEnabled` is true. - BootRetryDelay int64 `xml:"bootRetryDelay,omitempty" json:"bootRetryDelay,omitempty" vim:"4.1"` + BootRetryDelay int64 `xml:"bootRetryDelay,omitempty" json:"bootRetryDelay,omitempty"` // Boot order. // // Listed devices are used for booting. After list @@ -86959,16 +86541,15 @@ type VirtualMachineBootOptions struct { // it supports. If bootable device is not reached before platform's // limit is hit, boot will fail. At least single entry is supported // by all products supporting boot order settings. - BootOrder []BaseVirtualMachineBootOptionsBootableDevice `xml:"bootOrder,omitempty,typeattr" json:"bootOrder,omitempty" vim:"5.0"` + BootOrder []BaseVirtualMachineBootOptionsBootableDevice `xml:"bootOrder,omitempty,typeattr" json:"bootOrder,omitempty"` // Protocol to attempt during PXE network boot or NetBoot. // // See also `VirtualMachineBootOptionsNetworkBootProtocolType_enum`. - NetworkBootProtocol string `xml:"networkBootProtocol,omitempty" json:"networkBootProtocol,omitempty" vim:"6.0"` + NetworkBootProtocol string `xml:"networkBootProtocol,omitempty" json:"networkBootProtocol,omitempty"` } func init() { t["VirtualMachineBootOptions"] = reflect.TypeOf((*VirtualMachineBootOptions)(nil)).Elem() - minAPIVersionForType["VirtualMachineBootOptions"] = "2.5" } // Bootable CDROM. @@ -86980,7 +86561,6 @@ type VirtualMachineBootOptionsBootableCdromDevice struct { func init() { t["VirtualMachineBootOptionsBootableCdromDevice"] = reflect.TypeOf((*VirtualMachineBootOptionsBootableCdromDevice)(nil)).Elem() - minAPIVersionForType["VirtualMachineBootOptionsBootableCdromDevice"] = "5.0" } // Bootable device. 
@@ -86990,7 +86570,6 @@ type VirtualMachineBootOptionsBootableDevice struct { func init() { t["VirtualMachineBootOptionsBootableDevice"] = reflect.TypeOf((*VirtualMachineBootOptionsBootableDevice)(nil)).Elem() - minAPIVersionForType["VirtualMachineBootOptionsBootableDevice"] = "5.0" } // Bootable disk. @@ -87004,7 +86583,6 @@ type VirtualMachineBootOptionsBootableDiskDevice struct { func init() { t["VirtualMachineBootOptionsBootableDiskDevice"] = reflect.TypeOf((*VirtualMachineBootOptionsBootableDiskDevice)(nil)).Elem() - minAPIVersionForType["VirtualMachineBootOptionsBootableDiskDevice"] = "5.0" } // Bootable ethernet adapter. @@ -87020,7 +86598,6 @@ type VirtualMachineBootOptionsBootableEthernetDevice struct { func init() { t["VirtualMachineBootOptionsBootableEthernetDevice"] = reflect.TypeOf((*VirtualMachineBootOptionsBootableEthernetDevice)(nil)).Elem() - minAPIVersionForType["VirtualMachineBootOptionsBootableEthernetDevice"] = "5.0" } // Bootable floppy disk. @@ -87030,7 +86607,6 @@ type VirtualMachineBootOptionsBootableFloppyDevice struct { func init() { t["VirtualMachineBootOptionsBootableFloppyDevice"] = reflect.TypeOf((*VirtualMachineBootOptionsBootableFloppyDevice)(nil)).Elem() - minAPIVersionForType["VirtualMachineBootOptionsBootableFloppyDevice"] = "5.0" } // This data object type contains information about the @@ -87063,9 +86639,9 @@ type VirtualMachineCapability struct { // always false. // // Indicates whether or not snapshots can be disabled. - DisableSnapshotsSupported bool `xml:"disableSnapshotsSupported" json:"disableSnapshotsSupported" vim:"2.5"` + DisableSnapshotsSupported bool `xml:"disableSnapshotsSupported" json:"disableSnapshotsSupported"` // Indicates whether or not the snapshot tree can be locked. - LockSnapshotsSupported bool `xml:"lockSnapshotsSupported" json:"lockSnapshotsSupported" vim:"2.5"` + LockSnapshotsSupported bool `xml:"lockSnapshotsSupported" json:"lockSnapshotsSupported"` // Indicates whether console preferences can be set for this virtual machine. ConsolePreferencesSupported bool `xml:"consolePreferencesSupported" json:"consolePreferencesSupported"` // Indicates whether CPU feature requirements masks can be set for this @@ -87085,44 +86661,44 @@ type VirtualMachineCapability struct { // Supports tools auto-update. ToolsAutoUpdateSupported bool `xml:"toolsAutoUpdateSupported" json:"toolsAutoUpdateSupported"` // Supports virtual machine NPIV WWN. - VmNpivWwnSupported bool `xml:"vmNpivWwnSupported" json:"vmNpivWwnSupported" vim:"2.5"` + VmNpivWwnSupported bool `xml:"vmNpivWwnSupported" json:"vmNpivWwnSupported"` // Supports assigning NPIV WWN to virtual machines that don't have RDM disks. - NpivWwnOnNonRdmVmSupported bool `xml:"npivWwnOnNonRdmVmSupported" json:"npivWwnOnNonRdmVmSupported" vim:"2.5"` + NpivWwnOnNonRdmVmSupported bool `xml:"npivWwnOnNonRdmVmSupported" json:"npivWwnOnNonRdmVmSupported"` // Indicates whether the NPIV disabling operation is supported the virtual machine. - VmNpivWwnDisableSupported *bool `xml:"vmNpivWwnDisableSupported" json:"vmNpivWwnDisableSupported,omitempty" vim:"4.0"` + VmNpivWwnDisableSupported *bool `xml:"vmNpivWwnDisableSupported" json:"vmNpivWwnDisableSupported,omitempty"` // Indicates whether the update of NPIV WWNs are supported on the virtual machine. 
- VmNpivWwnUpdateSupported *bool `xml:"vmNpivWwnUpdateSupported" json:"vmNpivWwnUpdateSupported,omitempty" vim:"4.0"` + VmNpivWwnUpdateSupported *bool `xml:"vmNpivWwnUpdateSupported" json:"vmNpivWwnUpdateSupported,omitempty"` // Flag indicating whether the virtual machine has a configurable // *swapfile placement policy*. - SwapPlacementSupported bool `xml:"swapPlacementSupported" json:"swapPlacementSupported" vim:"2.5"` + SwapPlacementSupported bool `xml:"swapPlacementSupported" json:"swapPlacementSupported"` // Indicates whether asking tools to sync time with the host is supported. - ToolsSyncTimeSupported bool `xml:"toolsSyncTimeSupported" json:"toolsSyncTimeSupported" vim:"2.5"` + ToolsSyncTimeSupported bool `xml:"toolsSyncTimeSupported" json:"toolsSyncTimeSupported"` // Indicates whether or not the use of nested page table hardware support // can be explicitly set. - VirtualMmuUsageSupported bool `xml:"virtualMmuUsageSupported" json:"virtualMmuUsageSupported" vim:"2.5"` + VirtualMmuUsageSupported bool `xml:"virtualMmuUsageSupported" json:"virtualMmuUsageSupported"` // Indicates whether resource settings for disks can be // applied to this virtual machine. - DiskSharesSupported bool `xml:"diskSharesSupported" json:"diskSharesSupported" vim:"2.5"` + DiskSharesSupported bool `xml:"diskSharesSupported" json:"diskSharesSupported"` // Indicates whether boot options can be configured // for this virtual machine. - BootOptionsSupported bool `xml:"bootOptionsSupported" json:"bootOptionsSupported" vim:"2.5"` + BootOptionsSupported bool `xml:"bootOptionsSupported" json:"bootOptionsSupported"` // Indicates whether automatic boot retry can be // configured for this virtual machine. - BootRetryOptionsSupported *bool `xml:"bootRetryOptionsSupported" json:"bootRetryOptionsSupported,omitempty" vim:"4.1"` + BootRetryOptionsSupported *bool `xml:"bootRetryOptionsSupported" json:"bootRetryOptionsSupported,omitempty"` // Flag indicating whether the video ram size of this virtual machine // can be configured. - SettingVideoRamSizeSupported bool `xml:"settingVideoRamSizeSupported" json:"settingVideoRamSizeSupported" vim:"2.5"` + SettingVideoRamSizeSupported bool `xml:"settingVideoRamSizeSupported" json:"settingVideoRamSizeSupported"` // Indicates whether of not this virtual machine supports // setting the display topology of the console window. // // This capability depends on the guest operating system // configured for this virtual machine. - SettingDisplayTopologySupported *bool `xml:"settingDisplayTopologySupported" json:"settingDisplayTopologySupported,omitempty" vim:"2.5 U2"` + SettingDisplayTopologySupported *bool `xml:"settingDisplayTopologySupported" json:"settingDisplayTopologySupported,omitempty"` // Deprecated as of vSphere API 6.0. // // Indicates whether record and replay functionality is supported on this // virtual machine. - RecordReplaySupported *bool `xml:"recordReplaySupported" json:"recordReplaySupported,omitempty" vim:"4.0"` + RecordReplaySupported *bool `xml:"recordReplaySupported" json:"recordReplaySupported,omitempty"` // Indicates that change tracking is supported for virtual disks of this // virtual machine. // @@ -87130,53 +86706,53 @@ type VirtualMachineCapability struct { // not be available for all disks of the virtual machine. For example, // passthru raw disk mappings or disks backed by any Ver1BackingInfo cannot // be tracked. 
- ChangeTrackingSupported *bool `xml:"changeTrackingSupported" json:"changeTrackingSupported,omitempty" vim:"4.0"` + ChangeTrackingSupported *bool `xml:"changeTrackingSupported" json:"changeTrackingSupported,omitempty"` // Indicates whether multiple virtual cores per socket is supported on this VM. - MultipleCoresPerSocketSupported *bool `xml:"multipleCoresPerSocketSupported" json:"multipleCoresPerSocketSupported,omitempty" vim:"5.0"` + MultipleCoresPerSocketSupported *bool `xml:"multipleCoresPerSocketSupported" json:"multipleCoresPerSocketSupported,omitempty"` // Indicates that host based replication is supported on this virtual // machine. // // However, even if host based replication is supported, // it might not be available for all disk types. For example, passthru // raw disk mappings can not be replicated. - HostBasedReplicationSupported *bool `xml:"hostBasedReplicationSupported" json:"hostBasedReplicationSupported,omitempty" vim:"5.0"` + HostBasedReplicationSupported *bool `xml:"hostBasedReplicationSupported" json:"hostBasedReplicationSupported,omitempty"` // Indicates whether features like guest OS auto-lock and MKS connection // controls are supported for this virtual machine. - GuestAutoLockSupported *bool `xml:"guestAutoLockSupported" json:"guestAutoLockSupported,omitempty" vim:"5.0"` + GuestAutoLockSupported *bool `xml:"guestAutoLockSupported" json:"guestAutoLockSupported,omitempty"` // Indicates whether // `memoryReservationLockedToMax` // may be set to true for this virtual machine. - MemoryReservationLockSupported *bool `xml:"memoryReservationLockSupported" json:"memoryReservationLockSupported,omitempty" vim:"5.0"` + MemoryReservationLockSupported *bool `xml:"memoryReservationLockSupported" json:"memoryReservationLockSupported,omitempty"` // Indicates whether featureRequirement feature is supported. - FeatureRequirementSupported *bool `xml:"featureRequirementSupported" json:"featureRequirementSupported,omitempty" vim:"5.1"` + FeatureRequirementSupported *bool `xml:"featureRequirementSupported" json:"featureRequirementSupported,omitempty"` // Indicates whether a monitor type change is supported while this virtual // machine is in the poweredOn state. - PoweredOnMonitorTypeChangeSupported *bool `xml:"poweredOnMonitorTypeChangeSupported" json:"poweredOnMonitorTypeChangeSupported,omitempty" vim:"5.1"` + PoweredOnMonitorTypeChangeSupported *bool `xml:"poweredOnMonitorTypeChangeSupported" json:"poweredOnMonitorTypeChangeSupported,omitempty"` // Indicates whether this virtual machine supports the Flex-SE // (space-efficient, sparse) format for virtual disks. - SeSparseDiskSupported *bool `xml:"seSparseDiskSupported" json:"seSparseDiskSupported,omitempty" vim:"5.1"` + SeSparseDiskSupported *bool `xml:"seSparseDiskSupported" json:"seSparseDiskSupported,omitempty"` // Indicates whether this virtual machine supports nested hardware-assisted // virtualization. - NestedHVSupported *bool `xml:"nestedHVSupported" json:"nestedHVSupported,omitempty" vim:"5.1"` + NestedHVSupported *bool `xml:"nestedHVSupported" json:"nestedHVSupported,omitempty"` // Indicates whether this virtual machine supports virtualized CPU performance // counters. - VPMCSupported *bool `xml:"vPMCSupported" json:"vPMCSupported,omitempty" vim:"5.1"` + VPMCSupported *bool `xml:"vPMCSupported" json:"vPMCSupported,omitempty"` // Indicates whether secureBoot is supported for this virtual machine. 
- SecureBootSupported *bool `xml:"secureBootSupported" json:"secureBootSupported,omitempty" vim:"6.5"` + SecureBootSupported *bool `xml:"secureBootSupported" json:"secureBootSupported,omitempty"` // Indicates whether this virtual machine supports Per-VM EVC mode. - PerVmEvcSupported *bool `xml:"perVmEvcSupported" json:"perVmEvcSupported,omitempty" vim:"6.7"` + PerVmEvcSupported *bool `xml:"perVmEvcSupported" json:"perVmEvcSupported,omitempty"` // Indicates that `VirtualMachineFlagInfo.virtualMmuUsage` is // ignored by this virtual machine, always operating as if "on" was selected. - VirtualMmuUsageIgnored *bool `xml:"virtualMmuUsageIgnored" json:"virtualMmuUsageIgnored,omitempty" vim:"6.7"` + VirtualMmuUsageIgnored *bool `xml:"virtualMmuUsageIgnored" json:"virtualMmuUsageIgnored,omitempty"` // Indicates that `VirtualMachineFlagInfo.virtualExecUsage` is // ignored by this virtual machine, always operating as if "hvOn" was selected. - VirtualExecUsageIgnored *bool `xml:"virtualExecUsageIgnored" json:"virtualExecUsageIgnored,omitempty" vim:"6.7"` + VirtualExecUsageIgnored *bool `xml:"virtualExecUsageIgnored" json:"virtualExecUsageIgnored,omitempty"` // Indicates whether this virtual machine supports creating disk-only snapshots // in suspended state. // // If this capability is not set, the snapshot of a // virtual machine in suspended state will always include memory. - DiskOnlySnapshotOnSuspendedVMSupported *bool `xml:"diskOnlySnapshotOnSuspendedVMSupported" json:"diskOnlySnapshotOnSuspendedVMSupported,omitempty" vim:"6.7"` + DiskOnlySnapshotOnSuspendedVMSupported *bool `xml:"diskOnlySnapshotOnSuspendedVMSupported" json:"diskOnlySnapshotOnSuspendedVMSupported,omitempty"` // Indicates whether this virtual machine supports suspending to memory. SuspendToMemorySupported *bool `xml:"suspendToMemorySupported" json:"suspendToMemorySupported,omitempty" vim:"7.0.2.0"` // Indicates support for allowing or disallowing all tools time @@ -87210,7 +86786,7 @@ type VirtualMachineCdromInfo struct { // Description of the physical device. // // This is set only by the server. - Description string `xml:"description,omitempty" json:"description,omitempty" vim:"6.5"` + Description string `xml:"description,omitempty" json:"description,omitempty"` } func init() { @@ -87244,13 +86820,13 @@ type VirtualMachineCloneSpec struct { // newly cloned virtual machine will use. // // The location specifies: - // - A datastore where the virtual machine will be located on physical - // storage. - // This is always provided because it indicates where the newly - // created clone will be copied. - // - a resource pool and optionally a host. The resource pool - // determines what compute resources will be available to the clone - // and the host indicates which machine will host the clone. + // - A datastore where the virtual machine will be located on physical + // storage. + // This is always provided because it indicates where the newly + // created clone will be copied. + // - a resource pool and optionally a host. The resource pool + // determines what compute resources will be available to the clone + // and the host indicates which machine will host the clone. Location VirtualMachineRelocateSpec `xml:"location" json:"location"` // Specifies whether or not the new virtual machine should be marked as a // template. @@ -87295,7 +86871,7 @@ type VirtualMachineCloneSpec struct { // to exist on the destination host for the clone. // // Refers instance of `VirtualMachineSnapshot`. 
- Snapshot *ManagedObjectReference `xml:"snapshot,omitempty" json:"snapshot,omitempty" vim:"4.0"` + Snapshot *ManagedObjectReference `xml:"snapshot,omitempty" json:"snapshot,omitempty"` // Flag indicating whether to retain a copy of the source virtual machine's // memory state in the clone. // @@ -87316,7 +86892,7 @@ type VirtualMachineCloneSpec struct { // only applies for a snapshot taken on a running or suspended // virtual machine with the 'memory' parameter set to true, because otherwise // the snapshot has no memory state. This flag defaults to false. - Memory *bool `xml:"memory" json:"memory,omitempty" vim:"5.5"` + Memory *bool `xml:"memory" json:"memory,omitempty"` // Provisioning policy for virtual TPM devices during VM clone operations. // // The list of supported values is defined in `VirtualMachineCloneSpecTpmProvisionPolicy_enum`. @@ -87368,14 +86944,14 @@ type VirtualMachineConfigInfo struct { // in "12345678-abcd-1234-cdef-123456789abc" format. Uuid string `xml:"uuid" json:"uuid"` // Time the virtual machine's configuration was created. - CreateDate *time.Time `xml:"createDate" json:"createDate,omitempty" vim:"6.7"` + CreateDate *time.Time `xml:"createDate" json:"createDate,omitempty"` // VirtualCenter-specific 128-bit UUID of a virtual machine, represented // as a hexademical string. // // This identifier is used by VirtualCenter to // uniquely identify all virtual machine instances, including those that // may share the same SMBIOS UUID. - InstanceUuid string `xml:"instanceUuid,omitempty" json:"instanceUuid,omitempty" vim:"4.0"` + InstanceUuid string `xml:"instanceUuid,omitempty" json:"instanceUuid,omitempty"` // A 64-bit node WWN (World Wide Name). // // These WWNs are paired with the @@ -87388,44 +86964,44 @@ type VirtualMachineConfigInfo struct { // with all port WWNs listed in `VirtualMachineConfigInfo.npivPortWorldWideName`. If this property or // `VirtualMachineConfigInfo.npivPortWorldWideName` is empty or unset, NPIV WWN is disabled for the // virtual machine. - NpivNodeWorldWideName []int64 `xml:"npivNodeWorldWideName,omitempty" json:"npivNodeWorldWideName,omitempty" vim:"2.5"` + NpivNodeWorldWideName []int64 `xml:"npivNodeWorldWideName,omitempty" json:"npivNodeWorldWideName,omitempty"` // A 64-bit port WWN (World Wide Name). // // For detail description on WWN, see // `VirtualMachineConfigInfo.npivNodeWorldWideName`. - NpivPortWorldWideName []int64 `xml:"npivPortWorldWideName,omitempty" json:"npivPortWorldWideName,omitempty" vim:"2.5"` + NpivPortWorldWideName []int64 `xml:"npivPortWorldWideName,omitempty" json:"npivPortWorldWideName,omitempty"` // The source that provides/generates the assigned WWNs. // // See also `VirtualMachineConfigInfoNpivWwnType_enum`. - NpivWorldWideNameType string `xml:"npivWorldWideNameType,omitempty" json:"npivWorldWideNameType,omitempty" vim:"2.5"` + NpivWorldWideNameType string `xml:"npivWorldWideNameType,omitempty" json:"npivWorldWideNameType,omitempty"` // The NPIV node WWNs to be extended from the original list of WWN nummbers. // // This // property should be set to desired number which is an aggregate of existing // plus new numbers. Desired Node WWNs should always be greater than the existing // number of node WWNs - NpivDesiredNodeWwns int16 `xml:"npivDesiredNodeWwns,omitempty" json:"npivDesiredNodeWwns,omitempty" vim:"4.0"` + NpivDesiredNodeWwns int16 `xml:"npivDesiredNodeWwns,omitempty" json:"npivDesiredNodeWwns,omitempty"` // The NPIV port WWNs to be extended from the original list of WWN nummbers. 
// // This // property should be set to desired number which is an aggregate of existing // plus new numbers. Desired Node WWNs should always be greater than the existing // number of port WWNs - NpivDesiredPortWwns int16 `xml:"npivDesiredPortWwns,omitempty" json:"npivDesiredPortWwns,omitempty" vim:"4.0"` + NpivDesiredPortWwns int16 `xml:"npivDesiredPortWwns,omitempty" json:"npivDesiredPortWwns,omitempty"` // This property is used to enable or disable the NPIV capability on a desired // virtual machine on a temporary basis. // // When this property is set NPIV Vport // will not be instantiated by the VMX process of the Virtual Machine. When this // property is set port WWNs and node WWNs in the VM configuration are preserved. - NpivTemporaryDisabled *bool `xml:"npivTemporaryDisabled" json:"npivTemporaryDisabled,omitempty" vim:"4.0"` + NpivTemporaryDisabled *bool `xml:"npivTemporaryDisabled" json:"npivTemporaryDisabled,omitempty"` // This property is used to check whether the NPIV can be enabled on the Virtual // machine with non-rdm disks in the configuration, so this is potentially not // enabling npiv on vmfs disks. // // Also this property is used to check whether RDM // is required to generate WWNs for a virtual machine. - NpivOnNonRdmDisks *bool `xml:"npivOnNonRdmDisks" json:"npivOnNonRdmDisks,omitempty" vim:"4.0"` + NpivOnNonRdmDisks *bool `xml:"npivOnNonRdmDisks" json:"npivOnNonRdmDisks,omitempty"` // Hash incorporating the virtual machine's config file location // and the UUID of the host assigned to run the virtual machine. LocationId string `xml:"locationId,omitempty" json:"locationId,omitempty"` @@ -87443,7 +87019,7 @@ type VirtualMachineConfigInfo struct { // or `other-64`. // // See also `VirtualMachineConfigInfo.guestFullName`. - AlternateGuestName string `xml:"alternateGuestName" json:"alternateGuestName" vim:"2.5"` + AlternateGuestName string `xml:"alternateGuestName" json:"alternateGuestName"` // Description for the virtual machine. Annotation string `xml:"annotation,omitempty" json:"annotation,omitempty"` // Information about the files associated with a virtual machine. @@ -87467,21 +87043,21 @@ type VirtualMachineConfigInfo struct { // // The vcpuConfig array is indexed // by vcpu number. - VcpuConfig []VirtualMachineVcpuConfig `xml:"vcpuConfig,omitempty" json:"vcpuConfig,omitempty" vim:"7.0"` + VcpuConfig []VirtualMachineVcpuConfig `xml:"vcpuConfig,omitempty" json:"vcpuConfig,omitempty"` // Resource limits for CPU. CpuAllocation *ResourceAllocationInfo `xml:"cpuAllocation,omitempty" json:"cpuAllocation,omitempty"` // Resource limits for memory. MemoryAllocation *ResourceAllocationInfo `xml:"memoryAllocation,omitempty" json:"memoryAllocation,omitempty"` // The latency-sensitivity of the virtual machine. - LatencySensitivity *LatencySensitivity `xml:"latencySensitivity,omitempty" json:"latencySensitivity,omitempty" vim:"5.1"` + LatencySensitivity *LatencySensitivity `xml:"latencySensitivity,omitempty" json:"latencySensitivity,omitempty"` // Whether memory can be added while this virtual machine is running. - MemoryHotAddEnabled *bool `xml:"memoryHotAddEnabled" json:"memoryHotAddEnabled,omitempty" vim:"2.5 U2"` + MemoryHotAddEnabled *bool `xml:"memoryHotAddEnabled" json:"memoryHotAddEnabled,omitempty"` // Whether virtual processors can be added while this // virtual machine is running. 
- CpuHotAddEnabled *bool `xml:"cpuHotAddEnabled" json:"cpuHotAddEnabled,omitempty" vim:"2.5 U2"` + CpuHotAddEnabled *bool `xml:"cpuHotAddEnabled" json:"cpuHotAddEnabled,omitempty"` // Whether virtual processors can be removed while this // virtual machine is running. - CpuHotRemoveEnabled *bool `xml:"cpuHotRemoveEnabled" json:"cpuHotRemoveEnabled,omitempty" vim:"2.5 U2"` + CpuHotRemoveEnabled *bool `xml:"cpuHotRemoveEnabled" json:"cpuHotRemoveEnabled,omitempty"` // The maximum amount of memory, in MB, than can be added to a // running virtual machine. // @@ -87489,7 +87065,7 @@ type VirtualMachineConfigInfo struct { // virtual machine and is specified only if // `VirtualMachineConfigInfo.memoryHotAddEnabled` // is set to true. - HotPlugMemoryLimit int64 `xml:"hotPlugMemoryLimit,omitempty" json:"hotPlugMemoryLimit,omitempty" vim:"2.5 U2"` + HotPlugMemoryLimit int64 `xml:"hotPlugMemoryLimit,omitempty" json:"hotPlugMemoryLimit,omitempty"` // Memory, in MB that can be added to a running virtual machine // must be in increments of this value and needs be a // multiple of this value. @@ -87497,7 +87073,7 @@ type VirtualMachineConfigInfo struct { // This value is determined by the virtual machine and is specified // only if `VirtualMachineConfigSpec.memoryHotAddEnabled` // has been set to true. - HotPlugMemoryIncrementSize int64 `xml:"hotPlugMemoryIncrementSize,omitempty" json:"hotPlugMemoryIncrementSize,omitempty" vim:"2.5 U2"` + HotPlugMemoryIncrementSize int64 `xml:"hotPlugMemoryIncrementSize,omitempty" json:"hotPlugMemoryIncrementSize,omitempty"` // Affinity settings for CPU. CpuAffinity *VirtualMachineAffinityInfo `xml:"cpuAffinity,omitempty" json:"cpuAffinity,omitempty"` // Deprecated since vSphere 6.0. @@ -87535,24 +87111,24 @@ type VirtualMachineConfigInfo struct { // policy is "inherit". // // See also `VirtualMachineConfigInfoSwapPlacementType_enum`. - SwapPlacement string `xml:"swapPlacement,omitempty" json:"swapPlacement,omitempty" vim:"2.5"` + SwapPlacement string `xml:"swapPlacement,omitempty" json:"swapPlacement,omitempty"` // Configuration options for the boot behavior of the virtual machine. - BootOptions *VirtualMachineBootOptions `xml:"bootOptions,omitempty" json:"bootOptions,omitempty" vim:"2.5"` + BootOptions *VirtualMachineBootOptions `xml:"bootOptions,omitempty" json:"bootOptions,omitempty"` // Fault Tolerance settings for this virtual machine. - FtInfo BaseFaultToleranceConfigInfo `xml:"ftInfo,omitempty,typeattr" json:"ftInfo,omitempty" vim:"4.0"` + FtInfo BaseFaultToleranceConfigInfo `xml:"ftInfo,omitempty,typeattr" json:"ftInfo,omitempty"` // vSphere Replication settings for this virtual machine. // // Note this may become deprecated in the future releases. We discourage // any unnecessary dependency on this field. - RepConfig *ReplicationConfigSpec `xml:"repConfig,omitempty" json:"repConfig,omitempty" vim:"6.0"` + RepConfig *ReplicationConfigSpec `xml:"repConfig,omitempty" json:"repConfig,omitempty"` // vApp meta-data for the virtual machine - VAppConfig BaseVmConfigInfo `xml:"vAppConfig,omitempty,typeattr" json:"vAppConfig,omitempty" vim:"4.0"` + VAppConfig BaseVmConfigInfo `xml:"vAppConfig,omitempty,typeattr" json:"vAppConfig,omitempty"` // Indicates whether user-configured virtual asserts will be // triggered during virtual machine replay. 
- VAssertsEnabled *bool `xml:"vAssertsEnabled" json:"vAssertsEnabled,omitempty" vim:"4.0"` + VAssertsEnabled *bool `xml:"vAssertsEnabled" json:"vAssertsEnabled,omitempty"` // Indicates whether changed block tracking for this VM's disks // is active. - ChangeTrackingEnabled *bool `xml:"changeTrackingEnabled" json:"changeTrackingEnabled,omitempty" vim:"4.0"` + ChangeTrackingEnabled *bool `xml:"changeTrackingEnabled" json:"changeTrackingEnabled,omitempty"` // Information about firmware type for this Virtual Machine. // // Possible values are described in @@ -87562,46 +87138,46 @@ type VirtualMachineConfigInfo struct { // this property is set to bios, error is returned. // \- If this property is unset and vim.vm.FlagInfo.vbsEnabled is set // to true, this property is set to efi. - Firmware string `xml:"firmware,omitempty" json:"firmware,omitempty" vim:"5.0"` + Firmware string `xml:"firmware,omitempty" json:"firmware,omitempty"` // Indicates the maximum number of active remote display connections // that the virtual machine will support. - MaxMksConnections int32 `xml:"maxMksConnections,omitempty" json:"maxMksConnections,omitempty" vim:"5.0"` + MaxMksConnections int32 `xml:"maxMksConnections,omitempty" json:"maxMksConnections,omitempty"` // Indicates whether the guest operating system will logout any active // sessions whenever there are no remote display connections open to // the virtual machine. - GuestAutoLockEnabled *bool `xml:"guestAutoLockEnabled" json:"guestAutoLockEnabled,omitempty" vim:"5.0"` + GuestAutoLockEnabled *bool `xml:"guestAutoLockEnabled" json:"guestAutoLockEnabled,omitempty"` // Specifies that this VM is managed by a VC Extension. // // See the // `managedBy` property in the ConfigSpec // for more details. - ManagedBy *ManagedByInfo `xml:"managedBy,omitempty" json:"managedBy,omitempty" vim:"5.0"` + ManagedBy *ManagedByInfo `xml:"managedBy,omitempty" json:"managedBy,omitempty"` // If set true, memory resource reservation for this virtual machine will always be // equal to the virtual machine's memory size; increases in memory size will be // rejected when a corresponding reservation increase is not possible. - MemoryReservationLockedToMax *bool `xml:"memoryReservationLockedToMax" json:"memoryReservationLockedToMax,omitempty" vim:"5.0"` + MemoryReservationLockedToMax *bool `xml:"memoryReservationLockedToMax" json:"memoryReservationLockedToMax,omitempty"` // Set of values to be used only to perform admission control when // determining if a host has sufficient resources for the virtual // machine to power on. - InitialOverhead *VirtualMachineConfigInfoOverheadInfo `xml:"initialOverhead,omitempty" json:"initialOverhead,omitempty" vim:"5.0"` + InitialOverhead *VirtualMachineConfigInfoOverheadInfo `xml:"initialOverhead,omitempty" json:"initialOverhead,omitempty"` // Indicates whether this VM is configured to use nested // hardware-assisted virtualization. - NestedHVEnabled *bool `xml:"nestedHVEnabled" json:"nestedHVEnabled,omitempty" vim:"5.1"` + NestedHVEnabled *bool `xml:"nestedHVEnabled" json:"nestedHVEnabled,omitempty"` // Indicates whether this VM have vurtual CPU performance counters // enabled. - VPMCEnabled *bool `xml:"vPMCEnabled" json:"vPMCEnabled,omitempty" vim:"5.1"` + VPMCEnabled *bool `xml:"vPMCEnabled" json:"vPMCEnabled,omitempty"` // Configuration of scheduled hardware upgrades and result from last // attempt to run scheduled hardware upgrade. // // See also `ScheduledHardwareUpgradeInfo`. 
- ScheduledHardwareUpgradeInfo *ScheduledHardwareUpgradeInfo `xml:"scheduledHardwareUpgradeInfo,omitempty" json:"scheduledHardwareUpgradeInfo,omitempty" vim:"5.1"` + ScheduledHardwareUpgradeInfo *ScheduledHardwareUpgradeInfo `xml:"scheduledHardwareUpgradeInfo,omitempty" json:"scheduledHardwareUpgradeInfo,omitempty"` // Fork configuration of this virtual machines. // // If unset, this virtual machine // is not configured for fork. // // See also `VirtualMachineForkConfigInfo`. - ForkConfigInfo *VirtualMachineForkConfigInfo `xml:"forkConfigInfo,omitempty" json:"forkConfigInfo,omitempty" vim:"6.0"` + ForkConfigInfo *VirtualMachineForkConfigInfo `xml:"forkConfigInfo,omitempty" json:"forkConfigInfo,omitempty"` // Deprecated since vSphere 7.0 because vFlash Read Cache // end of availability. // @@ -87611,39 +87187,39 @@ type VirtualMachineConfigInfo struct { // This reservation must be allocated to power on the VM. // See `VirtualMachineRuntimeInfo.vFlashCacheAllocation` for allocated // reservation when VM is powered on. - VFlashCacheReservation int64 `xml:"vFlashCacheReservation,omitempty" json:"vFlashCacheReservation,omitempty" vim:"5.5"` + VFlashCacheReservation int64 `xml:"vFlashCacheReservation,omitempty" json:"vFlashCacheReservation,omitempty"` // A checksum of vmx config file. - VmxConfigChecksum []byte `xml:"vmxConfigChecksum,omitempty" json:"vmxConfigChecksum,omitempty" vim:"6.0"` + VmxConfigChecksum []byte `xml:"vmxConfigChecksum,omitempty" json:"vmxConfigChecksum,omitempty"` // Whether to allow tunneling of clients from the guest VM into the // common message bus on the host network. - MessageBusTunnelEnabled *bool `xml:"messageBusTunnelEnabled" json:"messageBusTunnelEnabled,omitempty" vim:"6.0"` + MessageBusTunnelEnabled *bool `xml:"messageBusTunnelEnabled" json:"messageBusTunnelEnabled,omitempty"` // Virtual Machine Object Identifier. // // With Object-based Storage systems, Virtual Machine home directory // is backed by an object. // This identifier will be set only if VM directory resided on // object-based storage systems. - VmStorageObjectId string `xml:"vmStorageObjectId,omitempty" json:"vmStorageObjectId,omitempty" vim:"6.0"` + VmStorageObjectId string `xml:"vmStorageObjectId,omitempty" json:"vmStorageObjectId,omitempty"` // Virtual Machine Swap Object Identifier. // // With Object-based Storage systems, VM's Swap is backed by an object. // This identifier will be set only if VM swap resided on // object-based storage systems. - SwapStorageObjectId string `xml:"swapStorageObjectId,omitempty" json:"swapStorageObjectId,omitempty" vim:"6.0"` + SwapStorageObjectId string `xml:"swapStorageObjectId,omitempty" json:"swapStorageObjectId,omitempty"` // Virtual Machine cryptographic options. - KeyId *CryptoKeyId `xml:"keyId,omitempty" json:"keyId,omitempty" vim:"6.5"` + KeyId *CryptoKeyId `xml:"keyId,omitempty" json:"keyId,omitempty"` // Guest integrity platform configuration - GuestIntegrityInfo *VirtualMachineGuestIntegrityInfo `xml:"guestIntegrityInfo,omitempty" json:"guestIntegrityInfo,omitempty" vim:"6.5"` + GuestIntegrityInfo *VirtualMachineGuestIntegrityInfo `xml:"guestIntegrityInfo,omitempty" json:"guestIntegrityInfo,omitempty"` // An enum describing whether encrypted vMotion is required for this VM. // // See `VirtualMachineConfigSpecEncryptedVMotionModes_enum` for allowed values. // This defaults to opportunistic for a regular VM, and will be set to // required for an encrypted VM. 
- MigrateEncryption string `xml:"migrateEncryption,omitempty" json:"migrateEncryption,omitempty" vim:"6.5"` + MigrateEncryption string `xml:"migrateEncryption,omitempty" json:"migrateEncryption,omitempty"` // Configuration of SGX, Software Guard Extensions for the VM. - SgxInfo *VirtualMachineSgxInfo `xml:"sgxInfo,omitempty" json:"sgxInfo,omitempty" vim:"7.0"` + SgxInfo *VirtualMachineSgxInfo `xml:"sgxInfo,omitempty" json:"sgxInfo,omitempty"` // Content Library Item info. - ContentLibItemInfo *VirtualMachineContentLibraryItemInfo `xml:"contentLibItemInfo,omitempty" json:"contentLibItemInfo,omitempty" vim:"7.0"` + ContentLibItemInfo *VirtualMachineContentLibraryItemInfo `xml:"contentLibItemInfo,omitempty" json:"contentLibItemInfo,omitempty"` // An enum describing whether encrypted Fault Tolerance is required for this // VM. // @@ -87654,7 +87230,7 @@ type VirtualMachineConfigInfo struct { // will be set to opportunistic. FtEncryptionMode string `xml:"ftEncryptionMode,omitempty" json:"ftEncryptionMode,omitempty" vim:"7.0.2.0"` // GMM configuration - GuestMonitoringModeInfo *VirtualMachineGuestMonitoringModeInfo `xml:"guestMonitoringModeInfo,omitempty" json:"guestMonitoringModeInfo,omitempty" vim:"7.0"` + GuestMonitoringModeInfo *VirtualMachineGuestMonitoringModeInfo `xml:"guestMonitoringModeInfo,omitempty" json:"guestMonitoringModeInfo,omitempty"` // SEV (Secure Encrypted Virtualization) enabled or not. // // SEV is enabled @@ -87714,6 +87290,22 @@ type VirtualMachineConfigInfo struct { // guest memory should be selected. If unset, the current value is // left unchanged. FixedPassthruHotPlugEnabled *bool `xml:"fixedPassthruHotPlugEnabled" json:"fixedPassthruHotPlugEnabled,omitempty" vim:"8.0.1.0"` + // Indicates whether FT Metro Cluster is enabled/disabled. + // + // \- If TRUE, FT Metro Cluster is enabled for the VM. An implicit + // Anti-HostGroup will be generated from HostGroup defined for FT + // primary, then affine the primary with one HostGroup and affine the + // secondary with another HostGroup. + // \- If FALSE or unset, FT Metro Cluster is disabled for the VM. Both FT + // primary and secondary will be put in the same HostGroup. + MetroFtEnabled *bool `xml:"metroFtEnabled" json:"metroFtEnabled,omitempty" vim:"8.0.3.0"` + // Indicate the Host Group (`ClusterHostGroup`) for FT + // Metro Cluster enabled Virtual Machine. + // + // Based on the selected Host Group, FT can divide the hosts in the cluster + // into two groups and ensure to place FT primary and FT secondary in + // different groups. + MetroFtHostGroup string `xml:"metroFtHostGroup,omitempty" json:"metroFtHostGroup,omitempty" vim:"8.0.3.0"` } func init() { @@ -87752,7 +87344,6 @@ type VirtualMachineConfigInfoOverheadInfo struct { func init() { t["VirtualMachineConfigInfoOverheadInfo"] = reflect.TypeOf((*VirtualMachineConfigInfoOverheadInfo)(nil)).Elem() - minAPIVersionForType["VirtualMachineConfigInfoOverheadInfo"] = "5.0" } // This configuration data object type contains information about the execution @@ -87798,16 +87389,16 @@ type VirtualMachineConfigOption struct { // // The acceptable monitor types // are enumerated by `VirtualMachineFlagInfoMonitorType_enum`. 
- SupportedMonitorType []string `xml:"supportedMonitorType" json:"supportedMonitorType" vim:"2.5"` + SupportedMonitorType []string `xml:"supportedMonitorType" json:"supportedMonitorType"` // Specifies the supported property transports that are // available for the OVF environment - SupportedOvfEnvironmentTransport []string `xml:"supportedOvfEnvironmentTransport,omitempty" json:"supportedOvfEnvironmentTransport,omitempty" vim:"4.0"` + SupportedOvfEnvironmentTransport []string `xml:"supportedOvfEnvironmentTransport,omitempty" json:"supportedOvfEnvironmentTransport,omitempty"` // Specifies the supported transports for the OVF // installation phase. - SupportedOvfInstallTransport []string `xml:"supportedOvfInstallTransport,omitempty" json:"supportedOvfInstallTransport,omitempty" vim:"4.0"` + SupportedOvfInstallTransport []string `xml:"supportedOvfInstallTransport,omitempty" json:"supportedOvfInstallTransport,omitempty"` // The relations between the properties of the virtual // machine config spec. - PropertyRelations []VirtualMachinePropertyRelation `xml:"propertyRelations,omitempty" json:"propertyRelations,omitempty" vim:"6.7"` + PropertyRelations []VirtualMachinePropertyRelation `xml:"propertyRelations,omitempty" json:"propertyRelations,omitempty"` } func init() { @@ -87834,7 +87425,7 @@ type VirtualMachineConfigOptionDescriptor struct { // Indicates whether the associated set of configuration options // can be used for virtual machine creation on a given host or // cluster. - CreateSupported *bool `xml:"createSupported" json:"createSupported,omitempty" vim:"2.5 U2"` + CreateSupported *bool `xml:"createSupported" json:"createSupported,omitempty"` // Indicates whether the associated set of virtual machine // configuration options is the default one for a given host or // cluster. @@ -87846,14 +87437,14 @@ type VirtualMachineConfigOptionDescriptor struct { // If this setting is TRUE, virtual machine creates will use the // associated set of configuration options, unless a config version is // explicitly specified in the `ConfigSpec`. - DefaultConfigOption *bool `xml:"defaultConfigOption" json:"defaultConfigOption,omitempty" vim:"2.5 U2"` + DefaultConfigOption *bool `xml:"defaultConfigOption" json:"defaultConfigOption,omitempty"` // Indicates whether the associated set of configuration options // can be used to power on a virtual machine on a given host or // cluster. - RunSupported *bool `xml:"runSupported" json:"runSupported,omitempty" vim:"5.1"` + RunSupported *bool `xml:"runSupported" json:"runSupported,omitempty"` // Indicates whether the associated set of configuration options // can be used as a virtual hardware upgrade target. - UpgradeSupported *bool `xml:"upgradeSupported" json:"upgradeSupported,omitempty" vim:"5.1"` + UpgradeSupported *bool `xml:"upgradeSupported" json:"upgradeSupported,omitempty"` } func init() { @@ -87906,7 +87497,7 @@ type VirtualMachineConfigSpec struct { // client will be ignored. // // Reconfigure privilege: VirtualMachine.Config.Settings - CreateDate *time.Time `xml:"createDate" json:"createDate,omitempty" vim:"6.7"` + CreateDate *time.Time `xml:"createDate" json:"createDate,omitempty"` // 128-bit SMBIOS UUID of a virtual machine represented as a hexadecimal string // in "12345678-abcd-1234-cdef-123456789abc" format. // @@ -87935,7 +87526,7 @@ type VirtualMachineConfigSpec struct { // the identifer is not allowed for Fault Tolerance virtual machines. 
// // Reconfigure privilege: VirtualMachine.Config.Settings - InstanceUuid string `xml:"instanceUuid,omitempty" json:"instanceUuid,omitempty" vim:"4.0"` + InstanceUuid string `xml:"instanceUuid,omitempty" json:"instanceUuid,omitempty"` // The NPIV node WWN to be assigned to a virtual machine. // // This property should only @@ -87947,7 +87538,7 @@ type VirtualMachineConfigSpec struct { // For detail description on WWN, see `VirtualMachineConfigInfo.npivNodeWorldWideName`. // // Reconfigure privilege: VirtualMachine.Config.Settings. - NpivNodeWorldWideName []int64 `xml:"npivNodeWorldWideName,omitempty" json:"npivNodeWorldWideName,omitempty" vim:"2.5"` + NpivNodeWorldWideName []int64 `xml:"npivNodeWorldWideName,omitempty" json:"npivNodeWorldWideName,omitempty"` // The NPIV port WWN to be assigned to a virtual machine. // // This property should only @@ -87959,7 +87550,7 @@ type VirtualMachineConfigSpec struct { // For detail description on WWN, see `VirtualMachineConfigInfo.npivPortWorldWideName`. // // Reconfigure privilege: VirtualMachine.Config.Settings. - NpivPortWorldWideName []int64 `xml:"npivPortWorldWideName,omitempty" json:"npivPortWorldWideName,omitempty" vim:"2.5"` + NpivPortWorldWideName []int64 `xml:"npivPortWorldWideName,omitempty" json:"npivPortWorldWideName,omitempty"` // This property is used internally in the communication between the // VirtualCenter server and ESX Server to indicate the source for // `VirtualMachineConfigSpec.npivNodeWorldWideName` and @@ -87974,21 +87565,21 @@ type VirtualMachineConfigSpec struct { // "set". // // Reconfigure privilege: VirtualMachine.Config.Settings. - NpivWorldWideNameType string `xml:"npivWorldWideNameType,omitempty" json:"npivWorldWideNameType,omitempty" vim:"2.5"` + NpivWorldWideNameType string `xml:"npivWorldWideNameType,omitempty" json:"npivWorldWideNameType,omitempty"` // The NPIV node WWNs to be extended from the original list of WWN nummbers. // // This // property should be set to desired number which is an aggregate of existing // plus new numbers. Desired Node WWNs should always be greater than the existing // number of node WWNs - NpivDesiredNodeWwns int16 `xml:"npivDesiredNodeWwns,omitempty" json:"npivDesiredNodeWwns,omitempty" vim:"4.0"` + NpivDesiredNodeWwns int16 `xml:"npivDesiredNodeWwns,omitempty" json:"npivDesiredNodeWwns,omitempty"` // The NPIV port WWNs to be extended from the original list of WWN nummbers. // // This // property should be set to desired number which is an aggregate of existing // plus new numbers. Desired Node WWNs should always be greater than the existing // number of port WWNs - NpivDesiredPortWwns int16 `xml:"npivDesiredPortWwns,omitempty" json:"npivDesiredPortWwns,omitempty" vim:"4.0"` + NpivDesiredPortWwns int16 `xml:"npivDesiredPortWwns,omitempty" json:"npivDesiredPortWwns,omitempty"` // This property is used to enable or disable the NPIV capability on a desired // virtual machine on a temporary basis. // @@ -87997,14 +87588,14 @@ type VirtualMachineConfigSpec struct { // property is set port WWNs and node WWNs in the VM configuration are preserved. // // Reconfigure privilege: VirtualMachine.Config.Settings. 
- NpivTemporaryDisabled *bool `xml:"npivTemporaryDisabled" json:"npivTemporaryDisabled,omitempty" vim:"4.0"` + NpivTemporaryDisabled *bool `xml:"npivTemporaryDisabled" json:"npivTemporaryDisabled,omitempty"` // This property is used to check whether the NPIV can be enabled on the Virtual // machine with non-rdm disks in the configuration, so this is potentially not // enabling npiv on vmfs disks. // // Also this property is used to check whether RDM // is required to generate WWNs for a virtual machine. - NpivOnNonRdmDisks *bool `xml:"npivOnNonRdmDisks" json:"npivOnNonRdmDisks,omitempty" vim:"4.0"` + NpivOnNonRdmDisks *bool `xml:"npivOnNonRdmDisks" json:"npivOnNonRdmDisks,omitempty"` // The flag to indicate what type of NPIV WWN operation is going to be performed // on the virtual machine. // @@ -88014,7 +87605,7 @@ type VirtualMachineConfigSpec struct { // Reconfigure privilege: VirtualMachine.Config.Settings. // // See also `VirtualMachineConfigSpecNpivWwnOp_enum`. - NpivWorldWideNameOp string `xml:"npivWorldWideNameOp,omitempty" json:"npivWorldWideNameOp,omitempty" vim:"2.5"` + NpivWorldWideNameOp string `xml:"npivWorldWideNameOp,omitempty" json:"npivWorldWideNameOp,omitempty"` // 128-bit hash based on the virtual machine's configuration file location // and the UUID of the host assigned to run the virtual machine. // @@ -88034,7 +87625,7 @@ type VirtualMachineConfigSpec struct { // or `other-64`. // // Reconfigure privilege: VirtualMachine.Config.Settings - AlternateGuestName string `xml:"alternateGuestName,omitempty" json:"alternateGuestName,omitempty" vim:"2.5"` + AlternateGuestName string `xml:"alternateGuestName,omitempty" json:"alternateGuestName,omitempty"` // User-provided description of the virtual machine. // // Because this property @@ -88080,7 +87671,7 @@ type VirtualMachineConfigSpec struct { // // The vcpuConfig array is indexed // by vcpu number. - VcpuConfig []VirtualMachineVcpuConfig `xml:"vcpuConfig,omitempty" json:"vcpuConfig,omitempty" vim:"7.0"` + VcpuConfig []VirtualMachineVcpuConfig `xml:"vcpuConfig,omitempty" json:"vcpuConfig,omitempty"` // Number of cores among which to distribute // CPUs in this virtual machine. // @@ -88090,7 +87681,7 @@ type VirtualMachineConfigSpec struct { // if present, and use default coresPerSocket behavior. // Leave "numCoresPerSocket" unset to continue with existing // configuration (either manual or default). - NumCoresPerSocket int32 `xml:"numCoresPerSocket,omitempty" json:"numCoresPerSocket,omitempty" vim:"5.0"` + NumCoresPerSocket int32 `xml:"numCoresPerSocket,omitempty" json:"numCoresPerSocket,omitempty"` // Size of a virtual machine's memory, in MB. // // Reconfigure privilege: VirtualMachine.Config.Memory @@ -88102,7 +87693,7 @@ type VirtualMachineConfigSpec struct { // powered-off. // // Reconfigure privilege: VirtualMachine.Config.Memory - MemoryHotAddEnabled *bool `xml:"memoryHotAddEnabled" json:"memoryHotAddEnabled,omitempty" vim:"2.5 U2"` + MemoryHotAddEnabled *bool `xml:"memoryHotAddEnabled" json:"memoryHotAddEnabled,omitempty"` // Indicates whether or not virtual processors can be added to // the virtual machine while it is running. // @@ -88110,7 +87701,7 @@ type VirtualMachineConfigSpec struct { // powered-off. 
// // Reconfigure privilege: VirtualMachine.Config.CpuCount - CpuHotAddEnabled *bool `xml:"cpuHotAddEnabled" json:"cpuHotAddEnabled,omitempty" vim:"2.5 U2"` + CpuHotAddEnabled *bool `xml:"cpuHotAddEnabled" json:"cpuHotAddEnabled,omitempty"` // Indicates whether or not virtual processors can be removed // from the virtual machine while it is running. // @@ -88118,38 +87709,38 @@ type VirtualMachineConfigSpec struct { // powered-off. // // Reconfigure privilege: VirtualMachine.Config.CpuCount - CpuHotRemoveEnabled *bool `xml:"cpuHotRemoveEnabled" json:"cpuHotRemoveEnabled,omitempty" vim:"2.5 U2"` + CpuHotRemoveEnabled *bool `xml:"cpuHotRemoveEnabled" json:"cpuHotRemoveEnabled,omitempty"` // Does this virtual machine have Virtual Intel I/O Controller Hub 7 - VirtualICH7MPresent *bool `xml:"virtualICH7MPresent" json:"virtualICH7MPresent,omitempty" vim:"5.0"` + VirtualICH7MPresent *bool `xml:"virtualICH7MPresent" json:"virtualICH7MPresent,omitempty"` // Does this virtual machine have System Management Controller - VirtualSMCPresent *bool `xml:"virtualSMCPresent" json:"virtualSMCPresent,omitempty" vim:"5.0"` + VirtualSMCPresent *bool `xml:"virtualSMCPresent" json:"virtualSMCPresent,omitempty"` // Set of virtual devices being modified by the configuration operation. // // Reconfigure privileges: - // - VirtualMachine.Config.Resource if setting the "shares" property of - // a new or existing VirtualDisk device - // - VirtualMachine.Config.RawDevice if adding, removing, or modifying a - // raw device (also required when creating a virtual machine) - // - VirtualMachine.Config.HostUSBDevice if adding, removing, or - // modifying a VirtualUSB device backed by a host USB device (also - // required when creating a virtual machine). - // - VirtualMachine.Interact.DeviceConnection if setting the "connectable" - // property of a connectable device - // - VirtualMachine.Interact.SetCDMedia if setting the "backing" property - // of a VirtualCdrom device - // - VirtualMachine.Interact.SetFloppyMedia if setting the "backing" property - // of a VirtualFloppy device - // - VirtualMachine.Config.EditDevice if setting any property of a - // non-CDROM non-Floppy device - // - VirtualMachine.Config.AddExistingDisk if adding a VirtualDisk, and - // the fileOperation is unset (also required when creating a virtual machine) - // - VirtualMachine.Config.AddNewDisk if adding a VirtualDisk and the - // fileOperation is set (also required when creating a virtual machine) - // - VirtualMachine.Config.RemoveDisk if removing a VirtualDisk device - // - VirtualMachine.Config.AddRemoveDevice if adding or removing any - // device other than disk, raw, or USB device. - // - Network.Assign if if setting the "backing" property of a - // VirtualEthernetCard device. + // - VirtualMachine.Config.Resource if setting the "shares" property of + // a new or existing VirtualDisk device + // - VirtualMachine.Config.RawDevice if adding, removing, or modifying a + // raw device (also required when creating a virtual machine) + // - VirtualMachine.Config.HostUSBDevice if adding, removing, or + // modifying a VirtualUSB device backed by a host USB device (also + // required when creating a virtual machine). 
+ // - VirtualMachine.Interact.DeviceConnection if setting the "connectable" + // property of a connectable device + // - VirtualMachine.Interact.SetCDMedia if setting the "backing" property + // of a VirtualCdrom device + // - VirtualMachine.Interact.SetFloppyMedia if setting the "backing" property + // of a VirtualFloppy device + // - VirtualMachine.Config.EditDevice if setting any property of a + // non-CDROM non-Floppy device + // - VirtualMachine.Config.AddExistingDisk if adding a VirtualDisk, and + // the fileOperation is unset (also required when creating a virtual machine) + // - VirtualMachine.Config.AddNewDisk if adding a VirtualDisk and the + // fileOperation is set (also required when creating a virtual machine) + // - VirtualMachine.Config.RemoveDisk if removing a VirtualDisk device + // - VirtualMachine.Config.AddRemoveDevice if adding or removing any + // device other than disk, raw, or USB device. + // - Network.Assign if if setting the "backing" property of a + // VirtualEthernetCard device. DeviceChange []BaseVirtualDeviceConfigSpec `xml:"deviceChange,omitempty,typeattr" json:"deviceChange,omitempty"` // Resource limits for CPU. // @@ -88162,7 +87753,7 @@ type VirtualMachineConfigSpec struct { // The latency-sensitivity setting of the virtual machine. // // Reconfigure privilege: VirtualMachine.Config.Resource - LatencySensitivity *LatencySensitivity `xml:"latencySensitivity,omitempty" json:"latencySensitivity,omitempty" vim:"5.1"` + LatencySensitivity *LatencySensitivity `xml:"latencySensitivity,omitempty" json:"latencySensitivity,omitempty"` // Affinity settings for CPU. // // Reconfigure privilege: VirtualMachine.Config.Resource @@ -88210,7 +87801,7 @@ type VirtualMachineConfigSpec struct { // (also required when setting this property while creating a virtual machine) // // See also `VirtualMachineConfigInfoSwapPlacementType_enum`. - SwapPlacement string `xml:"swapPlacement,omitempty" json:"swapPlacement,omitempty" vim:"2.5"` + SwapPlacement string `xml:"swapPlacement,omitempty" json:"swapPlacement,omitempty"` // Settings that control the boot behavior of the virtual // machine. // @@ -88218,20 +87809,20 @@ type VirtualMachineConfigSpec struct { // of the virtual machine. // // Reconfigure privilege: VirtualMachine.Config.Settings - BootOptions *VirtualMachineBootOptions `xml:"bootOptions,omitempty" json:"bootOptions,omitempty" vim:"2.5"` + BootOptions *VirtualMachineBootOptions `xml:"bootOptions,omitempty" json:"bootOptions,omitempty"` // Configuration of vApp meta-data for a virtual machine - VAppConfig BaseVmConfigSpec `xml:"vAppConfig,omitempty,typeattr" json:"vAppConfig,omitempty" vim:"4.0"` + VAppConfig BaseVmConfigSpec `xml:"vAppConfig,omitempty,typeattr" json:"vAppConfig,omitempty"` // Fault Tolerance settings for this virtual machine. - FtInfo BaseFaultToleranceConfigInfo `xml:"ftInfo,omitempty,typeattr" json:"ftInfo,omitempty" vim:"4.0"` + FtInfo BaseFaultToleranceConfigInfo `xml:"ftInfo,omitempty,typeattr" json:"ftInfo,omitempty"` // vSphere Replication settings. // // Note this may become deprecated in the future releases. We // discourage any unnecessary dependency on this field. 
- RepConfig *ReplicationConfigSpec `xml:"repConfig,omitempty" json:"repConfig,omitempty" vim:"6.0"` + RepConfig *ReplicationConfigSpec `xml:"repConfig,omitempty" json:"repConfig,omitempty"` // Set to true, if the vApp configuration should be removed // // Reconfigure privilege: VApp.ApplicationConfig - VAppConfigRemoved *bool `xml:"vAppConfigRemoved" json:"vAppConfigRemoved,omitempty" vim:"4.0"` + VAppConfigRemoved *bool `xml:"vAppConfigRemoved" json:"vAppConfigRemoved,omitempty"` // Indicates whether user-configured virtual asserts will be // triggered during virtual machine replay. // @@ -88240,7 +87831,7 @@ type VirtualMachineConfigSpec struct { // // Enabling this functionality can potentially cause some // performance overhead during virtual machine execution. - VAssertsEnabled *bool `xml:"vAssertsEnabled" json:"vAssertsEnabled,omitempty" vim:"4.0"` + VAssertsEnabled *bool `xml:"vAssertsEnabled" json:"vAssertsEnabled,omitempty"` // Setting to control enabling/disabling changed block tracking for // the virtual disks of this VM. // @@ -88253,24 +87844,24 @@ type VirtualMachineConfigSpec struct { // // Reconfigure privilege: VirtualMachine.Config.ChangeTracking // (also required when setting this property while creating a virtual machine) - ChangeTrackingEnabled *bool `xml:"changeTrackingEnabled" json:"changeTrackingEnabled,omitempty" vim:"4.0"` + ChangeTrackingEnabled *bool `xml:"changeTrackingEnabled" json:"changeTrackingEnabled,omitempty"` // Set the desired firmware type for this Virtual Machine. // // Possible values are described in // `GuestOsDescriptorFirmwareType_enum` - Firmware string `xml:"firmware,omitempty" json:"firmware,omitempty" vim:"5.0"` + Firmware string `xml:"firmware,omitempty" json:"firmware,omitempty"` // If set, this setting limits the maximum number of active remote // display connections that the virtual machine will support to // the specified value. // // Reconfigure privilege: VirtualMachine.Config.MksControl - MaxMksConnections int32 `xml:"maxMksConnections,omitempty" json:"maxMksConnections,omitempty" vim:"5.0"` + MaxMksConnections int32 `xml:"maxMksConnections,omitempty" json:"maxMksConnections,omitempty"` // If set to True, this causes the guest operating system to automatically // logout any active sessions whenever there are no remote display // connections open to the virtual machine. // // Reconfigure privilege: VirtualMachine.Config.MksControl - GuestAutoLockEnabled *bool `xml:"guestAutoLockEnabled" json:"guestAutoLockEnabled,omitempty" vim:"5.0"` + GuestAutoLockEnabled *bool `xml:"guestAutoLockEnabled" json:"guestAutoLockEnabled,omitempty"` // Specifies that this VM is managed by a VC Extension. // // This information is primarily used in the Client to show a custom icon for @@ -88284,7 +87875,7 @@ type VirtualMachineConfigSpec struct { // empty `extensionKey`. // // Reconfigure privilege: VirtualMachine.Config.ManagedBy - ManagedBy *ManagedByInfo `xml:"managedBy,omitempty" json:"managedBy,omitempty" vim:"5.0"` + ManagedBy *ManagedByInfo `xml:"managedBy,omitempty" json:"managedBy,omitempty"` // If set true, memory resource reservation for this virtual machine will always be // equal to the virtual machine's memory size; increases in memory size will be // rejected when a corresponding reservation increase is not possible. @@ -88293,7 +87884,7 @@ type VirtualMachineConfigSpec struct { // may only be enabled if it is currently possible to reserve all of the virtual machine's memory. 
// // Reconfigure privilege: VirtualMachine.Config.Resource - MemoryReservationLockedToMax *bool `xml:"memoryReservationLockedToMax" json:"memoryReservationLockedToMax,omitempty" vim:"5.0"` + MemoryReservationLockedToMax *bool `xml:"memoryReservationLockedToMax" json:"memoryReservationLockedToMax,omitempty"` // Specifies that this VM will use nested hardware-assisted virtualization. // // When creating a new VM: @@ -88303,18 +87894,18 @@ type VirtualMachineConfigSpec struct { // true, the value of this flag is set to true. // // Reconfigure privilege: VirtualMachine.Config.Settings - NestedHVEnabled *bool `xml:"nestedHVEnabled" json:"nestedHVEnabled,omitempty" vim:"5.1"` + NestedHVEnabled *bool `xml:"nestedHVEnabled" json:"nestedHVEnabled,omitempty"` // Specifies that this VM will have virtual CPU performance counters // enabled. // // Reconfigure privilege: VirtualMachine.Config.Settings - VPMCEnabled *bool `xml:"vPMCEnabled" json:"vPMCEnabled,omitempty" vim:"5.1"` + VPMCEnabled *bool `xml:"vPMCEnabled" json:"vPMCEnabled,omitempty"` // Configuration of scheduled hardware upgrades. // // Reconfigure privilege: VirtualMachine.Config.UpgradeVirtualHardware // // See also `ScheduledHardwareUpgradeInfo`. - ScheduledHardwareUpgradeInfo *ScheduledHardwareUpgradeInfo `xml:"scheduledHardwareUpgradeInfo,omitempty" json:"scheduledHardwareUpgradeInfo,omitempty" vim:"5.1"` + ScheduledHardwareUpgradeInfo *ScheduledHardwareUpgradeInfo `xml:"scheduledHardwareUpgradeInfo,omitempty" json:"scheduledHardwareUpgradeInfo,omitempty"` // Virtual Machine Profile requirement. // // Profiles are solution specific. @@ -88323,24 +87914,24 @@ type VirtualMachineConfigSpec struct { // interact with it. // This is an optional parameter and if user doesn't specify profile, // the default behavior will apply. - VmProfile []BaseVirtualMachineProfileSpec `xml:"vmProfile,omitempty,typeattr" json:"vmProfile,omitempty" vim:"5.5"` + VmProfile []BaseVirtualMachineProfileSpec `xml:"vmProfile,omitempty,typeattr" json:"vmProfile,omitempty"` // Whether to allow tunneling of clients from the guest VM into the // common message bus on the host network. - MessageBusTunnelEnabled *bool `xml:"messageBusTunnelEnabled" json:"messageBusTunnelEnabled,omitempty" vim:"6.0"` + MessageBusTunnelEnabled *bool `xml:"messageBusTunnelEnabled" json:"messageBusTunnelEnabled,omitempty"` // Virtual Machine cryptographic options. // // The cryptographic options are inherited to all disks of the VM. // The cryptographic options for a disk can be different by setting // its CryptoSpec. - Crypto BaseCryptoSpec `xml:"crypto,omitempty,typeattr" json:"crypto,omitempty" vim:"6.5"` + Crypto BaseCryptoSpec `xml:"crypto,omitempty,typeattr" json:"crypto,omitempty"` // An enum describing whether encrypted vMotion is required for this VM. // // Supported values are listed in `VirtualMachineConfigSpecEncryptedVMotionModes_enum`. // This defaults to opportunistic for a regular VM, and will be set to // required for an encrypted VM. - MigrateEncryption string `xml:"migrateEncryption,omitempty" json:"migrateEncryption,omitempty" vim:"6.5"` + MigrateEncryption string `xml:"migrateEncryption,omitempty" json:"migrateEncryption,omitempty"` // Configuration of SGX, Software Guard Extensions for the VM. - SgxInfo *VirtualMachineSgxInfo `xml:"sgxInfo,omitempty" json:"sgxInfo,omitempty" vim:"7.0"` + SgxInfo *VirtualMachineSgxInfo `xml:"sgxInfo,omitempty" json:"sgxInfo,omitempty"` // An enum describing whether encrypted Fault Tolerance is required // for this VM. 
// @@ -88351,7 +87942,7 @@ type VirtualMachineConfigSpec struct { // will be set to opportunistic. FtEncryptionMode string `xml:"ftEncryptionMode,omitempty" json:"ftEncryptionMode,omitempty" vim:"7.0.2.0"` // Configuration of GMM, Guest Monitoring Mode for the VM. - GuestMonitoringModeInfo *VirtualMachineGuestMonitoringModeInfo `xml:"guestMonitoringModeInfo,omitempty" json:"guestMonitoringModeInfo,omitempty" vim:"7.0"` + GuestMonitoringModeInfo *VirtualMachineGuestMonitoringModeInfo `xml:"guestMonitoringModeInfo,omitempty" json:"guestMonitoringModeInfo,omitempty"` // SEV (Secure Encrypted Virtualization) enabled or not. // // SEV is enabled when @@ -88420,6 +88011,22 @@ type VirtualMachineConfigSpec struct { // guest memory should be selected. If unset, the current value // is left unchanged. FixedPassthruHotPlugEnabled *bool `xml:"fixedPassthruHotPlugEnabled" json:"fixedPassthruHotPlugEnabled,omitempty" vim:"8.0.1.0"` + // Indicates whether FT Metro Cluster is enabled/disabled. + // + // \- If TRUE, FT Metro Cluster is enabled for the VM. An implicit + // Anti-HostGroup will be generated from HostGroup defined for FT + // primary, then affine the primary with one HostGroup and affine the + // secondary with another HostGroup. + // \- If FALSE or unset, FT Metro Cluster is disabled for the VM. Both FT + // primary and secondary will be put in the same HostGroup. + MetroFtEnabled *bool `xml:"metroFtEnabled" json:"metroFtEnabled,omitempty" vim:"8.0.3.0"` + // Indicate the Host Group (`ClusterHostGroup`) for FT + // Metro Cluster enabled Virtual Machine. + // + // Based on the selected Host Group, FT can divide the hosts in the cluster + // into two groups and ensure to place FT primary and FT secondary in + // different groups. + MetroFtHostGroup string `xml:"metroFtHostGroup,omitempty" json:"metroFtHostGroup,omitempty" vim:"8.0.3.0"` } func init() { @@ -88451,7 +88058,7 @@ type VirtualMachineConfigSummary struct { // Virtual machine BIOS identification. Uuid string `xml:"uuid,omitempty" json:"uuid,omitempty"` // VC-specific identifier of the virtual machine - InstanceUuid string `xml:"instanceUuid,omitempty" json:"instanceUuid,omitempty" vim:"4.0"` + InstanceUuid string `xml:"instanceUuid,omitempty" json:"instanceUuid,omitempty"` // Guest operating system identifier (short name). GuestId string `xml:"guestId,omitempty" json:"guestId,omitempty"` // Guest operating system name configured on the virtual machine. @@ -88461,30 +88068,30 @@ type VirtualMachineConfigSummary struct { // Product information. // // References to properties in the URLs are expanded. - Product *VAppProductInfo `xml:"product,omitempty" json:"product,omitempty" vim:"4.0"` + Product *VAppProductInfo `xml:"product,omitempty" json:"product,omitempty"` // Whether the VM requires a reboot to finish installation. // // False if no vApp // meta-data is configured. - InstallBootRequired *bool `xml:"installBootRequired" json:"installBootRequired,omitempty" vim:"4.0"` + InstallBootRequired *bool `xml:"installBootRequired" json:"installBootRequired,omitempty"` // Fault Tolerance settings for this virtual machine. // // This property will be populated only for fault tolerance virtual // machines and will be left unset for all other virtual machines. // See `FaultToleranceConfigInfo` for a description. 
- FtInfo BaseFaultToleranceConfigInfo `xml:"ftInfo,omitempty,typeattr" json:"ftInfo,omitempty" vim:"4.0"` + FtInfo BaseFaultToleranceConfigInfo `xml:"ftInfo,omitempty,typeattr" json:"ftInfo,omitempty"` // Specifies that this VM is managed by a VC Extension. // // See the // `managedBy` property in the ConfigSpec // for more details. - ManagedBy *ManagedByInfo `xml:"managedBy,omitempty" json:"managedBy,omitempty" vim:"5.0"` + ManagedBy *ManagedByInfo `xml:"managedBy,omitempty" json:"managedBy,omitempty"` // Is TPM present in a VM? - TpmPresent *bool `xml:"tpmPresent" json:"tpmPresent,omitempty" vim:"6.7"` + TpmPresent *bool `xml:"tpmPresent" json:"tpmPresent,omitempty"` // Number of VMIOP backed devices attached to the virtual machine. - NumVmiopBackings int32 `xml:"numVmiopBackings,omitempty" json:"numVmiopBackings,omitempty" vim:"6.7"` + NumVmiopBackings int32 `xml:"numVmiopBackings,omitempty" json:"numVmiopBackings,omitempty"` // The hardware version string for this virtual machine. - HwVersion string `xml:"hwVersion,omitempty" json:"hwVersion,omitempty" vim:"6.9.1"` + HwVersion string `xml:"hwVersion,omitempty" json:"hwVersion,omitempty"` } func init() { @@ -88553,7 +88160,6 @@ type VirtualMachineContentLibraryItemInfo struct { func init() { t["VirtualMachineContentLibraryItemInfo"] = reflect.TypeOf((*VirtualMachineContentLibraryItemInfo)(nil)).Elem() - minAPIVersionForType["VirtualMachineContentLibraryItemInfo"] = "7.0" } // Wrapper class to support incremental updates of the cpuFeatureMask. @@ -88584,11 +88190,11 @@ type VirtualMachineDatastoreInfo struct { // The maximum size of a file that can reside on this datastore. MaxFileSize int64 `xml:"maxFileSize" json:"maxFileSize"` // The maximum capacity of a virtual disk which can be created on this volume - MaxVirtualDiskCapacity int64 `xml:"maxVirtualDiskCapacity,omitempty" json:"maxVirtualDiskCapacity,omitempty" vim:"5.5"` + MaxVirtualDiskCapacity int64 `xml:"maxVirtualDiskCapacity,omitempty" json:"maxVirtualDiskCapacity,omitempty"` // Maximum raw device mapping size (physical compatibility) - MaxPhysicalRDMFileSize int64 `xml:"maxPhysicalRDMFileSize,omitempty" json:"maxPhysicalRDMFileSize,omitempty" vim:"6.0"` + MaxPhysicalRDMFileSize int64 `xml:"maxPhysicalRDMFileSize,omitempty" json:"maxPhysicalRDMFileSize,omitempty"` // Maximum raw device mapping size (virtual compatibility) - MaxVirtualRDMFileSize int64 `xml:"maxVirtualRDMFileSize,omitempty" json:"maxVirtualRDMFileSize,omitempty" vim:"6.0"` + MaxVirtualRDMFileSize int64 `xml:"maxVirtualRDMFileSize,omitempty" json:"maxVirtualRDMFileSize,omitempty"` // Access mode for this datastore. // // This is either @@ -88604,22 +88210,22 @@ type VirtualMachineDatastoreInfo struct { // In the case of a cluster compute resource, this property // is aggregated from the values reported by individual hosts // as follows: - // - If at least one host reports - // `vStorageSupported`, - // then it is set to - // `vStorageSupported`. - // - Else if at least one host reports - // `vStorageUnknown`, - // it is set to - // `vStorageUnknown`. - // - Else if at least one host reports - // `vStorageUnsupported`, - // it is set to - // `vStorageUnsupported`. - // - Else it is unset. + // - If at least one host reports + // `vStorageSupported`, + // then it is set to + // `vStorageSupported`. + // - Else if at least one host reports + // `vStorageUnknown`, + // it is set to + // `vStorageUnknown`. 
+ // - Else if at least one host reports + // `vStorageUnsupported`, + // it is set to + // `vStorageUnsupported`. + // - Else it is unset. // // See also `FileSystemMountInfoVStorageSupportStatus_enum`. - VStorageSupport string `xml:"vStorageSupport,omitempty" json:"vStorageSupport,omitempty" vim:"5.0"` + VStorageSupport string `xml:"vStorageSupport,omitempty" json:"vStorageSupport,omitempty"` } func init() { @@ -88659,30 +88265,30 @@ type VirtualMachineDefaultPowerOpInfo struct { // Describes the default power off type for this virtual machine. // // The possible values are specified by the PowerOpType. - // - hard - Perform power off by using the PowerOff method. - // - soft - Perform power off by using the ShutdownGuest method. - // - preset - The preset value is specified in the defaultPowerOffType - // section. + // - hard - Perform power off by using the PowerOff method. + // - soft - Perform power off by using the ShutdownGuest method. + // - preset - The preset value is specified in the defaultPowerOffType + // section. // // This setting is advisory and clients can choose to ignore it. PowerOffType string `xml:"powerOffType,omitempty" json:"powerOffType,omitempty"` // Describes the default suspend type for this virtual machine. // // The possible values are specified by the PowerOpType. - // - hard - Perform suspend by using the Suspend method. - // - soft - Perform suspend by using the StandbyGuest method. - // - preset - The preset value is specified in the defaultSuspendType - // section. + // - hard - Perform suspend by using the Suspend method. + // - soft - Perform suspend by using the StandbyGuest method. + // - preset - The preset value is specified in the defaultSuspendType + // section. // // This setting is advisory and clients can choose to ignore it. SuspendType string `xml:"suspendType,omitempty" json:"suspendType,omitempty"` // Describes the default reset type for this virtual machine. // // The possible values are specified by the PowerOpType. - // - hard - Perform reset by using the Reset method. - // - soft - Perform reset by using the RebootGuest method. - // - preset - The preset value is specified in the defaultResetType - // section. + // - hard - Perform reset by using the Reset method. + // - soft - Perform reset by using the RebootGuest method. + // - preset - The preset value is specified in the defaultResetType + // section. // // This setting is advisory and clients can choose to ignore it. ResetType string `xml:"resetType,omitempty" json:"resetType,omitempty"` @@ -88712,7 +88318,6 @@ type VirtualMachineDefaultProfileSpec struct { func init() { t["VirtualMachineDefaultProfileSpec"] = reflect.TypeOf((*VirtualMachineDefaultProfileSpec)(nil)).Elem() - minAPIVersionForType["VirtualMachineDefaultProfileSpec"] = "6.0" } // Policy specification that carries a pre-defined Storage Policy to be associated @@ -88733,7 +88338,7 @@ type VirtualMachineDefinedProfileSpec struct { ProfileId string `xml:"profileId" json:"profileId"` // Specification containing replication related parameters, sent to the Replication Data Service // provider. - ReplicationSpec *ReplicationSpec `xml:"replicationSpec,omitempty" json:"replicationSpec,omitempty" vim:"6.5"` + ReplicationSpec *ReplicationSpec `xml:"replicationSpec,omitempty" json:"replicationSpec,omitempty"` // Profile data sent to the Storage Backend by vSphere. // // This data is provided by the SPBM component of the vSphere platform. 
@@ -88742,12 +88347,11 @@ type VirtualMachineDefinedProfileSpec struct { // Parameterized Storage Profiles // Extra configuration that is not expressed as a capability in the Profile // definition. - ProfileParams []KeyValue `xml:"profileParams,omitempty" json:"profileParams,omitempty" vim:"6.7"` + ProfileParams []KeyValue `xml:"profileParams,omitempty" json:"profileParams,omitempty"` } func init() { t["VirtualMachineDefinedProfileSpec"] = reflect.TypeOf((*VirtualMachineDefinedProfileSpec)(nil)).Elem() - minAPIVersionForType["VirtualMachineDefinedProfileSpec"] = "5.5" } // The DeviceRuntimeInfo data object type provides information about @@ -88763,7 +88367,6 @@ type VirtualMachineDeviceRuntimeInfo struct { func init() { t["VirtualMachineDeviceRuntimeInfo"] = reflect.TypeOf((*VirtualMachineDeviceRuntimeInfo)(nil)).Elem() - minAPIVersionForType["VirtualMachineDeviceRuntimeInfo"] = "4.1" } // Runtime state of a device. @@ -88776,7 +88379,6 @@ type VirtualMachineDeviceRuntimeInfoDeviceRuntimeState struct { func init() { t["VirtualMachineDeviceRuntimeInfoDeviceRuntimeState"] = reflect.TypeOf((*VirtualMachineDeviceRuntimeInfoDeviceRuntimeState)(nil)).Elem() - minAPIVersionForType["VirtualMachineDeviceRuntimeInfoDeviceRuntimeState"] = "4.1" } // Runtime state of a virtual ethernet card device. @@ -88884,7 +88486,7 @@ type VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeState struct { // // `green` indicates that the reservation specified on the // virtual network adapter is being fulfilled. - ReservationStatus string `xml:"reservationStatus,omitempty" json:"reservationStatus,omitempty" vim:"5.5"` + ReservationStatus string `xml:"reservationStatus,omitempty" json:"reservationStatus,omitempty"` // The status indicating the state of virtual network adapter's attachment // to an opaque network. // @@ -88896,15 +88498,14 @@ type VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeState struct { // // `green` indicates that the network // adapater is successfully attached to opaque network. - AttachmentStatus string `xml:"attachmentStatus,omitempty" json:"attachmentStatus,omitempty" vim:"6.7"` + AttachmentStatus string `xml:"attachmentStatus,omitempty" json:"attachmentStatus,omitempty"` // These network adapter requirements must have equivalent capabilities // on the virtual switch in order to power on or migrate to the host. - FeatureRequirement []VirtualMachineFeatureRequirement `xml:"featureRequirement,omitempty" json:"featureRequirement,omitempty" vim:"6.7"` + FeatureRequirement []VirtualMachineFeatureRequirement `xml:"featureRequirement,omitempty" json:"featureRequirement,omitempty"` } func init() { t["VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeState"] = reflect.TypeOf((*VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeState)(nil)).Elem() - minAPIVersionForType["VirtualMachineDeviceRuntimeInfoVirtualEthernetCardRuntimeState"] = "4.1" } // The DiskDeviceInfo class contains basic information about a specific disk hardware @@ -88941,7 +88542,6 @@ type VirtualMachineDisplayTopology struct { func init() { t["VirtualMachineDisplayTopology"] = reflect.TypeOf((*VirtualMachineDisplayTopology)(nil)).Elem() - minAPIVersionForType["VirtualMachineDisplayTopology"] = "2.5 U2" } // Description of a Device Virtualization Extensions (DVX) device class. 
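The metroFtEnabled and metroFtHostGroup fields added to VirtualMachineConfigSpec in the hunks above are ordinary reconfigure options. A minimal sketch of how a govmomi client might set them is shown below; it assumes an existing *object.VirtualMachine, and the host-group name "fault-domain-a" is a placeholder rather than anything defined in this diff.

package vmops

import (
	"context"

	"github.com/vmware/govmomi/object"
	"github.com/vmware/govmomi/vim25/types"
)

// enableMetroFT is an illustrative sketch, not part of the vendored govmomi
// source. It reconfigures an existing VM with the new FT Metro Cluster fields:
// metroFtEnabled splits FT primary and secondary across host groups, and
// metroFtHostGroup names the ClusterHostGroup used for the split.
func enableMetroFT(ctx context.Context, vm *object.VirtualMachine) error {
	spec := types.VirtualMachineConfigSpec{
		MetroFtEnabled:   types.NewBool(true),
		MetroFtHostGroup: "fault-domain-a", // placeholder ClusterHostGroup name
	}
	task, err := vm.Reconfigure(ctx, spec)
	if err != nil {
		return err
	}
	return task.Wait(ctx)
}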
@@ -88984,7 +88584,6 @@ type VirtualMachineDynamicPassthroughInfo struct { func init() { t["VirtualMachineDynamicPassthroughInfo"] = reflect.TypeOf((*VirtualMachineDynamicPassthroughInfo)(nil)).Elem() - minAPIVersionForType["VirtualMachineDynamicPassthroughInfo"] = "7.0" } // The EmptyIndependentFilterSpec data object is used to specify empty independent @@ -89013,7 +88612,6 @@ type VirtualMachineEmptyProfileSpec struct { func init() { t["VirtualMachineEmptyProfileSpec"] = reflect.TypeOf((*VirtualMachineEmptyProfileSpec)(nil)).Elem() - minAPIVersionForType["VirtualMachineEmptyProfileSpec"] = "5.5" } // Feature requirement contains a key, featureName and an opaque value @@ -89035,7 +88633,6 @@ type VirtualMachineFeatureRequirement struct { func init() { t["VirtualMachineFeatureRequirement"] = reflect.TypeOf((*VirtualMachineFeatureRequirement)(nil)).Elem() - minAPIVersionForType["VirtualMachineFeatureRequirement"] = "5.1" } // The FileInfo data object type contains the locations of virtual machine @@ -89087,7 +88684,7 @@ type VirtualMachineFileInfo struct { LogDirectory string `xml:"logDirectory,omitempty" json:"logDirectory,omitempty"` // Directory to store the fault tolerance meta data files for the // virtual machine. - FtMetadataDirectory string `xml:"ftMetadataDirectory,omitempty" json:"ftMetadataDirectory,omitempty" vim:"6.0"` + FtMetadataDirectory string `xml:"ftMetadataDirectory,omitempty" json:"ftMetadataDirectory,omitempty"` } func init() { @@ -89193,7 +88790,6 @@ type VirtualMachineFileLayoutEx struct { func init() { t["VirtualMachineFileLayoutEx"] = reflect.TypeOf((*VirtualMachineFileLayoutEx)(nil)).Elem() - minAPIVersionForType["VirtualMachineFileLayoutEx"] = "4.0" } // Layout of a virtual disk, including the base- and delta- disks. @@ -89210,7 +88806,6 @@ type VirtualMachineFileLayoutExDiskLayout struct { func init() { t["VirtualMachineFileLayoutExDiskLayout"] = reflect.TypeOf((*VirtualMachineFileLayoutExDiskLayout)(nil)).Elem() - minAPIVersionForType["VirtualMachineFileLayoutExDiskLayout"] = "4.0" } // Information about a single unit of a virtual disk, such as @@ -89237,7 +88832,6 @@ type VirtualMachineFileLayoutExDiskUnit struct { func init() { t["VirtualMachineFileLayoutExDiskUnit"] = reflect.TypeOf((*VirtualMachineFileLayoutExDiskUnit)(nil)).Elem() - minAPIVersionForType["VirtualMachineFileLayoutExDiskUnit"] = "4.0" } // Basic information about a file. @@ -89270,21 +88864,20 @@ type VirtualMachineFileLayoutExFileInfo struct { // might be set but the value could be over-estimated due to // the inability of the NAS based storage to provide an // accurate value. - UniqueSize int64 `xml:"uniqueSize,omitempty" json:"uniqueSize,omitempty" vim:"5.1"` + UniqueSize int64 `xml:"uniqueSize,omitempty" json:"uniqueSize,omitempty"` // Backing object's durable and unmutable identifier. // // Each backing object has a unique identifier which is not settable. // This property is applied to the file backed by a storage object, // such as vvol. - BackingObjectId string `xml:"backingObjectId,omitempty" json:"backingObjectId,omitempty" vim:"6.0"` + BackingObjectId string `xml:"backingObjectId,omitempty" json:"backingObjectId,omitempty"` // Flag which indicates the accessibility of the file // when the file info object was created. 
- Accessible *bool `xml:"accessible" json:"accessible,omitempty" vim:"6.0"` + Accessible *bool `xml:"accessible" json:"accessible,omitempty"` } func init() { t["VirtualMachineFileLayoutExFileInfo"] = reflect.TypeOf((*VirtualMachineFileLayoutExFileInfo)(nil)).Elem() - minAPIVersionForType["VirtualMachineFileLayoutExFileInfo"] = "4.0" } // Layout of a snapshot. @@ -89302,7 +88895,7 @@ type VirtualMachineFileLayoutExSnapshotLayout struct { // Powered off snapshots do not have a memory component and in some cases // the memory component is combined with the data component. When a memory // component does not exist, the value is initialized to -1. - MemoryKey int32 `xml:"memoryKey,omitempty" json:"memoryKey,omitempty" vim:"6.0"` + MemoryKey int32 `xml:"memoryKey,omitempty" json:"memoryKey,omitempty"` // Layout of each virtual disk of the virtual machine when the // snapshot was taken. Disk []VirtualMachineFileLayoutExDiskLayout `xml:"disk,omitempty" json:"disk,omitempty"` @@ -89310,7 +88903,6 @@ type VirtualMachineFileLayoutExSnapshotLayout struct { func init() { t["VirtualMachineFileLayoutExSnapshotLayout"] = reflect.TypeOf((*VirtualMachineFileLayoutExSnapshotLayout)(nil)).Elem() - minAPIVersionForType["VirtualMachineFileLayoutExSnapshotLayout"] = "4.0" } // Enumerates the set of files that make up a snapshot or redo-point @@ -89355,7 +88947,7 @@ type VirtualMachineFlagInfo struct { // // See `VirtualMachineFlagInfoMonitorType_enum` // for possible values for this property. - MonitorType string `xml:"monitorType,omitempty" json:"monitorType,omitempty" vim:"2.5"` + MonitorType string `xml:"monitorType,omitempty" json:"monitorType,omitempty"` // Deprecated as of vSphere API 6.7. // // Specifies how the VCPUs of a virtual machine are allowed to @@ -89371,9 +88963,9 @@ type VirtualMachineFlagInfo struct { // // Flag to specify whether snapshots are disabled for this virtual // machine. - SnapshotDisabled *bool `xml:"snapshotDisabled" json:"snapshotDisabled,omitempty" vim:"2.5"` + SnapshotDisabled *bool `xml:"snapshotDisabled" json:"snapshotDisabled,omitempty"` // Flag to specify whether the snapshot tree is locked for this virtual machine. - SnapshotLocked *bool `xml:"snapshotLocked" json:"snapshotLocked,omitempty" vim:"2.5"` + SnapshotLocked *bool `xml:"snapshotLocked" json:"snapshotLocked,omitempty"` // Indicates whether disk UUIDs are being used by this virtual machine. // // If this flag is set to false, disk UUIDs are not exposed to the guest. @@ -89384,7 +88976,7 @@ type VirtualMachineFlagInfo struct { // virtual machines where the ability to move to older platforms is // important, this flag should be set to false. If the value is unset, // the behavior 'false' will be used. - DiskUuidEnabled *bool `xml:"diskUuidEnabled" json:"diskUuidEnabled,omitempty" vim:"2.5"` + DiskUuidEnabled *bool `xml:"diskUuidEnabled" json:"diskUuidEnabled,omitempty"` // Indicates whether or not the system will try to use nested page // table hardware support, if available. // @@ -89398,7 +88990,7 @@ type VirtualMachineFlagInfo struct { // // `VirtualMachineFlagInfoVirtualMmuUsage_enum` represents the set of // possible values. - VirtualMmuUsage string `xml:"virtualMmuUsage,omitempty" json:"virtualMmuUsage,omitempty" vim:"2.5"` + VirtualMmuUsage string `xml:"virtualMmuUsage,omitempty" json:"virtualMmuUsage,omitempty"` // Indicates whether or not the system will try to use Hardware // Virtualization (HV) support for instruction virtualization, // if available. 
@@ -89432,7 +89024,7 @@ type VirtualMachineFlagInfo struct { // (hvOn, on) - Use both VT/AMD-V and EPT/RVI. // (hvOn, off) - Use VT/AMD-V but do not use EPT/RVI. // (hvOff, off) - Do not use any of these hardware acceleration technologies. - VirtualExecUsage string `xml:"virtualExecUsage,omitempty" json:"virtualExecUsage,omitempty" vim:"4.0"` + VirtualExecUsage string `xml:"virtualExecUsage,omitempty" json:"virtualExecUsage,omitempty"` // Specifies the power-off behavior for a virtual machine that has // a snapshot. // @@ -89440,7 +89032,7 @@ type VirtualMachineFlagInfo struct { // be used. // // See also `VirtualMachinePowerOffBehavior_enum`. - SnapshotPowerOffBehavior string `xml:"snapshotPowerOffBehavior,omitempty" json:"snapshotPowerOffBehavior,omitempty" vim:"2.5"` + SnapshotPowerOffBehavior string `xml:"snapshotPowerOffBehavior,omitempty" json:"snapshotPowerOffBehavior,omitempty"` // Deprecated as of vSphere API 6.0. // // Flag to specify whether record and replay operations are @@ -89453,20 +89045,20 @@ type VirtualMachineFlagInfo struct { // already has a recording, replay will be disallowed, though // the recording will be preserved. // If the value is unset, the behavior 'false' will be used. - RecordReplayEnabled *bool `xml:"recordReplayEnabled" json:"recordReplayEnabled,omitempty" vim:"4.0"` + RecordReplayEnabled *bool `xml:"recordReplayEnabled" json:"recordReplayEnabled,omitempty"` // Indicates the type of fault tolerance type the virtual machine is // configured to use. // // `VirtualMachineFaultToleranceType_enum` represents the set of // possible values. - FaultToleranceType string `xml:"faultToleranceType,omitempty" json:"faultToleranceType,omitempty" vim:"6.0"` + FaultToleranceType string `xml:"faultToleranceType,omitempty" json:"faultToleranceType,omitempty"` // Flag to specify whether common CBRC digest cache is enabled for this // virtual machine. // // The common CBRC cache is shared between the hot added disks in the VM. // If this flag is set to 'true' the VM will allocate a commont digest // cache on power on. - CbrcCacheEnabled *bool `xml:"cbrcCacheEnabled" json:"cbrcCacheEnabled,omitempty" vim:"6.5"` + CbrcCacheEnabled *bool `xml:"cbrcCacheEnabled" json:"cbrcCacheEnabled,omitempty"` // Flag to specify if Intel Virtualization Technology for Directed I/O // is enabled for this virtual machine. // @@ -89475,7 +89067,7 @@ type VirtualMachineFlagInfo struct { // and this flag is set to false error is returned. // \- If this flag is unset and vim.vm.FlagInfo.vbsEnabled is set to // true, the value of this flag is set to true. - VvtdEnabled *bool `xml:"vvtdEnabled" json:"vvtdEnabled,omitempty" vim:"6.7"` + VvtdEnabled *bool `xml:"vvtdEnabled" json:"vvtdEnabled,omitempty"` // Flag to specify if Virtualization-based security // is enabled for this virtual machine. // @@ -89490,7 +89082,7 @@ type VirtualMachineFlagInfo struct { // returned. // \- If vim.vm.firmware is not set to bios, it is set // to efi. Else error is returned. - VbsEnabled *bool `xml:"vbsEnabled" json:"vbsEnabled,omitempty" vim:"6.7"` + VbsEnabled *bool `xml:"vbsEnabled" json:"vbsEnabled,omitempty"` } func init() { @@ -89528,7 +89120,7 @@ type VirtualMachineForkConfigInfo struct { // belongs to. // // Applicable for parent VirtualMachines only. - ParentForkGroupId string `xml:"parentForkGroupId,omitempty" json:"parentForkGroupId,omitempty" vim:"6.5"` + ParentForkGroupId string `xml:"parentForkGroupId,omitempty" json:"parentForkGroupId,omitempty"` // The flag to indicate the fork child type. 
// // For a persistent child @@ -89541,7 +89133,6 @@ type VirtualMachineForkConfigInfo struct { func init() { t["VirtualMachineForkConfigInfo"] = reflect.TypeOf((*VirtualMachineForkConfigInfo)(nil)).Elem() - minAPIVersionForType["VirtualMachineForkConfigInfo"] = "6.0" } // This data object describes the guest integrity platform configuration of @@ -89559,7 +89150,6 @@ type VirtualMachineGuestIntegrityInfo struct { func init() { t["VirtualMachineGuestIntegrityInfo"] = reflect.TypeOf((*VirtualMachineGuestIntegrityInfo)(nil)).Elem() - minAPIVersionForType["VirtualMachineGuestIntegrityInfo"] = "6.5" } // This data object describes the GMM (Guest Mode Monitoring) configuration @@ -89573,7 +89163,6 @@ type VirtualMachineGuestMonitoringModeInfo struct { func init() { t["VirtualMachineGuestMonitoringModeInfo"] = reflect.TypeOf((*VirtualMachineGuestMonitoringModeInfo)(nil)).Elem() - minAPIVersionForType["VirtualMachineGuestMonitoringModeInfo"] = "7.0" } // This data object type encapsulates configuration settings @@ -89590,7 +89179,6 @@ type VirtualMachineGuestQuiesceSpec struct { func init() { t["VirtualMachineGuestQuiesceSpec"] = reflect.TypeOf((*VirtualMachineGuestQuiesceSpec)(nil)).Elem() - minAPIVersionForType["VirtualMachineGuestQuiesceSpec"] = "6.5" } // A subset of virtual machine guest information. @@ -89610,19 +89198,19 @@ type VirtualMachineGuestSummary struct { // // Current version status of VMware Tools in the guest operating system, // if known. - ToolsVersionStatus string `xml:"toolsVersionStatus,omitempty" json:"toolsVersionStatus,omitempty" vim:"4.0"` + ToolsVersionStatus string `xml:"toolsVersionStatus,omitempty" json:"toolsVersionStatus,omitempty"` // Current version status of VMware Tools in the guest operating system, // if known. - ToolsVersionStatus2 string `xml:"toolsVersionStatus2,omitempty" json:"toolsVersionStatus2,omitempty" vim:"5.0"` + ToolsVersionStatus2 string `xml:"toolsVersionStatus2,omitempty" json:"toolsVersionStatus2,omitempty"` // Current running status of VMware Tools in the guest operating system, // if known. - ToolsRunningStatus string `xml:"toolsRunningStatus,omitempty" json:"toolsRunningStatus,omitempty" vim:"4.0"` + ToolsRunningStatus string `xml:"toolsRunningStatus,omitempty" json:"toolsRunningStatus,omitempty"` // Hostname of the guest operating system, if known. HostName string `xml:"hostName,omitempty" json:"hostName,omitempty"` // Primary IP address assigned to the guest operating system, if known. IpAddress string `xml:"ipAddress,omitempty" json:"ipAddress,omitempty"` // The hardware version string for this virtual machine. - HwVersion string `xml:"hwVersion,omitempty" json:"hwVersion,omitempty" vim:"6.9.1"` + HwVersion string `xml:"hwVersion,omitempty" json:"hwVersion,omitempty"` } func init() { @@ -89682,12 +89270,11 @@ type VirtualMachineImportSpec struct { // for the root node in an ImportSpec tree. // // Refers instance of `ResourcePool`. - ResPoolEntity *ManagedObjectReference `xml:"resPoolEntity,omitempty" json:"resPoolEntity,omitempty" vim:"4.1"` + ResPoolEntity *ManagedObjectReference `xml:"resPoolEntity,omitempty" json:"resPoolEntity,omitempty"` } func init() { t["VirtualMachineImportSpec"] = reflect.TypeOf((*VirtualMachineImportSpec)(nil)).Elem() - minAPIVersionForType["VirtualMachineImportSpec"] = "4.0" } // The IndependentFilterSpec data object is used to specify the independent @@ -89721,15 +89308,15 @@ type VirtualMachineInstantCloneSpec struct { // resources the newly created virtual machine will use. 
// // The location might be empty or specify: - // - The folder where the virtual machine should be located. If not - // specified, the root VM folder of the source VM will be used. - // - A datastore where the InstantCloned virtual machine will be located - // on the physical storage. - // - A resource pool determines where compute resources will be - // available to the clone. - // - A device change specification. The only allowed device changes - // are edits of VirtualEthernetCard and filebacked Serial/Parallel - // ports. + // - The folder where the virtual machine should be located. If not + // specified, the root VM folder of the source VM will be used. + // - A datastore where the InstantCloned virtual machine will be located + // on the physical storage. + // - A resource pool determines where compute resources will be + // available to the clone. + // - A device change specification. The only allowed device changes + // are edits of VirtualEthernetCard and filebacked Serial/Parallel + // ports. // // All other settings are NOT supported. Location VirtualMachineRelocateSpec `xml:"location" json:"location"` @@ -89746,7 +89333,6 @@ type VirtualMachineInstantCloneSpec struct { func init() { t["VirtualMachineInstantCloneSpec"] = reflect.TypeOf((*VirtualMachineInstantCloneSpec)(nil)).Elem() - minAPIVersionForType["VirtualMachineInstantCloneSpec"] = "6.7" } // The LegacyNetworkSwitchInfo data object type contains information about @@ -89787,7 +89373,6 @@ type VirtualMachineMemoryReservationInfo struct { func init() { t["VirtualMachineMemoryReservationInfo"] = reflect.TypeOf((*VirtualMachineMemoryReservationInfo)(nil)).Elem() - minAPIVersionForType["VirtualMachineMemoryReservationInfo"] = "2.5" } // The VirtualMachineReservationSpec data object specifies @@ -89806,7 +89391,6 @@ type VirtualMachineMemoryReservationSpec struct { func init() { t["VirtualMachineMemoryReservationSpec"] = reflect.TypeOf((*VirtualMachineMemoryReservationSpec)(nil)).Elem() - minAPIVersionForType["VirtualMachineMemoryReservationSpec"] = "2.5" } // Message data which is intended to be displayed according @@ -89854,12 +89438,11 @@ type VirtualMachineMessage struct { // // Use `SessionManager*.*SessionManager.SetLocale` to // change the session locale. - Text string `xml:"text,omitempty" json:"text,omitempty" vim:"4.0"` + Text string `xml:"text,omitempty" json:"text,omitempty"` } func init() { t["VirtualMachineMessage"] = reflect.TypeOf((*VirtualMachineMessage)(nil)).Elem() - minAPIVersionForType["VirtualMachineMessage"] = "2.5" } // VmMetadata is a pair of VM ID and opaque metadata. 
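The instant-clone location rules listed above (folder, datastore, resource pool, plus a restricted set of device edits) all travel inside the Location field of the clone spec. The sketch below shows one plausible way to assemble such a spec with govmomi; the resource-pool reference is a placeholder and only fields visible in this diff are populated.

package vmops

import (
	"github.com/vmware/govmomi/vim25/types"
)

// buildInstantCloneSpec is an illustrative sketch, not part of the vendored
// govmomi source. Per the rules above, Location only needs the settings that
// differ from the frozen source VM; here just a resource pool is supplied, so
// the folder and datastore default to those of the source VM.
func buildInstantCloneSpec(name string, pool types.ManagedObjectReference) types.VirtualMachineInstantCloneSpec {
	return types.VirtualMachineInstantCloneSpec{
		Name: name,
		Location: types.VirtualMachineRelocateSpec{
			Pool: &pool, // compute resources for the new clone
		},
	}
}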
@@ -89878,7 +89461,6 @@ type VirtualMachineMetadataManagerVmMetadata struct { func init() { t["VirtualMachineMetadataManagerVmMetadata"] = reflect.TypeOf((*VirtualMachineMetadataManagerVmMetadata)(nil)).Elem() - minAPIVersionForType["VirtualMachineMetadataManagerVmMetadata"] = "5.5" } // VmMetadataInput specifies the operation and metadata for a @@ -89896,7 +89478,6 @@ type VirtualMachineMetadataManagerVmMetadataInput struct { func init() { t["VirtualMachineMetadataManagerVmMetadataInput"] = reflect.TypeOf((*VirtualMachineMetadataManagerVmMetadataInput)(nil)).Elem() - minAPIVersionForType["VirtualMachineMetadataManagerVmMetadataInput"] = "5.5" } // VmMetadataOwner defines the namespace for an owner @@ -89914,7 +89495,6 @@ type VirtualMachineMetadataManagerVmMetadataOwner struct { func init() { t["VirtualMachineMetadataManagerVmMetadataOwner"] = reflect.TypeOf((*VirtualMachineMetadataManagerVmMetadataOwner)(nil)).Elem() - minAPIVersionForType["VirtualMachineMetadataManagerVmMetadataOwner"] = "5.5" } // A list of VmMetadataResults are returned for successful and @@ -89933,7 +89513,6 @@ type VirtualMachineMetadataManagerVmMetadataResult struct { func init() { t["VirtualMachineMetadataManagerVmMetadataResult"] = reflect.TypeOf((*VirtualMachineMetadataManagerVmMetadataResult)(nil)).Elem() - minAPIVersionForType["VirtualMachineMetadataManagerVmMetadataResult"] = "5.5" } // The `VirtualMachineMksConnection` object describes an MKS style connection @@ -89976,7 +89555,7 @@ type VirtualMachineMksTicket struct { Port int32 `xml:"port,omitempty" json:"port,omitempty"` // The expected thumbprint of the SSL cert of the host to which // we are connecting. - SslThumbprint string `xml:"sslThumbprint,omitempty" json:"sslThumbprint,omitempty" vim:"2.5"` + SslThumbprint string `xml:"sslThumbprint,omitempty" json:"sslThumbprint,omitempty"` } func init() { @@ -89991,7 +89570,7 @@ type VirtualMachineNetworkInfo struct { // Information about the network Network BaseNetworkSummary `xml:"network,typeattr" json:"network"` // Key of parent vSwitch of the network - Vswitch string `xml:"vswitch,omitempty" json:"vswitch,omitempty" vim:"6.5"` + Vswitch string `xml:"vswitch,omitempty" json:"vswitch,omitempty"` } func init() { @@ -90042,7 +89621,6 @@ type VirtualMachinePciPassthroughInfo struct { func init() { t["VirtualMachinePciPassthroughInfo"] = reflect.TypeOf((*VirtualMachinePciPassthroughInfo)(nil)).Elem() - minAPIVersionForType["VirtualMachinePciPassthroughInfo"] = "4.0" } // Description of a gpu PCI device that can be shared with a virtual machine. 
@@ -90055,7 +89633,6 @@ type VirtualMachinePciSharedGpuPassthroughInfo struct { func init() { t["VirtualMachinePciSharedGpuPassthroughInfo"] = reflect.TypeOf((*VirtualMachinePciSharedGpuPassthroughInfo)(nil)).Elem() - minAPIVersionForType["VirtualMachinePciSharedGpuPassthroughInfo"] = "6.0" } // The PrecisionClockInfo data object type describes available host @@ -90073,7 +89650,6 @@ type VirtualMachinePrecisionClockInfo struct { func init() { t["VirtualMachinePrecisionClockInfo"] = reflect.TypeOf((*VirtualMachinePrecisionClockInfo)(nil)).Elem() - minAPIVersionForType["VirtualMachinePrecisionClockInfo"] = "7.0" } // The `VirtualMachineProfileDetails` data object type provides details of the policy @@ -90090,7 +89666,6 @@ type VirtualMachineProfileDetails struct { func init() { t["VirtualMachineProfileDetails"] = reflect.TypeOf((*VirtualMachineProfileDetails)(nil)).Elem() - minAPIVersionForType["VirtualMachineProfileDetails"] = "6.7" } // Details of the policies associated with Virtual Disks. @@ -90105,7 +89680,6 @@ type VirtualMachineProfileDetailsDiskProfileDetails struct { func init() { t["VirtualMachineProfileDetailsDiskProfileDetails"] = reflect.TypeOf((*VirtualMachineProfileDetailsDiskProfileDetails)(nil)).Elem() - minAPIVersionForType["VirtualMachineProfileDetailsDiskProfileDetails"] = "6.7" } // The extensible data object type encapsulates additional data specific @@ -90132,7 +89706,6 @@ type VirtualMachineProfileRawData struct { func init() { t["VirtualMachineProfileRawData"] = reflect.TypeOf((*VirtualMachineProfileRawData)(nil)).Elem() - minAPIVersionForType["VirtualMachineProfileRawData"] = "5.5" } // The ProfileSpec data object is used to specify the Storage Policy to be @@ -90143,7 +89716,6 @@ type VirtualMachineProfileSpec struct { func init() { t["VirtualMachineProfileSpec"] = reflect.TypeOf((*VirtualMachineProfileSpec)(nil)).Elem() - minAPIVersionForType["VirtualMachineProfileSpec"] = "5.5" } // Data object which represents relations between a @@ -90161,7 +89733,6 @@ type VirtualMachinePropertyRelation struct { func init() { t["VirtualMachinePropertyRelation"] = reflect.TypeOf((*VirtualMachinePropertyRelation)(nil)).Elem() - minAPIVersionForType["VirtualMachinePropertyRelation"] = "6.7" } // This data object type describes the question that is currently @@ -90178,7 +89749,7 @@ type VirtualMachineQuestionInfo struct { // The message data for the individual messages that comprise the question. // // Only available on servers that support localization. - Message []VirtualMachineMessage `xml:"message,omitempty" json:"message,omitempty" vim:"2.5"` + Message []VirtualMachineMessage `xml:"message,omitempty" json:"message,omitempty"` } func init() { @@ -90200,12 +89771,12 @@ type VirtualMachineQuickStats struct { // Basic CPU performance statistics, in MHz. // // Valid while the virtual machine is running. - OverallCpuDemand int32 `xml:"overallCpuDemand,omitempty" json:"overallCpuDemand,omitempty" vim:"4.0"` + OverallCpuDemand int32 `xml:"overallCpuDemand,omitempty" json:"overallCpuDemand,omitempty"` // Percentage of time that the virtual machine was ready, but could not // get scheduled to run on the physical CPU. // // Valid while the virtual machine is running. - OverallCpuReadiness int32 `xml:"overallCpuReadiness,omitempty" json:"overallCpuReadiness,omitempty" vim:"7.0"` + OverallCpuReadiness int32 `xml:"overallCpuReadiness,omitempty" json:"overallCpuReadiness,omitempty"` // Guest memory utilization statistics, in MB. 
// // This @@ -90243,7 +89814,7 @@ type VirtualMachineQuickStats struct { // case CPU allocation for this virtual machine, that is, the amount of CPU // resource this virtual machine would receive if all virtual machines running // in the cluster went to maximum consumption. Units are MHz. - StaticCpuEntitlement int32 `xml:"staticCpuEntitlement,omitempty" json:"staticCpuEntitlement,omitempty" vim:"4.0"` + StaticCpuEntitlement int32 `xml:"staticCpuEntitlement,omitempty" json:"staticCpuEntitlement,omitempty"` // The static memory resource entitlement for a virtual machine. // // This value is @@ -90252,42 +89823,42 @@ type VirtualMachineQuickStats struct { // case memory allocation for this virtual machine, that is, the amount of // memory this virtual machine would receive if all virtual machines running // in the cluster went to maximum consumption. Units are MB. - StaticMemoryEntitlement int32 `xml:"staticMemoryEntitlement,omitempty" json:"staticMemoryEntitlement,omitempty" vim:"4.0"` + StaticMemoryEntitlement int32 `xml:"staticMemoryEntitlement,omitempty" json:"staticMemoryEntitlement,omitempty"` // Amount of host physical memory that is mapped for a virtual machine, // in MB. // // The number can be between 0 and the configured memory size of // the virtual machine. Valid while the virtual machine is running. - GrantedMemory int32 `xml:"grantedMemory,omitempty" json:"grantedMemory,omitempty" vim:"7.0"` + GrantedMemory int32 `xml:"grantedMemory,omitempty" json:"grantedMemory,omitempty"` // The portion of memory, in MB, that is granted to this VM from non-shared // host memory. - PrivateMemory int32 `xml:"privateMemory,omitempty" json:"privateMemory,omitempty" vim:"4.0"` + PrivateMemory int32 `xml:"privateMemory,omitempty" json:"privateMemory,omitempty"` // The portion of memory, in MB, that is granted to this VM from host memory // that is shared between VMs. - SharedMemory int32 `xml:"sharedMemory,omitempty" json:"sharedMemory,omitempty" vim:"4.0"` + SharedMemory int32 `xml:"sharedMemory,omitempty" json:"sharedMemory,omitempty"` // The portion of memory, in MB, that is granted to this VM from the host's swap // space. // // This is a sign that there is memory pressure on the host. - SwappedMemory int32 `xml:"swappedMemory,omitempty" json:"swappedMemory,omitempty" vim:"4.0"` + SwappedMemory int32 `xml:"swappedMemory,omitempty" json:"swappedMemory,omitempty"` // The size of the balloon driver in the VM, in MB. // // The host will inflate the // balloon driver to reclaim physical memory from the VM. This is a sign that // there is memory pressure on the host. - BalloonedMemory int32 `xml:"balloonedMemory,omitempty" json:"balloonedMemory,omitempty" vim:"4.0"` + BalloonedMemory int32 `xml:"balloonedMemory,omitempty" json:"balloonedMemory,omitempty"` // The amount of consumed overhead memory, in MB, for this VM. - ConsumedOverheadMemory int32 `xml:"consumedOverheadMemory,omitempty" json:"consumedOverheadMemory,omitempty" vim:"4.0"` + ConsumedOverheadMemory int32 `xml:"consumedOverheadMemory,omitempty" json:"consumedOverheadMemory,omitempty"` // The network bandwidth used for logging between the // primary and secondary fault tolerance VMs. // // The unit is kilobytes per second. - FtLogBandwidth int32 `xml:"ftLogBandwidth,omitempty" json:"ftLogBandwidth,omitempty" vim:"4.0"` + FtLogBandwidth int32 `xml:"ftLogBandwidth,omitempty" json:"ftLogBandwidth,omitempty"` // The amount of time in wallclock that the VCPU of the secondary fault // tolerance VM is behind the VCPU of the primary VM. 
// // The unit is millisecond. - FtSecondaryLatency int32 `xml:"ftSecondaryLatency,omitempty" json:"ftSecondaryLatency,omitempty" vim:"4.0"` + FtSecondaryLatency int32 `xml:"ftSecondaryLatency,omitempty" json:"ftSecondaryLatency,omitempty"` // The latency status of the fault tolerance VM. // // ftLatencyStatus is determined by the value of ftSecondaryLatency. @@ -90297,14 +89868,14 @@ type VirtualMachineQuickStats struct { // and less than or equal to 6 seconds; // red, if ftSecondaryLatency is greater than 6 seconds; // gray, if ftSecondaryLatency is unknown. - FtLatencyStatus ManagedEntityStatus `xml:"ftLatencyStatus,omitempty" json:"ftLatencyStatus,omitempty" vim:"4.0"` + FtLatencyStatus ManagedEntityStatus `xml:"ftLatencyStatus,omitempty" json:"ftLatencyStatus,omitempty"` // The amount of compressed memory currently consumed by VM, in Kb. - CompressedMemory int64 `xml:"compressedMemory,omitempty" json:"compressedMemory,omitempty" vim:"4.1"` + CompressedMemory int64 `xml:"compressedMemory,omitempty" json:"compressedMemory,omitempty"` // The system uptime of the VM in seconds. - UptimeSeconds int32 `xml:"uptimeSeconds,omitempty" json:"uptimeSeconds,omitempty" vim:"4.1"` + UptimeSeconds int32 `xml:"uptimeSeconds,omitempty" json:"uptimeSeconds,omitempty"` // The amount of memory swapped to fast disk device such as // SSD, in KB. - SsdSwappedMemory int64 `xml:"ssdSwappedMemory,omitempty" json:"ssdSwappedMemory,omitempty" vim:"5.0"` + SsdSwappedMemory int64 `xml:"ssdSwappedMemory,omitempty" json:"ssdSwappedMemory,omitempty"` // The amount of memory that was recently touched by the VM, in MB. ActiveMemory int32 `xml:"activeMemory,omitempty" json:"activeMemory,omitempty" vim:"7.0.3.0"` // Stats for each physical memory tier. @@ -90335,6 +89906,7 @@ type VirtualMachineQuickStatsMemoryTierStats struct { func init() { t["VirtualMachineQuickStatsMemoryTierStats"] = reflect.TypeOf((*VirtualMachineQuickStatsMemoryTierStats)(nil)).Elem() + minAPIVersionForType["VirtualMachineQuickStatsMemoryTierStats"] = "7.0.3.0" } // Specification for moving or copying a virtual machine to a different datastore @@ -90349,14 +89921,14 @@ type VirtualMachineRelocateSpec struct { // virtual machine is relocated to a different vCenter service, the // destination host, pool, and datastore parameters have to be explicitly // specified by default when the task is submitted. - Service *ServiceLocator `xml:"service,omitempty" json:"service,omitempty" vim:"6.0"` + Service *ServiceLocator `xml:"service,omitempty" json:"service,omitempty"` // The folder where the virtual machine should be located. // // If not specified, // the root VM folder of the destination datacenter will be used. // // Refers instance of `Folder`. - Folder *ManagedObjectReference `xml:"folder,omitempty" json:"folder,omitempty" vim:"6.0"` + Folder *ManagedObjectReference `xml:"folder,omitempty" json:"folder,omitempty"` // The datastore where the virtual machine should be located. // // If @@ -90382,43 +89954,43 @@ type VirtualMachineRelocateSpec struct { // If left unset then // `moveAllDiskBackingsAndDisallowSharing` // is assumed. - DiskMoveType string `xml:"diskMoveType,omitempty" json:"diskMoveType,omitempty" vim:"4.0"` + DiskMoveType string `xml:"diskMoveType,omitempty" json:"diskMoveType,omitempty"` // The resource pool to which this virtual machine should be attached. - // - For a relocate or clone operation to a virtual machine, if the - // argument is not supplied, the current resource pool of virtual - // machine is used. 
- // - For a clone operation from a template to a virtual machine, - // this argument is required. - // - If the virtual machine is relocated to a different vCenter service, - // and a resource pool is not specified, the destination host must be - // specified. - // - If a resource pool is specified, the virtual machine is powered - // on, and the target pool represents a cluster without DRS enabled, - // an InvalidArgument exception is thrown. - // - If the virtual machine is relocated to a different datacenter - // within the vCenter service, the resource pool has to be specified - // and cannot be unset. + // - For a relocate or clone operation to a virtual machine, if the + // argument is not supplied, the current resource pool of virtual + // machine is used. + // - For a clone operation from a template to a virtual machine, + // this argument is required. + // - If the virtual machine is relocated to a different vCenter service, + // and a resource pool is not specified, the destination host must be + // specified. + // - If a resource pool is specified, the virtual machine is powered + // on, and the target pool represents a cluster without DRS enabled, + // an InvalidArgument exception is thrown. + // - If the virtual machine is relocated to a different datacenter + // within the vCenter service, the resource pool has to be specified + // and cannot be unset. // // Refers instance of `ResourcePool`. Pool *ManagedObjectReference `xml:"pool,omitempty" json:"pool,omitempty"` // The target host for the virtual machine. // // If not specified, - // - if resource pool is not specified, current host is used. - // - if resource pool is specified, and the target pool represents - // a stand-alone host, the host is used. - // - if resource pool is specified, the virtual machine is powered on, - // and the target pool represents a DRS-enabled cluster, a host - // selected by DRS is used. - // - if resource pool is specified, the virtual machine is powered on, - // and the target pool represents a cluster without DRS enabled, - // an InvalidArgument exception is thrown. - // - if a resource pool is specified, the target pool represents a - // cluster, and this is a clone or the virtual machine is powered - // off, a random compatible host is chosen. - // - A destination host must be specified if the virtual machine is - // relocated to a different vCenter service, and a resource pool is - // not specified. + // - if resource pool is not specified, current host is used. + // - if resource pool is specified, and the target pool represents + // a stand-alone host, the host is used. + // - if resource pool is specified, the virtual machine is powered on, + // and the target pool represents a DRS-enabled cluster, a host + // selected by DRS is used. + // - if resource pool is specified, the virtual machine is powered on, + // and the target pool represents a cluster without DRS enabled, + // an InvalidArgument exception is thrown. + // - if a resource pool is specified, the target pool represents a + // cluster, and this is a clone or the virtual machine is powered + // off, a random compatible host is chosen. + // - A destination host must be specified if the virtual machine is + // relocated to a different vCenter service, and a resource pool is + // not specified. // // Refers instance of `HostSystem`. Host *ManagedObjectReference `xml:"host,omitempty" json:"host,omitempty"` @@ -90442,17 +90014,17 @@ type VirtualMachineRelocateSpec struct { // device locations for the relocate operation. 
// // The supported device changes are: - // - For `VirtualEthernetCard`, it has to be used - // in `VirtualDeviceConfigSpec.device` to specify the - // target network backing. - // - For `VirtualDisk`, it can be used to specify - // vFlash cache configuration, or the storage profile for destination - // disks. The storage profiles are used to either upgrade the virtual - // disk's storage to a persistent memory, or keep the virtual disk - // in persistent memory when moving the virtual machine's overall - // storage. - // - All other specification are ignored. - DeviceChange []BaseVirtualDeviceConfigSpec `xml:"deviceChange,omitempty,typeattr" json:"deviceChange,omitempty" vim:"5.5"` + // - For `VirtualEthernetCard`, it has to be used + // in `VirtualDeviceConfigSpec.device` to specify the + // target network backing. + // - For `VirtualDisk`, it can be used to specify + // vFlash cache configuration, or the storage profile for destination + // disks. The storage profiles are used to either upgrade the virtual + // disk's storage to a persistent memory, or keep the virtual disk + // in persistent memory when moving the virtual machine's overall + // storage. + // - All other specification are ignored. + DeviceChange []BaseVirtualDeviceConfigSpec `xml:"deviceChange,omitempty,typeattr" json:"deviceChange,omitempty"` // Storage profile requirement for Virtual Machine's home directory. // // Profiles are solution specific. @@ -90461,12 +90033,12 @@ type VirtualMachineRelocateSpec struct { // interact with SPBM. // This is an optional parameter and if user doesn't specify profile, // the default behavior will apply. - Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr" json:"profile,omitempty" vim:"5.5"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr" json:"profile,omitempty"` // Virtual Machine cryptographic options. // // Encryption requirement for the virtual machine's metadata // files (non-disk files). - CryptoSpec BaseCryptoSpec `xml:"cryptoSpec,omitempty,typeattr" json:"cryptoSpec,omitempty" vim:"7.0"` + CryptoSpec BaseCryptoSpec `xml:"cryptoSpec,omitempty,typeattr" json:"cryptoSpec,omitempty"` } func init() { @@ -90493,7 +90065,7 @@ type VirtualMachineRelocateSpecDiskLocator struct { // // If left unset then `moveAllDiskBackingsAndDisallowSharing` // is assumed. - DiskMoveType string `xml:"diskMoveType,omitempty" json:"diskMoveType,omitempty" vim:"4.0"` + DiskMoveType string `xml:"diskMoveType,omitempty" json:"diskMoveType,omitempty"` // Backing information for the virtual disk at the destination. // // This can be used, for instance, to change the format of the @@ -90502,12 +90074,12 @@ type VirtualMachineRelocateSpecDiskLocator struct { // changes may be ignored if they are not supported. // // Supported BackingInfo types and properties: - // - `VirtualDiskFlatVer2BackingInfo` - // - thinProvisioned - // - eagerlyScrub - // - `VirtualDiskSeSparseBackingInfo` - // (ESX 5.1 or later) - DiskBackingInfo BaseVirtualDeviceBackingInfo `xml:"diskBackingInfo,omitempty,typeattr" json:"diskBackingInfo,omitempty" vim:"5.0"` + // - `VirtualDiskFlatVer2BackingInfo` + // - thinProvisioned + // - eagerlyScrub + // - `VirtualDiskSeSparseBackingInfo` + // (ESX 5.1 or later) + DiskBackingInfo BaseVirtualDeviceBackingInfo `xml:"diskBackingInfo,omitempty,typeattr" json:"diskBackingInfo,omitempty"` // Virtual Disk Profile requirement. // // Profiles are solution specific. 
@@ -90516,9 +90088,9 @@ type VirtualMachineRelocateSpecDiskLocator struct { // interact with it. // This is an optional parameter and if user doesn't specify profile, // the default behavior will apply. - Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr" json:"profile,omitempty" vim:"5.5"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr" json:"profile,omitempty"` // Cryptographic option of the current disk. - Backing *VirtualMachineRelocateSpecDiskLocatorBackingSpec `xml:"backing,omitempty" json:"backing,omitempty" vim:"7.0"` + Backing *VirtualMachineRelocateSpecDiskLocatorBackingSpec `xml:"backing,omitempty" json:"backing,omitempty"` // List of independent filters `VirtualMachineIndependentFilterSpec` // to be configured on the virtual disk after the relocate. FilterSpec []BaseVirtualMachineBaseIndependentFilterSpec `xml:"filterSpec,omitempty,typeattr" json:"filterSpec,omitempty" vim:"7.0.2.1"` @@ -90543,7 +90115,6 @@ type VirtualMachineRelocateSpecDiskLocatorBackingSpec struct { func init() { t["VirtualMachineRelocateSpecDiskLocatorBackingSpec"] = reflect.TypeOf((*VirtualMachineRelocateSpecDiskLocatorBackingSpec)(nil)).Elem() - minAPIVersionForType["VirtualMachineRelocateSpecDiskLocatorBackingSpec"] = "7.0" } // The RuntimeInfo data object type provides information about @@ -90566,7 +90137,7 @@ type VirtualMachineRuntimeInfo struct { // contain values for this property when some other property on the DataObject changes. // If this update is a result of a call to WaitForUpdatesEx with a non-empty // version parameter, the value for this property may not be current. - Device []VirtualMachineDeviceRuntimeInfo `xml:"device,omitempty" json:"device,omitempty" vim:"4.1"` + Device []VirtualMachineDeviceRuntimeInfo `xml:"device,omitempty" json:"device,omitempty"` // The host that is responsible for running a virtual machine. // // This property is null if the virtual machine is not running and is @@ -90581,12 +90152,12 @@ type VirtualMachineRuntimeInfo struct { // Represents if the vm is currently being failed over by FDM VmFailoverInProgress *bool `xml:"vmFailoverInProgress" json:"vmFailoverInProgress,omitempty" vim:"7.0.2.0"` // The fault tolerance state of the virtual machine. - FaultToleranceState VirtualMachineFaultToleranceState `xml:"faultToleranceState,omitempty" json:"faultToleranceState,omitempty" vim:"4.0"` + FaultToleranceState VirtualMachineFaultToleranceState `xml:"faultToleranceState,omitempty" json:"faultToleranceState,omitempty"` // The vSphere HA protection state for a virtual machine. // // Property // is unset if vSphere HA is not enabled. - DasVmProtection *VirtualMachineRuntimeInfoDasProtectionState `xml:"dasVmProtection,omitempty" json:"dasVmProtection,omitempty" vim:"5.0"` + DasVmProtection *VirtualMachineRuntimeInfoDasProtectionState `xml:"dasVmProtection,omitempty" json:"dasVmProtection,omitempty"` // Flag to indicate whether or not the VMware Tools installer // is mounted as a CD-ROM. ToolsInstallerMounted bool `xml:"toolsInstallerMounted" json:"toolsInstallerMounted"` @@ -90668,18 +90239,18 @@ type VirtualMachineRuntimeInfo struct { // Deprecated as of vSphere API 6.0. // // Record / replay state of this virtual machine. 
- RecordReplayState VirtualMachineRecordReplayState `xml:"recordReplayState,omitempty" json:"recordReplayState,omitempty" vim:"4.0"` + RecordReplayState VirtualMachineRecordReplayState `xml:"recordReplayState,omitempty" json:"recordReplayState,omitempty"` // For a powered off virtual machine, indicates whether the virtual // machine's last shutdown was an orderly power off or not. // // Unset if // the virtual machine is running or suspended. - CleanPowerOff *bool `xml:"cleanPowerOff" json:"cleanPowerOff,omitempty" vim:"4.0"` + CleanPowerOff *bool `xml:"cleanPowerOff" json:"cleanPowerOff,omitempty"` // If set, indicates the reason the virtual machine needs a secondary. - NeedSecondaryReason string `xml:"needSecondaryReason,omitempty" json:"needSecondaryReason,omitempty" vim:"4.0"` + NeedSecondaryReason string `xml:"needSecondaryReason,omitempty" json:"needSecondaryReason,omitempty"` // This property indicates whether the guest has gone into one of the // s1, s2 or s3 standby modes, false indicates the guest is awake. - OnlineStandby *bool `xml:"onlineStandby" json:"onlineStandby,omitempty" vim:"5.1"` + OnlineStandby *bool `xml:"onlineStandby" json:"onlineStandby,omitempty"` // For a powered-on or suspended virtual machine in a cluster with Enhanced // VMotion Compatibility (EVC) enabled, this identifies the least-featured // EVC mode (among those for the appropriate CPU vendor) that could admit @@ -90698,48 +90269,48 @@ type VirtualMachineRuntimeInfo struct { // (in the default masks for the // `GuestOsDescriptor` appropriate for the // virtual machine's configured guest OS). - MinRequiredEVCModeKey string `xml:"minRequiredEVCModeKey,omitempty" json:"minRequiredEVCModeKey,omitempty" vim:"4.1"` + MinRequiredEVCModeKey string `xml:"minRequiredEVCModeKey,omitempty" json:"minRequiredEVCModeKey,omitempty"` // Whether any disk of the virtual machine requires consolidation. // // This can happen for example when a snapshot is deleted but its // associated disk is not committed back to the base disk. // Use `VirtualMachine.ConsolidateVMDisks_Task` to consolidate if // needed. - ConsolidationNeeded *bool `xml:"consolidationNeeded" json:"consolidationNeeded,omitempty" vim:"5.0"` + ConsolidationNeeded *bool `xml:"consolidationNeeded" json:"consolidationNeeded,omitempty"` // These requirements must have equivalent host capabilities // `HostConfigInfo.featureCapability` in order to power on. - OfflineFeatureRequirement []VirtualMachineFeatureRequirement `xml:"offlineFeatureRequirement,omitempty" json:"offlineFeatureRequirement,omitempty" vim:"5.1"` + OfflineFeatureRequirement []VirtualMachineFeatureRequirement `xml:"offlineFeatureRequirement,omitempty" json:"offlineFeatureRequirement,omitempty"` // These requirements must have equivalent host capabilities // `HostConfigInfo.featureCapability` in order to power on, // resume, or migrate to the host. - FeatureRequirement []VirtualMachineFeatureRequirement `xml:"featureRequirement,omitempty" json:"featureRequirement,omitempty" vim:"5.1"` + FeatureRequirement []VirtualMachineFeatureRequirement `xml:"featureRequirement,omitempty" json:"featureRequirement,omitempty"` // The masks applied to an individual virtual machine as a result of its // configuration. - FeatureMask []HostFeatureMask `xml:"featureMask,omitempty" json:"featureMask,omitempty" vim:"5.1"` + FeatureMask []HostFeatureMask `xml:"featureMask,omitempty" json:"featureMask,omitempty"` // Deprecated since vSphere 7.0 because vFlash Read Cache // end of availability. 
// // Specifies the total allocated vFlash resource for the vFlash caches associated with VM's // VMDKs when VM is powered on, in bytes. - VFlashCacheAllocation int64 `xml:"vFlashCacheAllocation,omitempty" json:"vFlashCacheAllocation,omitempty" vim:"5.5"` + VFlashCacheAllocation int64 `xml:"vFlashCacheAllocation,omitempty" json:"vFlashCacheAllocation,omitempty"` // Whether the virtual machine is paused, or not. - Paused *bool `xml:"paused" json:"paused,omitempty" vim:"6.0"` + Paused *bool `xml:"paused" json:"paused,omitempty"` // Whether a snapshot operation is in progress in the background, or not. - SnapshotInBackground *bool `xml:"snapshotInBackground" json:"snapshotInBackground,omitempty" vim:"6.0"` + SnapshotInBackground *bool `xml:"snapshotInBackground" json:"snapshotInBackground,omitempty"` // This flag indicates whether a parent virtual machine is in a fork ready // state. // // A persistent instant clone child can be created only when this flag // is true. While a non-persistent instant clone child can be created // independent of this flag, it can only be powered on if this flag is true. - QuiescedForkParent *bool `xml:"quiescedForkParent" json:"quiescedForkParent,omitempty" vim:"6.0"` + QuiescedForkParent *bool `xml:"quiescedForkParent" json:"quiescedForkParent,omitempty"` // Whether the virtual machine is frozen for instant clone, or not. - InstantCloneFrozen *bool `xml:"instantCloneFrozen" json:"instantCloneFrozen,omitempty" vim:"6.7"` + InstantCloneFrozen *bool `xml:"instantCloneFrozen" json:"instantCloneFrozen,omitempty"` // Encryption state of the virtual machine. // // Valid values are enumerated by the // `CryptoState` type. - CryptoState string `xml:"cryptoState,omitempty" json:"cryptoState,omitempty" vim:"6.7"` + CryptoState string `xml:"cryptoState,omitempty" json:"cryptoState,omitempty"` // Whether the virtual machine is suspended to memory, or not. SuspendedToMemory *bool `xml:"suspendedToMemory" json:"suspendedToMemory,omitempty" vim:"7.0.2.0"` // Operation notification timeout in seconds. @@ -90778,7 +90349,6 @@ type VirtualMachineRuntimeInfoDasProtectionState struct { func init() { t["VirtualMachineRuntimeInfoDasProtectionState"] = reflect.TypeOf((*VirtualMachineRuntimeInfoDasProtectionState)(nil)).Elem() - minAPIVersionForType["VirtualMachineRuntimeInfoDasProtectionState"] = "5.0" } // The ScsiDiskDeviceInfo class contains detailed information about a specific @@ -90797,14 +90367,14 @@ type VirtualMachineScsiDiskDeviceInfo struct { // correlate this device with a host physical disk, use the disk property. // This identifier is intended as a hint to end users to identify the // disk device. - TransportHint string `xml:"transportHint,omitempty" json:"transportHint,omitempty" vim:"4.0"` + TransportHint string `xml:"transportHint,omitempty" json:"transportHint,omitempty"` // LUN number hint used to identify the SCSI device. // // To definitively // correlate this device with a host physical disk, use the disk property. // This identifier is intended as a hint to end users to identify the // disk device. - LunNumber int32 `xml:"lunNumber,omitempty" json:"lunNumber,omitempty" vim:"4.0"` + LunNumber int32 `xml:"lunNumber,omitempty" json:"lunNumber,omitempty"` } func init() { @@ -90865,7 +90435,6 @@ type VirtualMachineSgxInfo struct { func init() { t["VirtualMachineSgxInfo"] = reflect.TypeOf((*VirtualMachineSgxInfo)(nil)).Elem() - minAPIVersionForType["VirtualMachineSgxInfo"] = "7.0" } // Description of Intel Software Guard Extensions information. 
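The relocate rules documented in the VirtualMachineRelocateSpec and DiskLocator hunks above can be exercised with a spec like the hedged sketch below: it sends one disk to a given datastore, spells out the documented default diskMoveType explicitly, and attaches a pre-defined storage policy per disk. The disk key 2000, the datastore reference, and the profile ID are all placeholders.

package vmops

import (
	"github.com/vmware/govmomi/vim25/types"
)

// buildRelocateSpec is an illustrative sketch, not part of the vendored
// govmomi source. It relocates the disk with device key 2000 to ds and pins a
// SPBM storage policy on it via VirtualMachineDefinedProfileSpec.
func buildRelocateSpec(ds types.ManagedObjectReference, profileID string) types.VirtualMachineRelocateSpec {
	return types.VirtualMachineRelocateSpec{
		Datastore:    &ds,
		DiskMoveType: "moveAllDiskBackingsAndDisallowSharing", // documented default when unset
		Disk: []types.VirtualMachineRelocateSpecDiskLocator{{
			DiskId:    2000, // device key of the virtual disk to place
			Datastore: ds,
			Profile: []types.BaseVirtualMachineProfileSpec{
				&types.VirtualMachineDefinedProfileSpec{ProfileId: profileID},
			},
		}},
	}
}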
@@ -90888,7 +90457,6 @@ type VirtualMachineSgxTargetInfo struct { func init() { t["VirtualMachineSgxTargetInfo"] = reflect.TypeOf((*VirtualMachineSgxTargetInfo)(nil)).Elem() - minAPIVersionForType["VirtualMachineSgxTargetInfo"] = "7.0" } // The SnapshotInfo data object type provides all the information about the @@ -90932,7 +90500,7 @@ type VirtualMachineSnapshotTree struct { Description string `xml:"description" json:"description"` // The unique identifier that distinguishes this snapshot from // other snapshots of the virtual machine. - Id int32 `xml:"id,omitempty" json:"id,omitempty" vim:"4.0"` + Id int32 `xml:"id,omitempty" json:"id,omitempty"` // The date and time the snapshot was taken. CreateTime time.Time `xml:"createTime" json:"createTime"` // The power state of the virtual machine when this snapshot was taken. @@ -90944,14 +90512,14 @@ type VirtualMachineSnapshotTree struct { // manifest. // // Available for certain quiesced snapshots only. - BackupManifest string `xml:"backupManifest,omitempty" json:"backupManifest,omitempty" vim:"2.5 U2"` + BackupManifest string `xml:"backupManifest,omitempty" json:"backupManifest,omitempty"` // The snapshot data for all snapshots for which this snapshot is the parent. ChildSnapshotList []VirtualMachineSnapshotTree `xml:"childSnapshotList,omitempty" json:"childSnapshotList,omitempty"` // Deprecated as of vSphere API 6.0. // // Flag to indicate whether this snapshot is associated with a recording // session on the virtual machine that can be replayed. - ReplaySupported *bool `xml:"replaySupported" json:"replaySupported,omitempty" vim:"4.0"` + ReplaySupported *bool `xml:"replaySupported" json:"replaySupported,omitempty"` } func init() { @@ -90966,7 +90534,6 @@ type VirtualMachineSoundInfo struct { func init() { t["VirtualMachineSoundInfo"] = reflect.TypeOf((*VirtualMachineSoundInfo)(nil)).Elem() - minAPIVersionForType["VirtualMachineSoundInfo"] = "2.5" } type VirtualMachineSriovDevicePoolInfo struct { @@ -90991,12 +90558,11 @@ type VirtualMachineSriovInfo struct { // capable physical function. Pnic string `xml:"pnic,omitempty" json:"pnic,omitempty"` // SRIOV DevicePool information - DevicePool BaseVirtualMachineSriovDevicePoolInfo `xml:"devicePool,omitempty,typeattr" json:"devicePool,omitempty" vim:"6.5"` + DevicePool BaseVirtualMachineSriovDevicePoolInfo `xml:"devicePool,omitempty,typeattr" json:"devicePool,omitempty"` } func init() { t["VirtualMachineSriovInfo"] = reflect.TypeOf((*VirtualMachineSriovInfo)(nil)).Elem() - minAPIVersionForType["VirtualMachineSriovInfo"] = "5.5" } // This class is networking specific SR-IOV device pool info @@ -91011,7 +90577,6 @@ type VirtualMachineSriovNetworkDevicePoolInfo struct { func init() { t["VirtualMachineSriovNetworkDevicePoolInfo"] = reflect.TypeOf((*VirtualMachineSriovNetworkDevicePoolInfo)(nil)).Elem() - minAPIVersionForType["VirtualMachineSriovNetworkDevicePoolInfo"] = "6.5" } // Information about the amount of storage used by a virtual machine across @@ -91034,7 +90599,6 @@ type VirtualMachineStorageInfo struct { func init() { t["VirtualMachineStorageInfo"] = reflect.TypeOf((*VirtualMachineStorageInfo)(nil)).Elem() - minAPIVersionForType["VirtualMachineStorageInfo"] = "4.0" } // A subset of the storage information of this virtual machine. 
@@ -91066,7 +90630,6 @@ type VirtualMachineStorageSummary struct { func init() { t["VirtualMachineStorageSummary"] = reflect.TypeOf((*VirtualMachineStorageSummary)(nil)).Elem() - minAPIVersionForType["VirtualMachineStorageSummary"] = "4.0" } // The summary data object type encapsulates a typical set of virtual machine @@ -91110,7 +90673,7 @@ type VirtualMachineSummary struct { // contain values for this property when some other property on the DataObject changes. // If this update is a result of a call to WaitForUpdatesEx with a non-empty // version parameter, the value for this property may not be current. - Storage *VirtualMachineStorageSummary `xml:"storage,omitempty" json:"storage,omitempty" vim:"4.0"` + Storage *VirtualMachineStorageSummary `xml:"storage,omitempty" json:"storage,omitempty"` // A set of statistics that are typically updated with near real-time regularity. // // This data object type does not support notification, for scalability reasons. @@ -91202,12 +90765,11 @@ type VirtualMachineTicket struct { // // Some tickets are "websocket" tickets and are best expressed // as a URL. - Url string `xml:"url,omitempty" json:"url,omitempty" vim:"7.0"` + Url string `xml:"url,omitempty" json:"url,omitempty"` } func init() { t["VirtualMachineTicket"] = reflect.TypeOf((*VirtualMachineTicket)(nil)).Elem() - minAPIVersionForType["VirtualMachineTicket"] = "4.1" } // Storage space used by this virtual machine on a particular datastore. @@ -91244,7 +90806,6 @@ type VirtualMachineUsageOnDatastore struct { func init() { t["VirtualMachineUsageOnDatastore"] = reflect.TypeOf((*VirtualMachineUsageOnDatastore)(nil)).Elem() - minAPIVersionForType["VirtualMachineUsageOnDatastore"] = "4.0" } // This data object contains information about a physical USB device @@ -91281,7 +90842,6 @@ type VirtualMachineUsbInfo struct { func init() { t["VirtualMachineUsbInfo"] = reflect.TypeOf((*VirtualMachineUsbInfo)(nil)).Elem() - minAPIVersionForType["VirtualMachineUsbInfo"] = "2.5" } // VFlashModuleInfo class contains information about a vFlash module @@ -91295,7 +90855,6 @@ type VirtualMachineVFlashModuleInfo struct { func init() { t["VirtualMachineVFlashModuleInfo"] = reflect.TypeOf((*VirtualMachineVFlashModuleInfo)(nil)).Elem() - minAPIVersionForType["VirtualMachineVFlashModuleInfo"] = "5.5" } // The `VirtualMachineVMCIDevice` data object represents @@ -91353,15 +90912,14 @@ type VirtualMachineVMCIDevice struct { // // Set this property to enable or disable filter rules as specified // in `VirtualMachineVMCIDevice.filterInfo`. - FilterEnable *bool `xml:"filterEnable" json:"filterEnable,omitempty" vim:"6.0"` + FilterEnable *bool `xml:"filterEnable" json:"filterEnable,omitempty"` // Specify a `VirtualMachineVMCIDeviceFilterInfo` data object that controls the extent of // VMCI communication with this virtual machine. - FilterInfo *VirtualMachineVMCIDeviceFilterInfo `xml:"filterInfo,omitempty" json:"filterInfo,omitempty" vim:"6.0"` + FilterInfo *VirtualMachineVMCIDeviceFilterInfo `xml:"filterInfo,omitempty" json:"filterInfo,omitempty"` } func init() { t["VirtualMachineVMCIDevice"] = reflect.TypeOf((*VirtualMachineVMCIDevice)(nil)).Elem() - minAPIVersionForType["VirtualMachineVMCIDevice"] = "2.5 U2" } // The `VirtualMachineVMCIDeviceFilterInfo` data object contains an array of filters. 
@@ -91377,7 +90935,6 @@ type VirtualMachineVMCIDeviceFilterInfo struct { func init() { t["VirtualMachineVMCIDeviceFilterInfo"] = reflect.TypeOf((*VirtualMachineVMCIDeviceFilterInfo)(nil)).Elem() - minAPIVersionForType["VirtualMachineVMCIDeviceFilterInfo"] = "6.0" } // The `VirtualMachineVMCIDeviceFilterSpec` data object describes a filter based on protocol, @@ -91418,7 +90975,6 @@ type VirtualMachineVMCIDeviceFilterSpec struct { func init() { t["VirtualMachineVMCIDeviceFilterSpec"] = reflect.TypeOf((*VirtualMachineVMCIDeviceFilterSpec)(nil)).Elem() - minAPIVersionForType["VirtualMachineVMCIDeviceFilterSpec"] = "6.0" } // The `VirtualMachineVMCIDeviceOption` data object contains the options @@ -91438,19 +90994,18 @@ type VirtualMachineVMCIDeviceOption struct { // no effect on these platforms. AllowUnrestrictedCommunication BoolOption `xml:"allowUnrestrictedCommunication" json:"allowUnrestrictedCommunication"` // Filter specification options. - FilterSpecOption *VirtualMachineVMCIDeviceOptionFilterSpecOption `xml:"filterSpecOption,omitempty" json:"filterSpecOption,omitempty" vim:"6.0"` + FilterSpecOption *VirtualMachineVMCIDeviceOptionFilterSpecOption `xml:"filterSpecOption,omitempty" json:"filterSpecOption,omitempty"` // Indicates support for VMCI firewall filters and specifies the default // operation. // // If `BoolOption.supported` is set to true, // then firewall filtering can be used for this virtual machine to allow // or deny traffic over VMCI. - FilterSupported *BoolOption `xml:"filterSupported,omitempty" json:"filterSupported,omitempty" vim:"6.0"` + FilterSupported *BoolOption `xml:"filterSupported,omitempty" json:"filterSupported,omitempty"` } func init() { t["VirtualMachineVMCIDeviceOption"] = reflect.TypeOf((*VirtualMachineVMCIDeviceOption)(nil)).Elem() - minAPIVersionForType["VirtualMachineVMCIDeviceOption"] = "2.5 U2" } // Filter specification options. @@ -91477,7 +91032,6 @@ type VirtualMachineVMCIDeviceOptionFilterSpecOption struct { func init() { t["VirtualMachineVMCIDeviceOptionFilterSpecOption"] = reflect.TypeOf((*VirtualMachineVMCIDeviceOptionFilterSpecOption)(nil)).Elem() - minAPIVersionForType["VirtualMachineVMCIDeviceOptionFilterSpecOption"] = "6.0" } // Deprecated as of vSphere API 6.0. On vSphere 6.0 and later @@ -91491,7 +91045,6 @@ type VirtualMachineVMIROM struct { func init() { t["VirtualMachineVMIROM"] = reflect.TypeOf((*VirtualMachineVMIROM)(nil)).Elem() - minAPIVersionForType["VirtualMachineVMIROM"] = "2.5" } // Description of VMotion Stun Time. @@ -91532,7 +91085,6 @@ type VirtualMachineVcpuConfig struct { func init() { t["VirtualMachineVcpuConfig"] = reflect.TypeOf((*VirtualMachineVcpuConfig)(nil)).Elem() - minAPIVersionForType["VirtualMachineVcpuConfig"] = "7.0" } // Description of a PCI vendor device group device. @@ -91645,19 +91197,19 @@ type VirtualMachineVideoCard struct { // bounded by the video RAM size of the virtual video card. // This property can only be updated when the virtual machine is // powered off. - NumDisplays int32 `xml:"numDisplays,omitempty" json:"numDisplays,omitempty" vim:"2.5 U2"` + NumDisplays int32 `xml:"numDisplays,omitempty" json:"numDisplays,omitempty"` // Flag to indicate whether the display settings of the host on which the // virtual machine is running should be used to automatically determine // the display settings of the virtual machine's video card. // // This setting takes effect at virtual machine power-on time. If this // value is set to TRUE, numDisplays will be ignored. 
- UseAutoDetect *bool `xml:"useAutoDetect" json:"useAutoDetect,omitempty" vim:"2.5 U2"` + UseAutoDetect *bool `xml:"useAutoDetect" json:"useAutoDetect,omitempty"` // Flag to indicate whether the virtual video card supports 3D functions. // // This property can only be updated when the virtual machine is powered // off. - Enable3DSupport *bool `xml:"enable3DSupport" json:"enable3DSupport,omitempty" vim:"2.5 U2"` + Enable3DSupport *bool `xml:"enable3DSupport" json:"enable3DSupport,omitempty"` // Indicate how the virtual video device renders 3D graphics. // // The virtual video device can use hardware acceleration and software @@ -91674,14 +91226,14 @@ type VirtualMachineVideoCard struct { // will not attempt to use hardware acceleration. // (hardware) - The virtual device will use hardware acceleration and // will not activate without it. - Use3dRenderer string `xml:"use3dRenderer,omitempty" json:"use3dRenderer,omitempty" vim:"5.1"` + Use3dRenderer string `xml:"use3dRenderer,omitempty" json:"use3dRenderer,omitempty"` // The size of graphics memory. // // If 3d support is enabled this setting gives the amount of guest memory // used for graphics resources. // This property can only be updated when the virtual machine is // powered off. - GraphicsMemorySizeInKB int64 `xml:"graphicsMemorySizeInKB,omitempty" json:"graphicsMemorySizeInKB,omitempty" vim:"6.0"` + GraphicsMemorySizeInKB int64 `xml:"graphicsMemorySizeInKB,omitempty" json:"graphicsMemorySizeInKB,omitempty"` } func init() { @@ -91898,7 +91450,6 @@ type VirtualMachineWindowsQuiesceSpec struct { func init() { t["VirtualMachineWindowsQuiesceSpec"] = reflect.TypeOf((*VirtualMachineWindowsQuiesceSpec)(nil)).Elem() - minAPIVersionForType["VirtualMachineWindowsQuiesceSpec"] = "6.5" } // Data structure used by wipeDisk to return the amount of disk space that @@ -91915,7 +91466,6 @@ type VirtualMachineWipeResult struct { func init() { t["VirtualMachineWipeResult"] = reflect.TypeOf((*VirtualMachineWipeResult)(nil)).Elem() - minAPIVersionForType["VirtualMachineWipeResult"] = "5.1" } // The Virtual NVDIMM device. @@ -91933,7 +91483,6 @@ type VirtualNVDIMM struct { func init() { t["VirtualNVDIMM"] = reflect.TypeOf((*VirtualNVDIMM)(nil)).Elem() - minAPIVersionForType["VirtualNVDIMM"] = "6.7" } // The `VirtualNVDIMMBackingInfo` data object type @@ -91955,7 +91504,6 @@ type VirtualNVDIMMBackingInfo struct { func init() { t["VirtualNVDIMMBackingInfo"] = reflect.TypeOf((*VirtualNVDIMMBackingInfo)(nil)).Elem() - minAPIVersionForType["VirtualNVDIMMBackingInfo"] = "6.7" } // The Virtual NVDIMM controller. @@ -91965,7 +91513,6 @@ type VirtualNVDIMMController struct { func init() { t["VirtualNVDIMMController"] = reflect.TypeOf((*VirtualNVDIMMController)(nil)).Elem() - minAPIVersionForType["VirtualNVDIMMController"] = "6.7" } // VirtualNVDIMMControllerOption is the data object that contains @@ -91979,7 +91526,6 @@ type VirtualNVDIMMControllerOption struct { func init() { t["VirtualNVDIMMControllerOption"] = reflect.TypeOf((*VirtualNVDIMMControllerOption)(nil)).Elem() - minAPIVersionForType["VirtualNVDIMMControllerOption"] = "6.7" } // The VirtualNVDIMMOption contains information about @@ -92003,7 +91549,6 @@ type VirtualNVDIMMOption struct { func init() { t["VirtualNVDIMMOption"] = reflect.TypeOf((*VirtualNVDIMMOption)(nil)).Elem() - minAPIVersionForType["VirtualNVDIMMOption"] = "6.7" } // The Virtual NVME controller. 
@@ -92021,7 +91566,6 @@ type VirtualNVMEController struct { func init() { t["VirtualNVMEController"] = reflect.TypeOf((*VirtualNVMEController)(nil)).Elem() - minAPIVersionForType["VirtualNVMEController"] = "6.5" } // VirtualNVMEControllerOption is the data object that contains @@ -92045,7 +91589,6 @@ type VirtualNVMEControllerOption struct { func init() { t["VirtualNVMEControllerOption"] = reflect.TypeOf((*VirtualNVMEControllerOption)(nil)).Elem() - minAPIVersionForType["VirtualNVMEControllerOption"] = "6.5" } // The NetConfig data object type contains the networking @@ -92068,7 +91611,6 @@ type VirtualNicManagerNetConfig struct { func init() { t["VirtualNicManagerNetConfig"] = reflect.TypeOf((*VirtualNicManagerNetConfig)(nil)).Elem() - minAPIVersionForType["VirtualNicManagerNetConfig"] = "4.0" } // The VirtualPCIController data object type defines a virtual PCI @@ -92124,21 +91666,21 @@ type VirtualPCIControllerOption struct { // // This is also limited // by the number of available slots in the PCI controller. - NumVmiRoms IntOption `xml:"numVmiRoms" json:"numVmiRoms" vim:"2.5"` + NumVmiRoms IntOption `xml:"numVmiRoms" json:"numVmiRoms"` // Defines the minimum, maximum, and default // number of VirtualVMCIDevice instances available, // at any given time, in the PCI controller. // // This is also limited // by the number of available slots in the PCI controller. - NumVmciDevices *IntOption `xml:"numVmciDevices,omitempty" json:"numVmciDevices,omitempty" vim:"2.5 U2"` + NumVmciDevices *IntOption `xml:"numVmciDevices,omitempty" json:"numVmciDevices,omitempty"` // Defines the minimum, maximum, and default // number of VirtualPCIPassthrough instances available, // at any given time, in the PCI controller. // // This is also limited // by the number of available PCI Express slots in the PCI controller. - NumPCIPassthroughDevices *IntOption `xml:"numPCIPassthroughDevices,omitempty" json:"numPCIPassthroughDevices,omitempty" vim:"2.5 U2"` + NumPCIPassthroughDevices *IntOption `xml:"numPCIPassthroughDevices,omitempty" json:"numPCIPassthroughDevices,omitempty"` // Defines the minimum, maximum, and default // number of VirtualLsiLogicSASController instances available, // at any given time, in the PCI controller. @@ -92146,7 +91688,7 @@ type VirtualPCIControllerOption struct { // This is also limited // by the number of available PCI Express slots in the PCI controller // as well as the total number of supported SCSI controllers. - NumSasSCSIControllers *IntOption `xml:"numSasSCSIControllers,omitempty" json:"numSasSCSIControllers,omitempty" vim:"2.5 U2"` + NumSasSCSIControllers *IntOption `xml:"numSasSCSIControllers,omitempty" json:"numSasSCSIControllers,omitempty"` // Defines the minimum, maximum, and default // number of VirtualVmxnet3 ethernet card instances available, // at any given time, in the PCI controller. @@ -92154,7 +91696,7 @@ type VirtualPCIControllerOption struct { // This is also limited // by the number of available PCI Express slots in the PCI controller // as well as the total number of supported ethernet cards. - NumVmxnet3EthernetCards *IntOption `xml:"numVmxnet3EthernetCards,omitempty" json:"numVmxnet3EthernetCards,omitempty" vim:"2.5 U2"` + NumVmxnet3EthernetCards *IntOption `xml:"numVmxnet3EthernetCards,omitempty" json:"numVmxnet3EthernetCards,omitempty"` // Defines the minimum, maximum, and default // number of ParaVirtualScsiController instances available, // at any given time, in the PCI controller. 
@@ -92162,7 +91704,7 @@ type VirtualPCIControllerOption struct { // This is also limited // by the number of available PCI Express slots in the PCI controller // as well as the total number of supported SCSI controllers. - NumParaVirtualSCSIControllers *IntOption `xml:"numParaVirtualSCSIControllers,omitempty" json:"numParaVirtualSCSIControllers,omitempty" vim:"2.5 U2"` + NumParaVirtualSCSIControllers *IntOption `xml:"numParaVirtualSCSIControllers,omitempty" json:"numParaVirtualSCSIControllers,omitempty"` // Defines the minimum, maximum, and default // number of VirtualSATAController instances available, // at any given time, in the PCI controller. @@ -92170,7 +91712,7 @@ type VirtualPCIControllerOption struct { // This is also limited // by the number of available PCI Express slots in the PCI controller // as well as the total number of supported SATA controllers. - NumSATAControllers *IntOption `xml:"numSATAControllers,omitempty" json:"numSATAControllers,omitempty" vim:"5.5"` + NumSATAControllers *IntOption `xml:"numSATAControllers,omitempty" json:"numSATAControllers,omitempty"` // Defines the minimum, maximum, and default // number of VirtualNVMEController instances available, // at any given time, in the PCI controller. @@ -92178,7 +91720,7 @@ type VirtualPCIControllerOption struct { // This is also limited // by the number of available PCI Express slots in the PCI controller // as well as the total number of supported NVME controllers. - NumNVMEControllers *IntOption `xml:"numNVMEControllers,omitempty" json:"numNVMEControllers,omitempty" vim:"6.5"` + NumNVMEControllers *IntOption `xml:"numNVMEControllers,omitempty" json:"numNVMEControllers,omitempty"` // Defines the minimum, maximum, and default // number of VirtualVmxnet3Vrdma ethernet card instances available, // at any given time, in the PCI controller. @@ -92186,7 +91728,7 @@ type VirtualPCIControllerOption struct { // This is also limited // by the number of available PCI Express slots in the PCI controller // as well as the total number of supported ethernet cards. 
- NumVmxnet3VrdmaEthernetCards *IntOption `xml:"numVmxnet3VrdmaEthernetCards,omitempty" json:"numVmxnet3VrdmaEthernetCards,omitempty" vim:"6.7"` + NumVmxnet3VrdmaEthernetCards *IntOption `xml:"numVmxnet3VrdmaEthernetCards,omitempty" json:"numVmxnet3VrdmaEthernetCards,omitempty"` } func init() { @@ -92202,7 +91744,6 @@ type VirtualPCIPassthrough struct { func init() { t["VirtualPCIPassthrough"] = reflect.TypeOf((*VirtualPCIPassthrough)(nil)).Elem() - minAPIVersionForType["VirtualPCIPassthrough"] = "4.0" } // A tuple of vendorId and deviceId indicating an allowed device @@ -92242,7 +91783,6 @@ type VirtualPCIPassthroughAllowedDevice struct { func init() { t["VirtualPCIPassthroughAllowedDevice"] = reflect.TypeOf((*VirtualPCIPassthroughAllowedDevice)(nil)).Elem() - minAPIVersionForType["VirtualPCIPassthroughAllowedDevice"] = "7.0" } // The VirtualPCIPassthrough.DeviceBackingInfo data object type @@ -92270,7 +91810,6 @@ type VirtualPCIPassthroughDeviceBackingInfo struct { func init() { t["VirtualPCIPassthroughDeviceBackingInfo"] = reflect.TypeOf((*VirtualPCIPassthroughDeviceBackingInfo)(nil)).Elem() - minAPIVersionForType["VirtualPCIPassthroughDeviceBackingInfo"] = "4.0" } // This data object type describes the options for the @@ -92281,7 +91820,6 @@ type VirtualPCIPassthroughDeviceBackingOption struct { func init() { t["VirtualPCIPassthroughDeviceBackingOption"] = reflect.TypeOf((*VirtualPCIPassthroughDeviceBackingOption)(nil)).Elem() - minAPIVersionForType["VirtualPCIPassthroughDeviceBackingOption"] = "4.0" } // DVX Device specific information. @@ -92343,7 +91881,6 @@ type VirtualPCIPassthroughDynamicBackingInfo struct { func init() { t["VirtualPCIPassthroughDynamicBackingInfo"] = reflect.TypeOf((*VirtualPCIPassthroughDynamicBackingInfo)(nil)).Elem() - minAPIVersionForType["VirtualPCIPassthroughDynamicBackingInfo"] = "7.0" } // This data object type describes the options for the @@ -92354,7 +91891,6 @@ type VirtualPCIPassthroughDynamicBackingOption struct { func init() { t["VirtualPCIPassthroughDynamicBackingOption"] = reflect.TypeOf((*VirtualPCIPassthroughDynamicBackingOption)(nil)).Elem() - minAPIVersionForType["VirtualPCIPassthroughDynamicBackingOption"] = "7.0" } // The VirtualPCIPassthroughOption data object type describes the options @@ -92367,7 +91903,6 @@ type VirtualPCIPassthroughOption struct { func init() { t["VirtualPCIPassthroughOption"] = reflect.TypeOf((*VirtualPCIPassthroughOption)(nil)).Elem() - minAPIVersionForType["VirtualPCIPassthroughOption"] = "4.0" } // The VirtualPCIPassthrough.PluginBackingInfo is a base data object type @@ -92382,7 +91917,6 @@ type VirtualPCIPassthroughPluginBackingInfo struct { func init() { t["VirtualPCIPassthroughPluginBackingInfo"] = reflect.TypeOf((*VirtualPCIPassthroughPluginBackingInfo)(nil)).Elem() - minAPIVersionForType["VirtualPCIPassthroughPluginBackingInfo"] = "6.0" } // This data object type describes the options for the @@ -92393,7 +91927,6 @@ type VirtualPCIPassthroughPluginBackingOption struct { func init() { t["VirtualPCIPassthroughPluginBackingOption"] = reflect.TypeOf((*VirtualPCIPassthroughPluginBackingOption)(nil)).Elem() - minAPIVersionForType["VirtualPCIPassthroughPluginBackingOption"] = "6.0" } // The VirtualPCIPassthrough.VmiopBackingInfo data object type @@ -92417,7 +91950,6 @@ type VirtualPCIPassthroughVmiopBackingInfo struct { func init() { t["VirtualPCIPassthroughVmiopBackingInfo"] = reflect.TypeOf((*VirtualPCIPassthroughVmiopBackingInfo)(nil)).Elem() - minAPIVersionForType["VirtualPCIPassthroughVmiopBackingInfo"] = "6.0" 
} // This data object type describes the options for the @@ -92440,7 +91972,6 @@ type VirtualPCIPassthroughVmiopBackingOption struct { func init() { t["VirtualPCIPassthroughVmiopBackingOption"] = reflect.TypeOf((*VirtualPCIPassthroughVmiopBackingOption)(nil)).Elem() - minAPIVersionForType["VirtualPCIPassthroughVmiopBackingOption"] = "6.0" } // This data object type defines the properties @@ -92501,9 +92032,9 @@ type VirtualPS2ControllerOption struct { // This is further constrained by the number // of available slots in the PS/2 controller. The minimum, maximum, // and default are integers defined by three properties: - // - `*numKeyBoards.min*`: the minimum. - // - `*numKeyBoards.max*`: the maximum. - // - `*numKeyBoards.defaultValue*`: the default number. + // - `*numKeyBoards.min*`: the minimum. + // - `*numKeyBoards.max*`: the maximum. + // - `*numKeyBoards.defaultValue*`: the default number. NumKeyboards IntOption `xml:"numKeyboards" json:"numKeyboards"` // The minimum, maximum, and default number of mice you can // have at any given time. @@ -92511,9 +92042,9 @@ type VirtualPS2ControllerOption struct { // The number of mice is also limited by the number // of available slots in the PS/2 controller. The minimum, maximum, and // default are integers defined by three properties: - // - `*numPointingDevices.min*`: the minimum. - // - `*numPointingDevices.max*`: the maximum. - // - `*numPointingDevices.defaultValue*`: the default number. + // - `*numPointingDevices.min*`: the minimum. + // - `*numPointingDevices.max*`: the maximum. + // - `*numPointingDevices.defaultValue*`: the default number. NumPointingDevices IntOption `xml:"numPointingDevices" json:"numPointingDevices"` } @@ -92597,14 +92128,14 @@ type VirtualPointingDeviceBackingOption struct { // This object defines the supported mouse types, including the default // supported mouse type, with the following properties: - // - `*hostPointingDevices.value*`: This array defines the - // supported mouse types. - // - `*hostPointingDevices.choiceDescription*`: This array - // provides the descriptions for the supported mouse types defined by - // hostPointingDevices.value. - // - `*hostPointingDevices.defaultIndex*`: This integer points - // to an index in the hostPointingDevices.value array. This is the - // mouse type supported by default. + // - `*hostPointingDevices.value*`: This array defines the + // supported mouse types. + // - `*hostPointingDevices.choiceDescription*`: This array + // provides the descriptions for the supported mouse types defined by + // hostPointingDevices.value. + // - `*hostPointingDevices.defaultIndex*`: This integer points + // to an index in the hostPointingDevices.value array. This is the + // mouse type supported by default. 
HostPointingDevice ChoiceOption `xml:"hostPointingDevice" json:"hostPointingDevice"` } @@ -92661,7 +92192,6 @@ type VirtualPrecisionClock struct { func init() { t["VirtualPrecisionClock"] = reflect.TypeOf((*VirtualPrecisionClock)(nil)).Elem() - minAPIVersionForType["VirtualPrecisionClock"] = "7.0" } // The VirtualPrecisionClockOption data object type describes the @@ -92673,7 +92203,6 @@ type VirtualPrecisionClockOption struct { func init() { t["VirtualPrecisionClockOption"] = reflect.TypeOf((*VirtualPrecisionClockOption)(nil)).Elem() - minAPIVersionForType["VirtualPrecisionClockOption"] = "7.0" } // The `VirtualPrecisionClockSystemClockBackingInfo` @@ -92690,7 +92219,6 @@ type VirtualPrecisionClockSystemClockBackingInfo struct { func init() { t["VirtualPrecisionClockSystemClockBackingInfo"] = reflect.TypeOf((*VirtualPrecisionClockSystemClockBackingInfo)(nil)).Elem() - minAPIVersionForType["VirtualPrecisionClockSystemClockBackingInfo"] = "7.0" } // This data object type describes the options for the @@ -92706,7 +92234,6 @@ type VirtualPrecisionClockSystemClockBackingOption struct { func init() { t["VirtualPrecisionClockSystemClockBackingOption"] = reflect.TypeOf((*VirtualPrecisionClockSystemClockBackingOption)(nil)).Elem() - minAPIVersionForType["VirtualPrecisionClockSystemClockBackingOption"] = "7.0" } // The VirtualSATAController data object type represents @@ -92717,7 +92244,6 @@ type VirtualSATAController struct { func init() { t["VirtualSATAController"] = reflect.TypeOf((*VirtualSATAController)(nil)).Elem() - minAPIVersionForType["VirtualSATAController"] = "5.5" } // The VirtualSATAControllerOption data object type contains the options @@ -92747,7 +92273,6 @@ type VirtualSATAControllerOption struct { func init() { t["VirtualSATAControllerOption"] = reflect.TypeOf((*VirtualSATAControllerOption)(nil)).Elem() - minAPIVersionForType["VirtualSATAControllerOption"] = "5.5" } // The VirtualSCSIController data object type represents @@ -93156,15 +92681,15 @@ type VirtualSerialPortPipeBackingOption struct { // // If optimized data transfer is supported (noRxLoss.supported // is true): - // - You can enable (or disable) the feature explicitly by setting the - // `VirtualSerialPortPipeBackingInfo.noRxLoss` - // property on the pipe backing information object. - // - If you do not set the - // `VirtualSerialPortPipeBackingInfo.noRxLoss` - // property on the - // the pipe backing information object, the server enables - // optimized data transfer if the noRxLoss.defaultValue - // property on the pipe backing options object is true. + // - You can enable (or disable) the feature explicitly by setting the + // `VirtualSerialPortPipeBackingInfo.noRxLoss` + // property on the pipe backing information object. + // - If you do not set the + // `VirtualSerialPortPipeBackingInfo.noRxLoss` + // property on the + // the pipe backing information object, the server enables + // optimized data transfer if the noRxLoss.defaultValue + // property on the pipe backing options object is true. // // If noRxLoss.supported is false, the server // ignores the optimization settings. 
@@ -93187,7 +92712,6 @@ type VirtualSerialPortThinPrintBackingInfo struct { func init() { t["VirtualSerialPortThinPrintBackingInfo"] = reflect.TypeOf((*VirtualSerialPortThinPrintBackingInfo)(nil)).Elem() - minAPIVersionForType["VirtualSerialPortThinPrintBackingInfo"] = "5.1" } // The `VirtualSerialPortThinPrintBackingOption` data @@ -93198,7 +92722,6 @@ type VirtualSerialPortThinPrintBackingOption struct { func init() { t["VirtualSerialPortThinPrintBackingOption"] = reflect.TypeOf((*VirtualSerialPortThinPrintBackingOption)(nil)).Elem() - minAPIVersionForType["VirtualSerialPortThinPrintBackingOption"] = "5.1" } // The `VirtualSerialPortURIBackingInfo` data object @@ -93333,7 +92856,6 @@ type VirtualSerialPortURIBackingInfo struct { func init() { t["VirtualSerialPortURIBackingInfo"] = reflect.TypeOf((*VirtualSerialPortURIBackingInfo)(nil)).Elem() - minAPIVersionForType["VirtualSerialPortURIBackingInfo"] = "4.1" } // The `VirtualSerialPortURIBackingOption` data object type @@ -93344,7 +92866,6 @@ type VirtualSerialPortURIBackingOption struct { func init() { t["VirtualSerialPortURIBackingOption"] = reflect.TypeOf((*VirtualSerialPortURIBackingOption)(nil)).Elem() - minAPIVersionForType["VirtualSerialPortURIBackingOption"] = "4.1" } // The VirtualSoundBlaster16 data object type represents a Sound @@ -93431,7 +92952,6 @@ type VirtualSriovEthernetCard struct { func init() { t["VirtualSriovEthernetCard"] = reflect.TypeOf((*VirtualSriovEthernetCard)(nil)).Elem() - minAPIVersionForType["VirtualSriovEthernetCard"] = "5.5" } // The VirtualSriovEthernetCardOption data object contains the options for the @@ -93442,7 +92962,6 @@ type VirtualSriovEthernetCardOption struct { func init() { t["VirtualSriovEthernetCardOption"] = reflect.TypeOf((*VirtualSriovEthernetCardOption)(nil)).Elem() - minAPIVersionForType["VirtualSriovEthernetCardOption"] = "5.5" } // The `VirtualSriovEthernetCardSriovBackingInfo` @@ -93483,7 +93002,6 @@ type VirtualSriovEthernetCardSriovBackingInfo struct { func init() { t["VirtualSriovEthernetCardSriovBackingInfo"] = reflect.TypeOf((*VirtualSriovEthernetCardSriovBackingInfo)(nil)).Elem() - minAPIVersionForType["VirtualSriovEthernetCardSriovBackingInfo"] = "5.5" } // This data object contains the option for SriovBackingInfo data @@ -93494,7 +93012,6 @@ type VirtualSriovEthernetCardSriovBackingOption struct { func init() { t["VirtualSriovEthernetCardSriovBackingOption"] = reflect.TypeOf((*VirtualSriovEthernetCardSriovBackingOption)(nil)).Elem() - minAPIVersionForType["VirtualSriovEthernetCardSriovBackingOption"] = "5.5" } // The `VirtualSwitchProfile` data object represents a subprofile @@ -93520,7 +93037,6 @@ type VirtualSwitchProfile struct { func init() { t["VirtualSwitchProfile"] = reflect.TypeOf((*VirtualSwitchProfile)(nil)).Elem() - minAPIVersionForType["VirtualSwitchProfile"] = "4.0" } // The `VirtualSwitchSelectionProfile` data object represents @@ -93534,7 +93050,6 @@ type VirtualSwitchSelectionProfile struct { func init() { t["VirtualSwitchSelectionProfile"] = reflect.TypeOf((*VirtualSwitchSelectionProfile)(nil)).Elem() - minAPIVersionForType["VirtualSwitchSelectionProfile"] = "4.0" } // This data object type represents a TPM 2.0 module @@ -93558,7 +93073,6 @@ type VirtualTPM struct { func init() { t["VirtualTPM"] = reflect.TypeOf((*VirtualTPM)(nil)).Elem() - minAPIVersionForType["VirtualTPM"] = "6.7" } // This data object type contains the options for the @@ -93575,7 +93089,6 @@ type VirtualTPMOption struct { func init() { t["VirtualTPMOption"] = 
reflect.TypeOf((*VirtualTPMOption)(nil)).Elem() - minAPIVersionForType["VirtualTPMOption"] = "6.7" } // The `VirtualUSB` data object describes the USB device configuration @@ -93647,21 +93160,21 @@ type VirtualUSB struct { // can not be satisfied, either // because there is no such device, or the matching device is not // available. Valid only while the virtual machine is running. - Connected bool `xml:"connected" json:"connected" vim:"2.5"` + Connected bool `xml:"connected" json:"connected"` // Vendor ID of the USB device. - Vendor int32 `xml:"vendor,omitempty" json:"vendor,omitempty" vim:"4.1"` + Vendor int32 `xml:"vendor,omitempty" json:"vendor,omitempty"` // Product ID of the USB device. - Product int32 `xml:"product,omitempty" json:"product,omitempty" vim:"4.1"` + Product int32 `xml:"product,omitempty" json:"product,omitempty"` // Device class families. // // For possible values see // `VirtualMachineUsbInfoFamily_enum`. - Family []string `xml:"family,omitempty" json:"family,omitempty" vim:"4.1"` + Family []string `xml:"family,omitempty" json:"family,omitempty"` // Device speeds detected by server. // // For possible values see // `VirtualMachineUsbInfoSpeed_enum`. - Speed []string `xml:"speed,omitempty" json:"speed,omitempty" vim:"4.1"` + Speed []string `xml:"speed,omitempty" json:"speed,omitempty"` } func init() { @@ -93695,7 +93208,7 @@ type VirtualUSBController struct { AutoConnectDevices *bool `xml:"autoConnectDevices" json:"autoConnectDevices,omitempty"` // Flag to indicate whether or not enhanced host controller // interface (USB 2.0) is enabled on this controller. - EhciEnabled *bool `xml:"ehciEnabled" json:"ehciEnabled,omitempty" vim:"2.5"` + EhciEnabled *bool `xml:"ehciEnabled" json:"ehciEnabled,omitempty"` } func init() { @@ -93712,11 +93225,11 @@ type VirtualUSBControllerOption struct { AutoConnectDevices BoolOption `xml:"autoConnectDevices" json:"autoConnectDevices"` // Flag to indicate whether or not enhanced host controller // interface (USB 2.0) is available on this virtual USB controller. - EhciSupported BoolOption `xml:"ehciSupported" json:"ehciSupported" vim:"2.5"` + EhciSupported BoolOption `xml:"ehciSupported" json:"ehciSupported"` // Range of USB device speeds supported by this USB controller type. // // Acceptable values are specified at `VirtualMachineUsbInfoSpeed_enum`. 
- SupportedSpeeds []string `xml:"supportedSpeeds,omitempty" json:"supportedSpeeds,omitempty" vim:"5.0"` + SupportedSpeeds []string `xml:"supportedSpeeds,omitempty" json:"supportedSpeeds,omitempty"` } func init() { @@ -93738,7 +93251,6 @@ type VirtualUSBControllerPciBusSlotInfo struct { func init() { t["VirtualUSBControllerPciBusSlotInfo"] = reflect.TypeOf((*VirtualUSBControllerPciBusSlotInfo)(nil)).Elem() - minAPIVersionForType["VirtualUSBControllerPciBusSlotInfo"] = "5.1" } // The `VirtualUSBOption` data object type contains options for @@ -93770,7 +93282,6 @@ type VirtualUSBRemoteClientBackingInfo struct { func init() { t["VirtualUSBRemoteClientBackingInfo"] = reflect.TypeOf((*VirtualUSBRemoteClientBackingInfo)(nil)).Elem() - minAPIVersionForType["VirtualUSBRemoteClientBackingInfo"] = "5.0" } // This data object type contains the options for @@ -93781,7 +93292,6 @@ type VirtualUSBRemoteClientBackingOption struct { func init() { t["VirtualUSBRemoteClientBackingOption"] = reflect.TypeOf((*VirtualUSBRemoteClientBackingOption)(nil)).Elem() - minAPIVersionForType["VirtualUSBRemoteClientBackingOption"] = "5.0" } // The `VirtualUSBRemoteHostBackingInfo` data object @@ -93834,7 +93344,6 @@ type VirtualUSBRemoteHostBackingInfo struct { func init() { t["VirtualUSBRemoteHostBackingInfo"] = reflect.TypeOf((*VirtualUSBRemoteHostBackingInfo)(nil)).Elem() - minAPIVersionForType["VirtualUSBRemoteHostBackingInfo"] = "4.1" } // The `VirtualUSBRemoteHostBackingOption` data object @@ -93849,7 +93358,6 @@ type VirtualUSBRemoteHostBackingOption struct { func init() { t["VirtualUSBRemoteHostBackingOption"] = reflect.TypeOf((*VirtualUSBRemoteHostBackingOption)(nil)).Elem() - minAPIVersionForType["VirtualUSBRemoteHostBackingOption"] = "4.1" } // The `VirtualUSBUSBBackingInfo` data object @@ -93891,7 +93399,6 @@ type VirtualUSBUSBBackingInfo struct { func init() { t["VirtualUSBUSBBackingInfo"] = reflect.TypeOf((*VirtualUSBUSBBackingInfo)(nil)).Elem() - minAPIVersionForType["VirtualUSBUSBBackingInfo"] = "2.5" } // The `VirtualUSBUSBBackingOption` data object @@ -93906,7 +93413,6 @@ type VirtualUSBUSBBackingOption struct { func init() { t["VirtualUSBUSBBackingOption"] = reflect.TypeOf((*VirtualUSBUSBBackingOption)(nil)).Elem() - minAPIVersionForType["VirtualUSBUSBBackingOption"] = "2.5" } // The `VirtualUSBXHCIController` data object describes a virtual @@ -93923,7 +93429,6 @@ type VirtualUSBXHCIController struct { func init() { t["VirtualUSBXHCIController"] = reflect.TypeOf((*VirtualUSBXHCIController)(nil)).Elem() - minAPIVersionForType["VirtualUSBXHCIController"] = "5.0" } // The VirtualUSBXHCIControllerOption data object type contains the options @@ -93942,7 +93447,6 @@ type VirtualUSBXHCIControllerOption struct { func init() { t["VirtualUSBXHCIControllerOption"] = reflect.TypeOf((*VirtualUSBXHCIControllerOption)(nil)).Elem() - minAPIVersionForType["VirtualUSBXHCIControllerOption"] = "5.0" } // This data object type contains the options for the @@ -93953,7 +93457,6 @@ type VirtualVMIROMOption struct { func init() { t["VirtualVMIROMOption"] = reflect.TypeOf((*VirtualVMIROMOption)(nil)).Elem() - minAPIVersionForType["VirtualVMIROMOption"] = "2.5" } // This data object type contains the options for the @@ -93964,20 +93467,20 @@ type VirtualVideoCardOption struct { // Minimum, maximum and default size of the video frame buffer. VideoRamSizeInKB *LongOption `xml:"videoRamSizeInKB,omitempty" json:"videoRamSizeInKB,omitempty"` // Minimum, maximum and default value for the number of displays. 
- NumDisplays *IntOption `xml:"numDisplays,omitempty" json:"numDisplays,omitempty" vim:"2.5 U2"` + NumDisplays *IntOption `xml:"numDisplays,omitempty" json:"numDisplays,omitempty"` // Flag to indicate whether the display settings of the host should // be used to automatically determine the display settings of the // virtual machine's video card. - UseAutoDetect *BoolOption `xml:"useAutoDetect,omitempty" json:"useAutoDetect,omitempty" vim:"2.5 U2"` + UseAutoDetect *BoolOption `xml:"useAutoDetect,omitempty" json:"useAutoDetect,omitempty"` // Flag to indicate whether the virtual video card supports 3D functions. - Support3D *BoolOption `xml:"support3D,omitempty" json:"support3D,omitempty" vim:"2.5 U2"` + Support3D *BoolOption `xml:"support3D,omitempty" json:"support3D,omitempty"` // Flag to indicate whether the virtual video card can specify how to render 3D graphics. - Use3dRendererSupported *BoolOption `xml:"use3dRendererSupported,omitempty" json:"use3dRendererSupported,omitempty" vim:"5.1"` + Use3dRendererSupported *BoolOption `xml:"use3dRendererSupported,omitempty" json:"use3dRendererSupported,omitempty"` // The minimum, maximum, and default values for graphics memory size. - GraphicsMemorySizeInKB *LongOption `xml:"graphicsMemorySizeInKB,omitempty" json:"graphicsMemorySizeInKB,omitempty" vim:"6.0"` + GraphicsMemorySizeInKB *LongOption `xml:"graphicsMemorySizeInKB,omitempty" json:"graphicsMemorySizeInKB,omitempty"` // Flag to indicate whether the virtual video card can specify the size // of graphics memory. - GraphicsMemorySizeSupported *BoolOption `xml:"graphicsMemorySizeSupported,omitempty" json:"graphicsMemorySizeSupported,omitempty" vim:"6.0"` + GraphicsMemorySizeSupported *BoolOption `xml:"graphicsMemorySizeSupported,omitempty" json:"graphicsMemorySizeSupported,omitempty"` } func init() { @@ -94002,7 +93505,6 @@ type VirtualVmxnet2 struct { func init() { t["VirtualVmxnet2"] = reflect.TypeOf((*VirtualVmxnet2)(nil)).Elem() - minAPIVersionForType["VirtualVmxnet2"] = "2.5" } // The VirtualVmxnet2Option data object type contains the options for the @@ -94013,7 +93515,6 @@ type VirtualVmxnet2Option struct { func init() { t["VirtualVmxnet2Option"] = reflect.TypeOf((*VirtualVmxnet2Option)(nil)).Elem() - minAPIVersionForType["VirtualVmxnet2Option"] = "2.5" } // The VirtualVmxnet3 data object type represents an instance @@ -94033,7 +93534,6 @@ type VirtualVmxnet3 struct { func init() { t["VirtualVmxnet3"] = reflect.TypeOf((*VirtualVmxnet3)(nil)).Elem() - minAPIVersionForType["VirtualVmxnet3"] = "2.5 U2" } // The VirtualVmxnet3Option data object type contains the options for the @@ -94048,7 +93548,6 @@ type VirtualVmxnet3Option struct { func init() { t["VirtualVmxnet3Option"] = reflect.TypeOf((*VirtualVmxnet3Option)(nil)).Elem() - minAPIVersionForType["VirtualVmxnet3Option"] = "2.5 U2" } // The VirtualVmxnet3Vrdma data object type represents an instance of the @@ -94061,12 +93560,11 @@ type VirtualVmxnet3Vrdma struct { // // See // `VirtualVmxnet3VrdmaOptionDeviceProtocols_enum` for more information. 
- DeviceProtocol string `xml:"deviceProtocol,omitempty" json:"deviceProtocol,omitempty" vim:"6.7"` + DeviceProtocol string `xml:"deviceProtocol,omitempty" json:"deviceProtocol,omitempty"` } func init() { t["VirtualVmxnet3Vrdma"] = reflect.TypeOf((*VirtualVmxnet3Vrdma)(nil)).Elem() - minAPIVersionForType["VirtualVmxnet3Vrdma"] = "6.5" } // The VirtualVmxnet3VrdmaOption data object type contains the options for the @@ -94075,12 +93573,11 @@ type VirtualVmxnet3VrdmaOption struct { VirtualVmxnet3Option // The supported device protocols. - DeviceProtocol *ChoiceOption `xml:"deviceProtocol,omitempty" json:"deviceProtocol,omitempty" vim:"6.7"` + DeviceProtocol *ChoiceOption `xml:"deviceProtocol,omitempty" json:"deviceProtocol,omitempty"` } func init() { t["VirtualVmxnet3VrdmaOption"] = reflect.TypeOf((*VirtualVmxnet3VrdmaOption)(nil)).Elem() - minAPIVersionForType["VirtualVmxnet3VrdmaOption"] = "6.5" } // The VirtualVmxnetOption data object type contains the options for the @@ -94113,7 +93610,6 @@ type VirtualWDT struct { func init() { t["VirtualWDT"] = reflect.TypeOf((*VirtualWDT)(nil)).Elem() - minAPIVersionForType["VirtualWDT"] = "7.0" } // This data object type contains the options for the @@ -94128,7 +93624,6 @@ type VirtualWDTOption struct { func init() { t["VirtualWDTOption"] = reflect.TypeOf((*VirtualWDTOption)(nil)).Elem() - minAPIVersionForType["VirtualWDTOption"] = "7.0" } // The `VlanProfile` data object represents @@ -94142,7 +93637,6 @@ type VlanProfile struct { func init() { t["VlanProfile"] = reflect.TypeOf((*VlanProfile)(nil)).Elem() - minAPIVersionForType["VlanProfile"] = "4.0" } // This event records a user successfully acquiring an MKS ticket @@ -94152,7 +93646,6 @@ type VmAcquiredMksTicketEvent struct { func init() { t["VmAcquiredMksTicketEvent"] = reflect.TypeOf((*VmAcquiredMksTicketEvent)(nil)).Elem() - minAPIVersionForType["VmAcquiredMksTicketEvent"] = "2.5" } // This event records a user successfully acquiring a ticket @@ -94165,7 +93658,6 @@ type VmAcquiredTicketEvent struct { func init() { t["VmAcquiredTicketEvent"] = reflect.TypeOf((*VmAcquiredTicketEvent)(nil)).Elem() - minAPIVersionForType["VmAcquiredTicketEvent"] = "4.1" } // Fault thrown when moving a standalone host between datacenters, and @@ -94189,7 +93681,6 @@ type VmAlreadyExistsInDatacenter struct { func init() { t["VmAlreadyExistsInDatacenter"] = reflect.TypeOf((*VmAlreadyExistsInDatacenter)(nil)).Elem() - minAPIVersionForType["VmAlreadyExistsInDatacenter"] = "4.0" } type VmAlreadyExistsInDatacenterFault VmAlreadyExistsInDatacenter @@ -94241,7 +93732,6 @@ type VmBeingClonedNoFolderEvent struct { func init() { t["VmBeingClonedNoFolderEvent"] = reflect.TypeOf((*VmBeingClonedNoFolderEvent)(nil)).Elem() - minAPIVersionForType["VmBeingClonedNoFolderEvent"] = "4.1" } // This event records a virtual machine being created. @@ -94275,9 +93765,9 @@ type VmBeingHotMigratedEvent struct { // The destination host to which the virtual machine is to be migrated. 
DestHost HostEventArgument `xml:"destHost" json:"destHost"` // The destination datacenter to which the virtual machine is being migrated - DestDatacenter *DatacenterEventArgument `xml:"destDatacenter,omitempty" json:"destDatacenter,omitempty" vim:"5.0"` + DestDatacenter *DatacenterEventArgument `xml:"destDatacenter,omitempty" json:"destDatacenter,omitempty"` // The destination primary datastore to which the virtual machine is being migrated - DestDatastore *DatastoreEventArgument `xml:"destDatastore,omitempty" json:"destDatastore,omitempty" vim:"5.0"` + DestDatastore *DatastoreEventArgument `xml:"destDatastore,omitempty" json:"destDatastore,omitempty"` } func init() { @@ -94291,9 +93781,9 @@ type VmBeingMigratedEvent struct { // The destination host. DestHost HostEventArgument `xml:"destHost" json:"destHost"` // The destination datacenter - DestDatacenter *DatacenterEventArgument `xml:"destDatacenter,omitempty" json:"destDatacenter,omitempty" vim:"5.0"` + DestDatacenter *DatacenterEventArgument `xml:"destDatacenter,omitempty" json:"destDatacenter,omitempty"` // The destination primary datastore - DestDatastore *DatastoreEventArgument `xml:"destDatastore,omitempty" json:"destDatastore,omitempty" vim:"5.0"` + DestDatastore *DatastoreEventArgument `xml:"destDatastore,omitempty" json:"destDatastore,omitempty"` } func init() { @@ -94307,9 +93797,9 @@ type VmBeingRelocatedEvent struct { // The destination host to which the virtual machine is being relocated. DestHost HostEventArgument `xml:"destHost" json:"destHost"` // The destination datacenter to which the virtual machine is being relocated - DestDatacenter *DatacenterEventArgument `xml:"destDatacenter,omitempty" json:"destDatacenter,omitempty" vim:"5.0"` + DestDatacenter *DatacenterEventArgument `xml:"destDatacenter,omitempty" json:"destDatacenter,omitempty"` // The destination primary datastore to which the virtual machine is being relocated - DestDatastore *DatastoreEventArgument `xml:"destDatastore,omitempty" json:"destDatastore,omitempty" vim:"5.0"` + DestDatastore *DatastoreEventArgument `xml:"destDatastore,omitempty" json:"destDatastore,omitempty"` } func init() { @@ -94390,7 +93880,6 @@ type VmConfigFileEncryptionInfo struct { func init() { t["VmConfigFileEncryptionInfo"] = reflect.TypeOf((*VmConfigFileEncryptionInfo)(nil)).Elem() - minAPIVersionForType["VmConfigFileEncryptionInfo"] = "6.5" } // This data object type describes a virtual machine configuration file. @@ -94404,7 +93893,7 @@ type VmConfigFileInfo struct { // If encryption was selected in VmConfigFileQueryFlags then this // field is always set. Inspect the VmConfigEncryptionInfo to // determine if the virtual machine configuration file is encrypted. - Encryption *VmConfigFileEncryptionInfo `xml:"encryption,omitempty" json:"encryption,omitempty" vim:"6.5"` + Encryption *VmConfigFileEncryptionInfo `xml:"encryption,omitempty" json:"encryption,omitempty"` } func init() { @@ -94439,7 +93928,7 @@ type VmConfigFileQueryFilter struct { // This optional property can be used to filter virtual // machine configuration files based on whether they are // encrypted or not. - Encrypted *bool `xml:"encrypted" json:"encrypted,omitempty" vim:"6.5"` + Encrypted *bool `xml:"encrypted" json:"encrypted,omitempty"` } func init() { @@ -94454,7 +93943,7 @@ type VmConfigFileQueryFlags struct { ConfigVersion bool `xml:"configVersion" json:"configVersion"` // The flag to indicate whether the encryption information of the // virtual machine configuration is returned. 
- Encryption *bool `xml:"encryption" json:"encryption,omitempty" vim:"6.5"` + Encryption *bool `xml:"encryption" json:"encryption,omitempty"` } func init() { @@ -94475,7 +93964,6 @@ type VmConfigIncompatibleForFaultTolerance struct { func init() { t["VmConfigIncompatibleForFaultTolerance"] = reflect.TypeOf((*VmConfigIncompatibleForFaultTolerance)(nil)).Elem() - minAPIVersionForType["VmConfigIncompatibleForFaultTolerance"] = "4.0" } type VmConfigIncompatibleForFaultToleranceFault VmConfigIncompatibleForFaultTolerance @@ -94500,7 +93988,6 @@ type VmConfigIncompatibleForRecordReplay struct { func init() { t["VmConfigIncompatibleForRecordReplay"] = reflect.TypeOf((*VmConfigIncompatibleForRecordReplay)(nil)).Elem() - minAPIVersionForType["VmConfigIncompatibleForRecordReplay"] = "4.0" } type VmConfigIncompatibleForRecordReplayFault VmConfigIncompatibleForRecordReplay @@ -94548,7 +94035,6 @@ type VmConfigInfo struct { func init() { t["VmConfigInfo"] = reflect.TypeOf((*VmConfigInfo)(nil)).Elem() - minAPIVersionForType["VmConfigInfo"] = "4.0" } // This event records if the configuration file can not be found. @@ -94621,7 +94107,6 @@ type VmConfigSpec struct { func init() { t["VmConfigSpec"] = reflect.TypeOf((*VmConfigSpec)(nil)).Elem() - minAPIVersionForType["VmConfigSpec"] = "4.0" } // This event records that a virtual machine is connected. @@ -94649,12 +94134,11 @@ type VmDasBeingResetEvent struct { VmEvent // The reason why this vm is being reset - Reason string `xml:"reason,omitempty" json:"reason,omitempty" vim:"4.1"` + Reason string `xml:"reason,omitempty" json:"reason,omitempty"` } func init() { t["VmDasBeingResetEvent"] = reflect.TypeOf((*VmDasBeingResetEvent)(nil)).Elem() - minAPIVersionForType["VmDasBeingResetEvent"] = "4.0" } // This event records when a virtual machine is reset by @@ -94668,7 +94152,6 @@ type VmDasBeingResetWithScreenshotEvent struct { func init() { t["VmDasBeingResetWithScreenshotEvent"] = reflect.TypeOf((*VmDasBeingResetWithScreenshotEvent)(nil)).Elem() - minAPIVersionForType["VmDasBeingResetWithScreenshotEvent"] = "4.0" } // This event records when HA VM Health Monitoring fails to reset @@ -94679,7 +94162,6 @@ type VmDasResetFailedEvent struct { func init() { t["VmDasResetFailedEvent"] = reflect.TypeOf((*VmDasResetFailedEvent)(nil)).Elem() - minAPIVersionForType["VmDasResetFailedEvent"] = "4.0" } // The event records that an error occurred when updating the HA agents @@ -94789,7 +94271,6 @@ type VmDiskFileEncryptionInfo struct { func init() { t["VmDiskFileEncryptionInfo"] = reflect.TypeOf((*VmDiskFileEncryptionInfo)(nil)).Elem() - minAPIVersionForType["VmDiskFileEncryptionInfo"] = "6.5" } // This data object type describes a virtual disk primary file. @@ -94815,17 +94296,17 @@ type VmDiskFileInfo struct { // controller and plugged into an lsilogic controller. // // The controller type suitable for this virtual disk. - ControllerType string `xml:"controllerType,omitempty" json:"controllerType,omitempty" vim:"2.5"` + ControllerType string `xml:"controllerType,omitempty" json:"controllerType,omitempty"` // The extents of this virtual disk specified in absolute DS paths - DiskExtents []string `xml:"diskExtents,omitempty" json:"diskExtents,omitempty" vim:"2.5"` + DiskExtents []string `xml:"diskExtents,omitempty" json:"diskExtents,omitempty"` // Indicates if the disk is thin-provisioned - Thin *bool `xml:"thin" json:"thin,omitempty" vim:"4.0"` + Thin *bool `xml:"thin" json:"thin,omitempty"` // The encryption information of the virtual disk. 
// // If encryption was selected in VmDiskFileQueryFlags then this // field is always set. Inspect the VmDiskEncryptionInfo to // determine if the virtual disk is encrypted. - Encryption *VmDiskFileEncryptionInfo `xml:"encryption,omitempty" json:"encryption,omitempty" vim:"6.5"` + Encryption *VmDiskFileEncryptionInfo `xml:"encryption,omitempty" json:"encryption,omitempty"` } func init() { @@ -94886,14 +94367,14 @@ type VmDiskFileQueryFilter struct { // virtual disk. // // See also `VirtualIDEController`, `VirtualSCSIController`. - ControllerType []string `xml:"controllerType,omitempty" json:"controllerType,omitempty" vim:"2.5"` + ControllerType []string `xml:"controllerType,omitempty" json:"controllerType,omitempty"` // This optional property can be used to filter disks based on whether // they are thin-provsioned or not: if set to true, only thin-provisioned // disks are returned, and vice-versa. - Thin *bool `xml:"thin" json:"thin,omitempty" vim:"4.0"` + Thin *bool `xml:"thin" json:"thin,omitempty"` // This optional property can be used to filter disks based on // whether they are encrypted or not. - Encrypted *bool `xml:"encrypted" json:"encrypted,omitempty" vim:"6.5"` + Encrypted *bool `xml:"encrypted" json:"encrypted,omitempty"` } func init() { @@ -94922,15 +94403,15 @@ type VmDiskFileQueryFlags struct { // // The flag to indicate whether or not the controller type of the virtual disk // file is returned. - ControllerType *bool `xml:"controllerType" json:"controllerType,omitempty" vim:"2.5"` + ControllerType *bool `xml:"controllerType" json:"controllerType,omitempty"` // The flag to indicate whether or not the disk extents of the virtual disk // are returned. - DiskExtents *bool `xml:"diskExtents" json:"diskExtents,omitempty" vim:"2.5"` + DiskExtents *bool `xml:"diskExtents" json:"diskExtents,omitempty"` // The flag to indicate whether the thin-ness of the disk is returned. - Thin *bool `xml:"thin" json:"thin,omitempty" vim:"4.0"` + Thin *bool `xml:"thin" json:"thin,omitempty"` // The flag to indicate whether the encryption information of the // virtual disk is returned. - Encryption *bool `xml:"encryption" json:"encryption,omitempty" vim:"6.5"` + Encryption *bool `xml:"encryption" json:"encryption,omitempty"` } func init() { @@ -94955,7 +94436,6 @@ type VmEndRecordingEvent struct { func init() { t["VmEndRecordingEvent"] = reflect.TypeOf((*VmEndRecordingEvent)(nil)).Elem() - minAPIVersionForType["VmEndRecordingEvent"] = "4.0" } // Deprecated as of vSphere API 6.0. @@ -94967,7 +94447,6 @@ type VmEndReplayingEvent struct { func init() { t["VmEndReplayingEvent"] = reflect.TypeOf((*VmEndReplayingEvent)(nil)).Elem() - minAPIVersionForType["VmEndReplayingEvent"] = "4.0" } // These are virtual machine events. @@ -95005,9 +94484,9 @@ type VmFailedMigrateEvent struct { // The reason for the failure. 
Reason LocalizedMethodFault `xml:"reason" json:"reason"` // The destination datacenter - DestDatacenter *DatacenterEventArgument `xml:"destDatacenter,omitempty" json:"destDatacenter,omitempty" vim:"5.0"` + DestDatacenter *DatacenterEventArgument `xml:"destDatacenter,omitempty" json:"destDatacenter,omitempty"` // The destination primary datastore - DestDatastore *DatastoreEventArgument `xml:"destDatastore,omitempty" json:"destDatastore,omitempty" vim:"5.0"` + DestDatastore *DatastoreEventArgument `xml:"destDatastore,omitempty" json:"destDatastore,omitempty"` } func init() { @@ -95048,7 +94527,6 @@ type VmFailedStartingSecondaryEvent struct { func init() { t["VmFailedStartingSecondaryEvent"] = reflect.TypeOf((*VmFailedStartingSecondaryEvent)(nil)).Elem() - minAPIVersionForType["VmFailedStartingSecondaryEvent"] = "4.0" } // This event records a failure to power off a virtual machine. @@ -95144,7 +94622,6 @@ type VmFailedUpdatingSecondaryConfig struct { func init() { t["VmFailedUpdatingSecondaryConfig"] = reflect.TypeOf((*VmFailedUpdatingSecondaryConfig)(nil)).Elem() - minAPIVersionForType["VmFailedUpdatingSecondaryConfig"] = "4.0" } // This event records when a virtual machine failover was unsuccessful. @@ -95152,7 +94629,7 @@ type VmFailoverFailed struct { VmEvent // The reason for the failure - Reason *LocalizedMethodFault `xml:"reason,omitempty" json:"reason,omitempty" vim:"4.1"` + Reason *LocalizedMethodFault `xml:"reason,omitempty" json:"reason,omitempty"` } func init() { @@ -95179,7 +94656,6 @@ type VmFaultToleranceConfigIssue struct { func init() { t["VmFaultToleranceConfigIssue"] = reflect.TypeOf((*VmFaultToleranceConfigIssue)(nil)).Elem() - minAPIVersionForType["VmFaultToleranceConfigIssue"] = "4.0" } type VmFaultToleranceConfigIssueFault VmFaultToleranceConfigIssue @@ -95208,7 +94684,6 @@ type VmFaultToleranceConfigIssueWrapper struct { func init() { t["VmFaultToleranceConfigIssueWrapper"] = reflect.TypeOf((*VmFaultToleranceConfigIssueWrapper)(nil)).Elem() - minAPIVersionForType["VmFaultToleranceConfigIssueWrapper"] = "4.1" } type VmFaultToleranceConfigIssueWrapperFault VmFaultToleranceConfigIssueWrapper @@ -95229,7 +94704,6 @@ type VmFaultToleranceInvalidFileBacking struct { func init() { t["VmFaultToleranceInvalidFileBacking"] = reflect.TypeOf((*VmFaultToleranceInvalidFileBacking)(nil)).Elem() - minAPIVersionForType["VmFaultToleranceInvalidFileBacking"] = "4.0" } type VmFaultToleranceInvalidFileBackingFault VmFaultToleranceInvalidFileBacking @@ -95246,7 +94720,6 @@ type VmFaultToleranceIssue struct { func init() { t["VmFaultToleranceIssue"] = reflect.TypeOf((*VmFaultToleranceIssue)(nil)).Elem() - minAPIVersionForType["VmFaultToleranceIssue"] = "4.0" } type VmFaultToleranceIssueFault BaseVmFaultToleranceIssue @@ -95267,7 +94740,6 @@ type VmFaultToleranceOpIssuesList struct { func init() { t["VmFaultToleranceOpIssuesList"] = reflect.TypeOf((*VmFaultToleranceOpIssuesList)(nil)).Elem() - minAPIVersionForType["VmFaultToleranceOpIssuesList"] = "4.0" } type VmFaultToleranceOpIssuesListFault VmFaultToleranceOpIssuesList @@ -95295,7 +94767,6 @@ type VmFaultToleranceStateChangedEvent struct { func init() { t["VmFaultToleranceStateChangedEvent"] = reflect.TypeOf((*VmFaultToleranceStateChangedEvent)(nil)).Elem() - minAPIVersionForType["VmFaultToleranceStateChangedEvent"] = "4.0" } // This fault is returned when a host has more than the recommended number of @@ -95311,7 +94782,6 @@ type VmFaultToleranceTooManyFtVcpusOnHost struct { func init() { t["VmFaultToleranceTooManyFtVcpusOnHost"] = 
reflect.TypeOf((*VmFaultToleranceTooManyFtVcpusOnHost)(nil)).Elem() - minAPIVersionForType["VmFaultToleranceTooManyFtVcpusOnHost"] = "6.0" } type VmFaultToleranceTooManyFtVcpusOnHostFault VmFaultToleranceTooManyFtVcpusOnHost @@ -95332,7 +94802,6 @@ type VmFaultToleranceTooManyVMsOnHost struct { func init() { t["VmFaultToleranceTooManyVMsOnHost"] = reflect.TypeOf((*VmFaultToleranceTooManyVMsOnHost)(nil)).Elem() - minAPIVersionForType["VmFaultToleranceTooManyVMsOnHost"] = "4.1" } type VmFaultToleranceTooManyVMsOnHostFault VmFaultToleranceTooManyVMsOnHost @@ -95350,7 +94819,6 @@ type VmFaultToleranceTurnedOffEvent struct { func init() { t["VmFaultToleranceTurnedOffEvent"] = reflect.TypeOf((*VmFaultToleranceTurnedOffEvent)(nil)).Elem() - minAPIVersionForType["VmFaultToleranceTurnedOffEvent"] = "4.0" } // This event records a secondary or primary VM is terminated. @@ -95368,7 +94836,6 @@ type VmFaultToleranceVmTerminatedEvent struct { func init() { t["VmFaultToleranceVmTerminatedEvent"] = reflect.TypeOf((*VmFaultToleranceVmTerminatedEvent)(nil)).Elem() - minAPIVersionForType["VmFaultToleranceVmTerminatedEvent"] = "4.0" } // This event notifies that a guest OS has crashed @@ -95378,7 +94845,6 @@ type VmGuestOSCrashedEvent struct { func init() { t["VmGuestOSCrashedEvent"] = reflect.TypeOf((*VmGuestOSCrashedEvent)(nil)).Elem() - minAPIVersionForType["VmGuestOSCrashedEvent"] = "6.0" } // This is a virtual machine guest reboot request event. @@ -95417,12 +94883,11 @@ type VmHealthMonitoringStateChangedEvent struct { State string `xml:"state" json:"state"` // The previous service state in // `ClusterDasConfigInfoVmMonitoringState_enum` - PrevState string `xml:"prevState,omitempty" json:"prevState,omitempty" vim:"6.5"` + PrevState string `xml:"prevState,omitempty" json:"prevState,omitempty"` } func init() { t["VmHealthMonitoringStateChangedEvent"] = reflect.TypeOf((*VmHealthMonitoringStateChangedEvent)(nil)).Elem() - minAPIVersionForType["VmHealthMonitoringStateChangedEvent"] = "4.0" } // The virtual machine if powered on or VMotioned, would violate a VM-Host affinity rule. @@ -95437,7 +94902,6 @@ type VmHostAffinityRuleViolation struct { func init() { t["VmHostAffinityRuleViolation"] = reflect.TypeOf((*VmHostAffinityRuleViolation)(nil)).Elem() - minAPIVersionForType["VmHostAffinityRuleViolation"] = "4.1" } type VmHostAffinityRuleViolationFault VmHostAffinityRuleViolation @@ -95457,7 +94921,6 @@ type VmInstanceUuidAssignedEvent struct { func init() { t["VmInstanceUuidAssignedEvent"] = reflect.TypeOf((*VmInstanceUuidAssignedEvent)(nil)).Elem() - minAPIVersionForType["VmInstanceUuidAssignedEvent"] = "4.0" } // This event records a change in a virtual machine's instance UUID. @@ -95472,7 +94935,6 @@ type VmInstanceUuidChangedEvent struct { func init() { t["VmInstanceUuidChangedEvent"] = reflect.TypeOf((*VmInstanceUuidChangedEvent)(nil)).Elem() - minAPIVersionForType["VmInstanceUuidChangedEvent"] = "4.0" } // This event records a conflict of virtual machine instance UUIDs. 
@@ -95488,7 +94950,6 @@ type VmInstanceUuidConflictEvent struct { func init() { t["VmInstanceUuidConflictEvent"] = reflect.TypeOf((*VmInstanceUuidConflictEvent)(nil)).Elem() - minAPIVersionForType["VmInstanceUuidConflictEvent"] = "4.0" } // A VmLimitLicense fault is thrown if powering on the virtual @@ -95585,7 +95046,6 @@ type VmMaxFTRestartCountReached struct { func init() { t["VmMaxFTRestartCountReached"] = reflect.TypeOf((*VmMaxFTRestartCountReached)(nil)).Elem() - minAPIVersionForType["VmMaxFTRestartCountReached"] = "4.0" } // This event is fired when the VM reached the max restart count @@ -95595,7 +95055,6 @@ type VmMaxRestartCountReached struct { func init() { t["VmMaxRestartCountReached"] = reflect.TypeOf((*VmMaxRestartCountReached)(nil)).Elem() - minAPIVersionForType["VmMaxRestartCountReached"] = "4.0" } // This event records when an error message (consisting of a collection of "observations") @@ -95615,7 +95074,6 @@ type VmMessageErrorEvent struct { func init() { t["VmMessageErrorEvent"] = reflect.TypeOf((*VmMessageErrorEvent)(nil)).Elem() - minAPIVersionForType["VmMessageErrorEvent"] = "4.0" } // This event records when an informational message (consisting of a collection of "observations") @@ -95630,7 +95088,7 @@ type VmMessageEvent struct { // A set of localizable message data that comprise this event. // // Only available on servers that support localization. - MessageInfo []VirtualMachineMessage `xml:"messageInfo,omitempty" json:"messageInfo,omitempty" vim:"2.5"` + MessageInfo []VirtualMachineMessage `xml:"messageInfo,omitempty" json:"messageInfo,omitempty"` } func init() { @@ -95651,7 +95109,6 @@ type VmMessageWarningEvent struct { func init() { t["VmMessageWarningEvent"] = reflect.TypeOf((*VmMessageWarningEvent)(nil)).Elem() - minAPIVersionForType["VmMessageWarningEvent"] = "4.0" } // This fault indicates that some error has occurred during the processing of @@ -95665,7 +95122,6 @@ type VmMetadataManagerFault struct { func init() { t["VmMetadataManagerFault"] = reflect.TypeOf((*VmMetadataManagerFault)(nil)).Elem() - minAPIVersionForType["VmMetadataManagerFault"] = "5.5" } type VmMetadataManagerFaultFault VmMetadataManagerFault @@ -95684,9 +95140,9 @@ type VmMigratedEvent struct { // the destination host is recorded in the inherited "host" property.) 
SourceHost HostEventArgument `xml:"sourceHost" json:"sourceHost"` // The source datacenter - SourceDatacenter *DatacenterEventArgument `xml:"sourceDatacenter,omitempty" json:"sourceDatacenter,omitempty" vim:"5.0"` + SourceDatacenter *DatacenterEventArgument `xml:"sourceDatacenter,omitempty" json:"sourceDatacenter,omitempty"` // The source primary datastore - SourceDatastore *DatastoreEventArgument `xml:"sourceDatastore,omitempty" json:"sourceDatastore,omitempty" vim:"5.0"` + SourceDatastore *DatastoreEventArgument `xml:"sourceDatastore,omitempty" json:"sourceDatastore,omitempty"` } func init() { @@ -95701,7 +95157,6 @@ type VmMonitorIncompatibleForFaultTolerance struct { func init() { t["VmMonitorIncompatibleForFaultTolerance"] = reflect.TypeOf((*VmMonitorIncompatibleForFaultTolerance)(nil)).Elem() - minAPIVersionForType["VmMonitorIncompatibleForFaultTolerance"] = "4.1" } type VmMonitorIncompatibleForFaultToleranceFault VmMonitorIncompatibleForFaultTolerance @@ -95721,7 +95176,6 @@ type VmNoCompatibleHostForSecondaryEvent struct { func init() { t["VmNoCompatibleHostForSecondaryEvent"] = reflect.TypeOf((*VmNoCompatibleHostForSecondaryEvent)(nil)).Elem() - minAPIVersionForType["VmNoCompatibleHostForSecondaryEvent"] = "4.0" } // This event records a migration failure when the destination host @@ -95800,7 +95254,6 @@ type VmPodConfigForPlacement struct { func init() { t["VmPodConfigForPlacement"] = reflect.TypeOf((*VmPodConfigForPlacement)(nil)).Elem() - minAPIVersionForType["VmPodConfigForPlacement"] = "5.0" } // The `VmPortGroupProfile` data object represents the subprofile @@ -95818,7 +95271,6 @@ type VmPortGroupProfile struct { func init() { t["VmPortGroupProfile"] = reflect.TypeOf((*VmPortGroupProfile)(nil)).Elem() - minAPIVersionForType["VmPortGroupProfile"] = "4.0" } // This event records when a virtual machine has been powered off on an isolated host @@ -95842,7 +95294,6 @@ type VmPowerOnDisabled struct { func init() { t["VmPowerOnDisabled"] = reflect.TypeOf((*VmPowerOnDisabled)(nil)).Elem() - minAPIVersionForType["VmPowerOnDisabled"] = "4.0" } type VmPowerOnDisabledFault VmPowerOnDisabled @@ -95881,7 +95332,6 @@ type VmPoweringOnWithCustomizedDVPortEvent struct { func init() { t["VmPoweringOnWithCustomizedDVPortEvent"] = reflect.TypeOf((*VmPoweringOnWithCustomizedDVPortEvent)(nil)).Elem() - minAPIVersionForType["VmPoweringOnWithCustomizedDVPortEvent"] = "4.0" } // This event records a fault tolerance failover. @@ -95899,7 +95349,6 @@ type VmPrimaryFailoverEvent struct { func init() { t["VmPrimaryFailoverEvent"] = reflect.TypeOf((*VmPrimaryFailoverEvent)(nil)).Elem() - minAPIVersionForType["VmPrimaryFailoverEvent"] = "4.0" } // This event records a reconfiguration of the virtual machine. @@ -95909,7 +95358,7 @@ type VmReconfiguredEvent struct { // The configuration specification that was used for the reconfiguration. ConfigSpec VirtualMachineConfigSpec `xml:"configSpec" json:"configSpec"` // The configuration values changed during the reconfiguration. 
- ConfigChanges *ChangesInfoEventArgument `xml:"configChanges,omitempty" json:"configChanges,omitempty" vim:"6.5"` + ConfigChanges *ChangesInfoEventArgument `xml:"configChanges,omitempty" json:"configChanges,omitempty"` } func init() { @@ -95957,7 +95406,6 @@ type VmReloadFromPathEvent struct { func init() { t["VmReloadFromPathEvent"] = reflect.TypeOf((*VmReloadFromPathEvent)(nil)).Elem() - minAPIVersionForType["VmReloadFromPathEvent"] = "4.1" } // This event records that a virtual machine reload from a new configuration @@ -95970,7 +95418,6 @@ type VmReloadFromPathFailedEvent struct { func init() { t["VmReloadFromPathFailedEvent"] = reflect.TypeOf((*VmReloadFromPathFailedEvent)(nil)).Elem() - minAPIVersionForType["VmReloadFromPathFailedEvent"] = "4.1" } // This event records a failure to relocate a virtual machine. @@ -95982,9 +95429,9 @@ type VmRelocateFailedEvent struct { // The reason why this relocate operation failed. Reason LocalizedMethodFault `xml:"reason" json:"reason"` // The destination datacenter to which the virtual machine was being relocated - DestDatacenter *DatacenterEventArgument `xml:"destDatacenter,omitempty" json:"destDatacenter,omitempty" vim:"5.0"` + DestDatacenter *DatacenterEventArgument `xml:"destDatacenter,omitempty" json:"destDatacenter,omitempty"` // The destination primary datastore to which the virtual machine was being relocated - DestDatastore *DatastoreEventArgument `xml:"destDatastore,omitempty" json:"destDatastore,omitempty" vim:"5.0"` + DestDatastore *DatastoreEventArgument `xml:"destDatastore,omitempty" json:"destDatastore,omitempty"` } func init() { @@ -96007,9 +95454,9 @@ type VmRelocatedEvent struct { // The source host from which the virtual machine was relocated. SourceHost HostEventArgument `xml:"sourceHost" json:"sourceHost"` // The source datacenter from which the virtual machine relocated - SourceDatacenter *DatacenterEventArgument `xml:"sourceDatacenter,omitempty" json:"sourceDatacenter,omitempty" vim:"5.0"` + SourceDatacenter *DatacenterEventArgument `xml:"sourceDatacenter,omitempty" json:"sourceDatacenter,omitempty"` // The source primary datastore from which the virtual machine relocated - SourceDatastore *DatastoreEventArgument `xml:"sourceDatastore,omitempty" json:"sourceDatastore,omitempty" vim:"5.0"` + SourceDatastore *DatastoreEventArgument `xml:"sourceDatastore,omitempty" json:"sourceDatastore,omitempty"` } func init() { @@ -96023,7 +95470,6 @@ type VmRemoteConsoleConnectedEvent struct { func init() { t["VmRemoteConsoleConnectedEvent"] = reflect.TypeOf((*VmRemoteConsoleConnectedEvent)(nil)).Elem() - minAPIVersionForType["VmRemoteConsoleConnectedEvent"] = "4.0" } // This event records that a remote console was disconnected from the VM @@ -96033,7 +95479,6 @@ type VmRemoteConsoleDisconnectedEvent struct { func init() { t["VmRemoteConsoleDisconnectedEvent"] = reflect.TypeOf((*VmRemoteConsoleDisconnectedEvent)(nil)).Elem() - minAPIVersionForType["VmRemoteConsoleDisconnectedEvent"] = "4.0" } // This event records a virtual machine removed from VirtualCenter management. @@ -96071,7 +95516,6 @@ type VmRequirementsExceedCurrentEVCModeEvent struct { func init() { t["VmRequirementsExceedCurrentEVCModeEvent"] = reflect.TypeOf((*VmRequirementsExceedCurrentEVCModeEvent)(nil)).Elem() - minAPIVersionForType["VmRequirementsExceedCurrentEVCModeEvent"] = "5.1" } // This event records a virtual machine resetting. @@ -96102,7 +95546,7 @@ type VmResourceReallocatedEvent struct { VmEvent // The configuration values changed during the reconfiguration. 
- ConfigChanges *ChangesInfoEventArgument `xml:"configChanges,omitempty" json:"configChanges,omitempty" vim:"6.5"` + ConfigChanges *ChangesInfoEventArgument `xml:"configChanges,omitempty" json:"configChanges,omitempty"` } func init() { @@ -96141,7 +95585,6 @@ type VmSecondaryAddedEvent struct { func init() { t["VmSecondaryAddedEvent"] = reflect.TypeOf((*VmSecondaryAddedEvent)(nil)).Elem() - minAPIVersionForType["VmSecondaryAddedEvent"] = "4.0" } // This event records that a fault tolerance secondary VM has been @@ -96154,7 +95597,6 @@ type VmSecondaryDisabledBySystemEvent struct { func init() { t["VmSecondaryDisabledBySystemEvent"] = reflect.TypeOf((*VmSecondaryDisabledBySystemEvent)(nil)).Elem() - minAPIVersionForType["VmSecondaryDisabledBySystemEvent"] = "4.0" } // This event records a secondary VM is disabled. @@ -96164,7 +95606,6 @@ type VmSecondaryDisabledEvent struct { func init() { t["VmSecondaryDisabledEvent"] = reflect.TypeOf((*VmSecondaryDisabledEvent)(nil)).Elem() - minAPIVersionForType["VmSecondaryDisabledEvent"] = "4.0" } // This event records a secondary VM is enabled. @@ -96174,7 +95615,6 @@ type VmSecondaryEnabledEvent struct { func init() { t["VmSecondaryEnabledEvent"] = reflect.TypeOf((*VmSecondaryEnabledEvent)(nil)).Elem() - minAPIVersionForType["VmSecondaryEnabledEvent"] = "4.0" } // This event records a secondary VM is started successfully. @@ -96184,7 +95624,6 @@ type VmSecondaryStartedEvent struct { func init() { t["VmSecondaryStartedEvent"] = reflect.TypeOf((*VmSecondaryStartedEvent)(nil)).Elem() - minAPIVersionForType["VmSecondaryStartedEvent"] = "4.0" } // This event records when a virtual machine has been shut down on an isolated host @@ -96203,7 +95642,6 @@ type VmShutdownOnIsolationEvent struct { func init() { t["VmShutdownOnIsolationEvent"] = reflect.TypeOf((*VmShutdownOnIsolationEvent)(nil)).Elem() - minAPIVersionForType["VmShutdownOnIsolationEvent"] = "4.0" } // This fault is returned when a host has more than the recommended number of @@ -96219,7 +95657,6 @@ type VmSmpFaultToleranceTooManyVMsOnHost struct { func init() { t["VmSmpFaultToleranceTooManyVMsOnHost"] = reflect.TypeOf((*VmSmpFaultToleranceTooManyVMsOnHost)(nil)).Elem() - minAPIVersionForType["VmSmpFaultToleranceTooManyVMsOnHost"] = "6.0" } type VmSmpFaultToleranceTooManyVMsOnHostFault VmSmpFaultToleranceTooManyVMsOnHost @@ -96256,7 +95693,6 @@ type VmStartRecordingEvent struct { func init() { t["VmStartRecordingEvent"] = reflect.TypeOf((*VmStartRecordingEvent)(nil)).Elem() - minAPIVersionForType["VmStartRecordingEvent"] = "4.0" } // Deprecated as of vSphere API 6.0. @@ -96268,7 +95704,6 @@ type VmStartReplayingEvent struct { func init() { t["VmStartReplayingEvent"] = reflect.TypeOf((*VmStartReplayingEvent)(nil)).Elem() - minAPIVersionForType["VmStartReplayingEvent"] = "4.0" } // This event records a virtual machine powering on. @@ -96287,7 +95722,6 @@ type VmStartingSecondaryEvent struct { func init() { t["VmStartingSecondaryEvent"] = reflect.TypeOf((*VmStartingSecondaryEvent)(nil)).Elem() - minAPIVersionForType["VmStartingSecondaryEvent"] = "4.0" } // This event records a static MAC address conflict for a virtual machine. @@ -96345,7 +95779,6 @@ type VmTimedoutStartingSecondaryEvent struct { func init() { t["VmTimedoutStartingSecondaryEvent"] = reflect.TypeOf((*VmTimedoutStartingSecondaryEvent)(nil)).Elem() - minAPIVersionForType["VmTimedoutStartingSecondaryEvent"] = "4.0" } // A base fault to indicate that something went wrong when upgrading tools. 
@@ -96485,7 +95918,6 @@ type VmVnicPoolReservationViolationClearEvent struct { func init() { t["VmVnicPoolReservationViolationClearEvent"] = reflect.TypeOf((*VmVnicPoolReservationViolationClearEvent)(nil)).Elem() - minAPIVersionForType["VmVnicPoolReservationViolationClearEvent"] = "6.0" } // This event is generated when the reservations used by all @@ -96502,7 +95934,6 @@ type VmVnicPoolReservationViolationRaiseEvent struct { func init() { t["VmVnicPoolReservationViolationRaiseEvent"] = reflect.TypeOf((*VmVnicPoolReservationViolationRaiseEvent)(nil)).Elem() - minAPIVersionForType["VmVnicPoolReservationViolationRaiseEvent"] = "6.0" } // This event records the assignment of a new WWN (World Wide Name) @@ -96518,7 +95949,6 @@ type VmWwnAssignedEvent struct { func init() { t["VmWwnAssignedEvent"] = reflect.TypeOf((*VmWwnAssignedEvent)(nil)).Elem() - minAPIVersionForType["VmWwnAssignedEvent"] = "2.5" } // This event records a change in a virtual machine's WWN (World Wide Name). @@ -96537,7 +95967,6 @@ type VmWwnChangedEvent struct { func init() { t["VmWwnChangedEvent"] = reflect.TypeOf((*VmWwnChangedEvent)(nil)).Elem() - minAPIVersionForType["VmWwnChangedEvent"] = "2.5" } // Thrown if a user attempts to assign a @@ -96561,7 +95990,6 @@ type VmWwnConflict struct { func init() { t["VmWwnConflict"] = reflect.TypeOf((*VmWwnConflict)(nil)).Elem() - minAPIVersionForType["VmWwnConflict"] = "2.5" } // This event records a conflict of virtual machine WWNs (World Wide Name). @@ -96580,7 +96008,6 @@ type VmWwnConflictEvent struct { func init() { t["VmWwnConflictEvent"] = reflect.TypeOf((*VmWwnConflictEvent)(nil)).Elem() - minAPIVersionForType["VmWwnConflictEvent"] = "2.5" } type VmWwnConflictFault VmWwnConflict @@ -96597,7 +96024,6 @@ type VmfsAlreadyMounted struct { func init() { t["VmfsAlreadyMounted"] = reflect.TypeOf((*VmfsAlreadyMounted)(nil)).Elem() - minAPIVersionForType["VmfsAlreadyMounted"] = "4.0" } type VmfsAlreadyMountedFault VmfsAlreadyMounted @@ -96619,7 +96045,6 @@ type VmfsAmbiguousMount struct { func init() { t["VmfsAmbiguousMount"] = reflect.TypeOf((*VmfsAmbiguousMount)(nil)).Elem() - minAPIVersionForType["VmfsAmbiguousMount"] = "4.0" } type VmfsAmbiguousMountFault VmfsAmbiguousMount @@ -96640,13 +96065,13 @@ type VmfsConfigOption struct { // The unit is KB. UnmapGranularityOption []int32 `xml:"unmapGranularityOption,omitempty" json:"unmapGranularityOption,omitempty"` // Fixed unmap bandwidth min/max/default value - UnmapBandwidthFixedValue *LongOption `xml:"unmapBandwidthFixedValue,omitempty" json:"unmapBandwidthFixedValue,omitempty" vim:"6.7"` + UnmapBandwidthFixedValue *LongOption `xml:"unmapBandwidthFixedValue,omitempty" json:"unmapBandwidthFixedValue,omitempty"` // Dynamic unmap bandwidth lower limit min/max/default value. - UnmapBandwidthDynamicMin *LongOption `xml:"unmapBandwidthDynamicMin,omitempty" json:"unmapBandwidthDynamicMin,omitempty" vim:"6.7"` + UnmapBandwidthDynamicMin *LongOption `xml:"unmapBandwidthDynamicMin,omitempty" json:"unmapBandwidthDynamicMin,omitempty"` // Dynamic unmap bandwitdth upper limit min/max/default value. 
- UnmapBandwidthDynamicMax *LongOption `xml:"unmapBandwidthDynamicMax,omitempty" json:"unmapBandwidthDynamicMax,omitempty" vim:"6.7"` + UnmapBandwidthDynamicMax *LongOption `xml:"unmapBandwidthDynamicMax,omitempty" json:"unmapBandwidthDynamicMax,omitempty"` // Increment value of unmap bandwidth - UnmapBandwidthIncrement int64 `xml:"unmapBandwidthIncrement,omitempty" json:"unmapBandwidthIncrement,omitempty" vim:"6.7"` + UnmapBandwidthIncrement int64 `xml:"unmapBandwidthIncrement,omitempty" json:"unmapBandwidthIncrement,omitempty"` // Fixed unmap bandwidth ultra low limit value in MB/sec. UnmapBandwidthUltraLow int64 `xml:"unmapBandwidthUltraLow,omitempty" json:"unmapBandwidthUltraLow,omitempty" vim:"8.0.0.1"` } @@ -96690,7 +96115,7 @@ type VmfsDatastoreBaseOption struct { // format type on the disk. // // See also `HostDiskPartitionInfoPartitionFormat_enum`. - PartitionFormatChange *bool `xml:"partitionFormatChange" json:"partitionFormatChange,omitempty" vim:"5.0"` + PartitionFormatChange *bool `xml:"partitionFormatChange" json:"partitionFormatChange,omitempty"` } func init() { @@ -96727,7 +96152,6 @@ type VmfsDatastoreExpandSpec struct { func init() { t["VmfsDatastoreExpandSpec"] = reflect.TypeOf((*VmfsDatastoreExpandSpec)(nil)).Elem() - minAPIVersionForType["VmfsDatastoreExpandSpec"] = "4.0" } // Specification to increase the capacity of a VMFS datastore by adding @@ -96755,9 +96179,9 @@ type VmfsDatastoreInfo struct { DatastoreInfo // Maximum raw device mapping size (physical compatibility) - MaxPhysicalRDMFileSize int64 `xml:"maxPhysicalRDMFileSize,omitempty" json:"maxPhysicalRDMFileSize,omitempty" vim:"5.1"` + MaxPhysicalRDMFileSize int64 `xml:"maxPhysicalRDMFileSize,omitempty" json:"maxPhysicalRDMFileSize,omitempty"` // Maximum raw device mapping size (virtual compatibility) - MaxVirtualRDMFileSize int64 `xml:"maxVirtualRDMFileSize,omitempty" json:"maxVirtualRDMFileSize,omitempty" vim:"5.1"` + MaxVirtualRDMFileSize int64 `xml:"maxVirtualRDMFileSize,omitempty" json:"maxVirtualRDMFileSize,omitempty"` // The VMFS volume information for the datastore. // // May not be @@ -96865,7 +96289,6 @@ type VmfsMountFault struct { func init() { t["VmfsMountFault"] = reflect.TypeOf((*VmfsMountFault)(nil)).Elem() - minAPIVersionForType["VmfsMountFault"] = "4.0" } type VmfsMountFaultFault BaseVmfsMountFault @@ -96900,7 +96323,6 @@ type VmfsUnmapBandwidthSpec struct { func init() { t["VmfsUnmapBandwidthSpec"] = reflect.TypeOf((*VmfsUnmapBandwidthSpec)(nil)).Elem() - minAPIVersionForType["VmfsUnmapBandwidthSpec"] = "6.7" } // This fault is thrown when the Vmotion Interface on this host is not enabled. @@ -96912,7 +96334,6 @@ type VmotionInterfaceNotEnabled struct { func init() { t["VmotionInterfaceNotEnabled"] = reflect.TypeOf((*VmotionInterfaceNotEnabled)(nil)).Elem() - minAPIVersionForType["VmotionInterfaceNotEnabled"] = "2.5" } type VmotionInterfaceNotEnabledFault VmotionInterfaceNotEnabled @@ -96921,6 +96342,51 @@ func init() { t["VmotionInterfaceNotEnabledFault"] = reflect.TypeOf((*VmotionInterfaceNotEnabledFault)(nil)).Elem() } +// This data structure defines the failover policy for a distributed +// virtual switch when network offload is enabled, specifically +// related to the Data Processing Unit(DPU). +// +// The active and standby uplinks are expected to be backed by different +// DPUs to provide redundancy. If DPU backing active uplinks fails, then +// the standby DPU takes over to ensure uninterrupted network connectivity. 
+type VmwareDistributedVirtualSwitchDpuFailoverPolicy struct { + DynamicData + + // The name of the active uplink(s). + // + // These uplink(s) must be backed + // by vmnic(s) from a single DPU. + ActiveUplink []string `xml:"activeUplink,omitempty" json:"activeUplink,omitempty"` + // The name of the standby uplink(s). + // + // These uplink(s) must be backed + // by vmnic(s) from a different DPU than the active uplink(s). + // An empty standbyUplink indicates that no failover action will be + // taken after the active DPU fails. + StandbyUplink []string `xml:"standbyUplink,omitempty" json:"standbyUplink,omitempty"` +} + +func init() { + t["VmwareDistributedVirtualSwitchDpuFailoverPolicy"] = reflect.TypeOf((*VmwareDistributedVirtualSwitchDpuFailoverPolicy)(nil)).Elem() + minAPIVersionForType["VmwareDistributedVirtualSwitchDpuFailoverPolicy"] = "8.0.3.0" +} + +// This data structure defines the network offoad specific configuration of +// a distributed virtual switch. +type VmwareDistributedVirtualSwitchNetworkOffloadConfig struct { + DynamicData + + // The DPU failover policy of the switch. + // + // If this property is not set, all uplink ports are active uplinks. + DpuFailoverPolicy *VmwareDistributedVirtualSwitchDpuFailoverPolicy `xml:"dpuFailoverPolicy,omitempty" json:"dpuFailoverPolicy,omitempty"` +} + +func init() { + t["VmwareDistributedVirtualSwitchNetworkOffloadConfig"] = reflect.TypeOf((*VmwareDistributedVirtualSwitchNetworkOffloadConfig)(nil)).Elem() + minAPIVersionForType["VmwareDistributedVirtualSwitchNetworkOffloadConfig"] = "8.0.3.0" +} + // This data type defines the configuration when PVLAN id is to be // used for the ports. type VmwareDistributedVirtualSwitchPvlanSpec struct { @@ -96932,7 +96398,6 @@ type VmwareDistributedVirtualSwitchPvlanSpec struct { func init() { t["VmwareDistributedVirtualSwitchPvlanSpec"] = reflect.TypeOf((*VmwareDistributedVirtualSwitchPvlanSpec)(nil)).Elem() - minAPIVersionForType["VmwareDistributedVirtualSwitchPvlanSpec"] = "4.0" } // This data type specifies that the port uses trunk mode, @@ -96949,7 +96414,6 @@ type VmwareDistributedVirtualSwitchTrunkVlanSpec struct { func init() { t["VmwareDistributedVirtualSwitchTrunkVlanSpec"] = reflect.TypeOf((*VmwareDistributedVirtualSwitchTrunkVlanSpec)(nil)).Elem() - minAPIVersionForType["VmwareDistributedVirtualSwitchTrunkVlanSpec"] = "4.0" } // This data type defines the configuration when single vlanId is used for @@ -96960,15 +96424,14 @@ type VmwareDistributedVirtualSwitchVlanIdSpec struct { // The VLAN ID for ports. // // Possible values: - // - A value of 0 specifies that you do not want the port associated - // with a VLAN. - // - A value from 1 to 4094 specifies a VLAN ID for the port. + // - A value of 0 specifies that you do not want the port associated + // with a VLAN. + // - A value from 1 to 4094 specifies a VLAN ID for the port. VlanId int32 `xml:"vlanId" json:"vlanId"` } func init() { t["VmwareDistributedVirtualSwitchVlanIdSpec"] = reflect.TypeOf((*VmwareDistributedVirtualSwitchVlanIdSpec)(nil)).Elem() - minAPIVersionForType["VmwareDistributedVirtualSwitchVlanIdSpec"] = "4.0" } // Base class for Vlan Specifiation for ports. @@ -96978,7 +96441,6 @@ type VmwareDistributedVirtualSwitchVlanSpec struct { func init() { t["VmwareDistributedVirtualSwitchVlanSpec"] = reflect.TypeOf((*VmwareDistributedVirtualSwitchVlanSpec)(nil)).Elem() - minAPIVersionForType["VmwareDistributedVirtualSwitchVlanSpec"] = "4.0" } // Policy for a uplink port team. 
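The two data objects added above, VmwareDistributedVirtualSwitchDpuFailoverPolicy and VmwareDistributedVirtualSwitchNetworkOffloadConfig, model DPU-backed uplink failover for a network-offload-enabled distributed switch: the active uplinks are expected to be backed by one DPU and the standby uplinks by a different one. A minimal Go sketch of populating them follows; how the offload config is attached to a switch configuration spec, and how the StartDpuFailover request added later in this diff is dispatched, are assumptions that this hunk does not show, and the uplink and DPU names are placeholders.

// Illustrative sketch only: builds the DPU failover policy and network
// offload config types introduced in this change. Attaching the offload
// config to a VMwareDVSConfigSpec and invoking StartDpuFailover are not
// shown in this diff and are assumed here.
package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	policy := &types.VmwareDistributedVirtualSwitchDpuFailoverPolicy{
		// Active uplinks must all be backed by vmnics from a single DPU.
		ActiveUplink: []string{"uplink1", "uplink2"},
		// Standby uplinks must be backed by a different DPU; leaving this
		// empty means no failover action is taken if the active DPU fails.
		StandbyUplink: []string{"uplink3", "uplink4"},
	}

	offload := types.VmwareDistributedVirtualSwitchNetworkOffloadConfig{
		// If DpuFailoverPolicy is nil, all uplink ports are treated as active.
		DpuFailoverPolicy: policy,
	}

	// A manual failover to a specific DPU would be requested with the
	// StartDpuFailover operation added further down in this diff. The
	// target managed object (the This field) is omitted in this sketch;
	// dvsName and targetDpuAlias are placeholder values.
	req := types.StartDpuFailoverRequestType{
		DvsName:        "dvs-1",
		TargetDpuAlias: "dpu-0",
	}

	fmt.Printf("%+v\n%+v\n", offload, req)
}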
@@ -97015,7 +96477,6 @@ type VmwareUplinkPortTeamingPolicy struct { func init() { t["VmwareUplinkPortTeamingPolicy"] = reflect.TypeOf((*VmwareUplinkPortTeamingPolicy)(nil)).Elem() - minAPIVersionForType["VmwareUplinkPortTeamingPolicy"] = "4.0" } // This argument records a Virtual NIC device that connects to a DVPort. @@ -97030,7 +96491,6 @@ type VnicPortArgument struct { func init() { t["VnicPortArgument"] = reflect.TypeOf((*VnicPortArgument)(nil)).Elem() - minAPIVersionForType["VnicPortArgument"] = "4.0" } // An error occurred in the Open Source Components applications during @@ -97066,7 +96526,6 @@ type VramLimitLicense struct { func init() { t["VramLimitLicense"] = reflect.TypeOf((*VramLimitLicense)(nil)).Elem() - minAPIVersionForType["VramLimitLicense"] = "5.0" } type VramLimitLicenseFault VramLimitLicense @@ -97105,7 +96564,6 @@ type VsanClusterConfigInfo struct { func init() { t["VsanClusterConfigInfo"] = reflect.TypeOf((*VsanClusterConfigInfo)(nil)).Elem() - minAPIVersionForType["VsanClusterConfigInfo"] = "5.5" } // Default VSAN service configuration to be used for hosts admitted @@ -97149,12 +96607,11 @@ type VsanClusterConfigInfoHostDefaultInfo struct { // disk status. // Changing this value to true shall do disk enforcement // check that all VSAN disks are checksum enabled. - ChecksumEnabled *bool `xml:"checksumEnabled" json:"checksumEnabled,omitempty" vim:"6.0"` + ChecksumEnabled *bool `xml:"checksumEnabled" json:"checksumEnabled,omitempty"` } func init() { t["VsanClusterConfigInfoHostDefaultInfo"] = reflect.TypeOf((*VsanClusterConfigInfoHostDefaultInfo)(nil)).Elem() - minAPIVersionForType["VsanClusterConfigInfoHostDefaultInfo"] = "5.5" } // Fault thrown for the case that an attempt is made to move a host which @@ -97174,7 +96631,6 @@ type VsanClusterUuidMismatch struct { func init() { t["VsanClusterUuidMismatch"] = reflect.TypeOf((*VsanClusterUuidMismatch)(nil)).Elem() - minAPIVersionForType["VsanClusterUuidMismatch"] = "5.5" } type VsanClusterUuidMismatchFault VsanClusterUuidMismatch @@ -97210,7 +96666,6 @@ type VsanDiskFault struct { func init() { t["VsanDiskFault"] = reflect.TypeOf((*VsanDiskFault)(nil)).Elem() - minAPIVersionForType["VsanDiskFault"] = "5.5" } type VsanDiskFaultFault BaseVsanDiskFault @@ -97229,7 +96684,6 @@ type VsanFault struct { func init() { t["VsanFault"] = reflect.TypeOf((*VsanFault)(nil)).Elem() - minAPIVersionForType["VsanFault"] = "5.5" } type VsanFaultFault BaseVsanFault @@ -97264,7 +96718,6 @@ type VsanHostClusterStatus struct { func init() { t["VsanHostClusterStatus"] = reflect.TypeOf((*VsanHostClusterStatus)(nil)).Elem() - minAPIVersionForType["VsanHostClusterStatus"] = "5.5" } // Data object representing the VSAN node state for a host. @@ -97284,7 +96737,6 @@ type VsanHostClusterStatusState struct { func init() { t["VsanHostClusterStatusState"] = reflect.TypeOf((*VsanHostClusterStatusState)(nil)).Elem() - minAPIVersionForType["VsanHostClusterStatusState"] = "5.5" } // Estimated completion status for transitory node states. 
@@ -97301,7 +96753,6 @@ type VsanHostClusterStatusStateCompletionEstimate struct { func init() { t["VsanHostClusterStatusStateCompletionEstimate"] = reflect.TypeOf((*VsanHostClusterStatusStateCompletionEstimate)(nil)).Elem() - minAPIVersionForType["VsanHostClusterStatusStateCompletionEstimate"] = "5.5" } // The `VsanHostConfigInfo` data object contains host-specific settings @@ -97341,7 +96792,7 @@ type VsanHostConfigInfo struct { // // VSAN host fault domain settings are independent of the // current value of `VsanHostConfigInfo.enabled`. - FaultDomainInfo *VsanHostFaultDomainInfo `xml:"faultDomainInfo,omitempty" json:"faultDomainInfo,omitempty" vim:"6.0"` + FaultDomainInfo *VsanHostFaultDomainInfo `xml:"faultDomainInfo,omitempty" json:"faultDomainInfo,omitempty"` // Whether the vSAN ESA is enabled on this host. // // This can only be @@ -97351,7 +96802,6 @@ type VsanHostConfigInfo struct { func init() { t["VsanHostConfigInfo"] = reflect.TypeOf((*VsanHostConfigInfo)(nil)).Elem() - minAPIVersionForType["VsanHostConfigInfo"] = "5.5" } // Host-local VSAN cluster configuration. @@ -97380,7 +96830,6 @@ type VsanHostConfigInfoClusterInfo struct { func init() { t["VsanHostConfigInfoClusterInfo"] = reflect.TypeOf((*VsanHostConfigInfoClusterInfo)(nil)).Elem() - minAPIVersionForType["VsanHostConfigInfoClusterInfo"] = "5.5" } // Host-local VSAN network configuration. @@ -97398,7 +96847,6 @@ type VsanHostConfigInfoNetworkInfo struct { func init() { t["VsanHostConfigInfoNetworkInfo"] = reflect.TypeOf((*VsanHostConfigInfoNetworkInfo)(nil)).Elem() - minAPIVersionForType["VsanHostConfigInfoNetworkInfo"] = "5.5" } // A PortConfig represents a virtual network adapter and its @@ -97419,7 +96867,6 @@ type VsanHostConfigInfoNetworkInfoPortConfig struct { func init() { t["VsanHostConfigInfoNetworkInfoPortConfig"] = reflect.TypeOf((*VsanHostConfigInfoNetworkInfoPortConfig)(nil)).Elem() - minAPIVersionForType["VsanHostConfigInfoNetworkInfoPortConfig"] = "5.5" } // Host-local VSAN storage configuration. @@ -97457,7 +96904,7 @@ type VsanHostConfigInfoStorageInfo struct { DiskMapping []VsanHostDiskMapping `xml:"diskMapping,omitempty" json:"diskMapping,omitempty"` // List of `VsanHostDiskMapping` entries with runtime information from // the perspective of this host. - DiskMapInfo []VsanHostDiskMapInfo `xml:"diskMapInfo,omitempty" json:"diskMapInfo,omitempty" vim:"6.0"` + DiskMapInfo []VsanHostDiskMapInfo `xml:"diskMapInfo,omitempty" json:"diskMapInfo,omitempty"` // Deprecated this attribute was originally used for indicating whether // hardware checksums is supported on the disks. But in vSphere 2016 // hardware checksums are replaced with software implementation, @@ -97468,12 +96915,11 @@ type VsanHostConfigInfoStorageInfo struct { // // If any disk is not checksum capable or 520 bps formatted, // we will skip it. 
- ChecksumEnabled *bool `xml:"checksumEnabled" json:"checksumEnabled,omitempty" vim:"6.0"` + ChecksumEnabled *bool `xml:"checksumEnabled" json:"checksumEnabled,omitempty"` } func init() { t["VsanHostConfigInfoStorageInfo"] = reflect.TypeOf((*VsanHostConfigInfoStorageInfo)(nil)).Elem() - minAPIVersionForType["VsanHostConfigInfoStorageInfo"] = "5.5" } // A `VsanHostDecommissionMode` defines an action to take upon decommissioning @@ -97496,7 +96942,6 @@ type VsanHostDecommissionMode struct { func init() { t["VsanHostDecommissionMode"] = reflect.TypeOf((*VsanHostDecommissionMode)(nil)).Elem() - minAPIVersionForType["VsanHostDecommissionMode"] = "5.5" } // A DiskMapInfo represents a `VsanHostDiskMapping` and its @@ -97514,7 +96959,6 @@ type VsanHostDiskMapInfo struct { func init() { t["VsanHostDiskMapInfo"] = reflect.TypeOf((*VsanHostDiskMapInfo)(nil)).Elem() - minAPIVersionForType["VsanHostDiskMapInfo"] = "6.0" } // A DiskMapResult represents the result of an operation performed @@ -97536,7 +96980,6 @@ type VsanHostDiskMapResult struct { func init() { t["VsanHostDiskMapResult"] = reflect.TypeOf((*VsanHostDiskMapResult)(nil)).Elem() - minAPIVersionForType["VsanHostDiskMapResult"] = "5.5" } // A `VsanHostDiskMapping` is a set of one SSD `HostScsiDisk` backed @@ -97559,7 +97002,6 @@ type VsanHostDiskMapping struct { func init() { t["VsanHostDiskMapping"] = reflect.TypeOf((*VsanHostDiskMapping)(nil)).Elem() - minAPIVersionForType["VsanHostDiskMapping"] = "5.5" } // A DiskResult represents the result of VSAN configuration operation @@ -97587,12 +97029,11 @@ type VsanHostDiskResult struct { // // If set, indicates the disk performance is degraded in VSAN // If unset, it is unknown whether the disk performance is degraded in VSAN. - Degraded *bool `xml:"degraded" json:"degraded,omitempty" vim:"6.0"` + Degraded *bool `xml:"degraded" json:"degraded,omitempty"` } func init() { t["VsanHostDiskResult"] = reflect.TypeOf((*VsanHostDiskResult)(nil)).Elem() - minAPIVersionForType["VsanHostDiskResult"] = "5.5" } // Host-local VSAN fault domain configuration. @@ -97612,7 +97053,6 @@ type VsanHostFaultDomainInfo struct { func init() { t["VsanHostFaultDomainInfo"] = reflect.TypeOf((*VsanHostFaultDomainInfo)(nil)).Elem() - minAPIVersionForType["VsanHostFaultDomainInfo"] = "6.0" } // An `VsanHostIpConfig` is a pair of multicast IP addresses for use by the VSAN @@ -97633,7 +97073,6 @@ type VsanHostIpConfig struct { func init() { t["VsanHostIpConfig"] = reflect.TypeOf((*VsanHostIpConfig)(nil)).Elem() - minAPIVersionForType["VsanHostIpConfig"] = "5.5" } // The `VsanHostMembershipInfo` data object contains VSAN cluster @@ -97659,7 +97098,6 @@ type VsanHostMembershipInfo struct { func init() { t["VsanHostMembershipInfo"] = reflect.TypeOf((*VsanHostMembershipInfo)(nil)).Elem() - minAPIVersionForType["VsanHostMembershipInfo"] = "5.5" } // This data object contains VSAN cluster runtime information from @@ -97683,7 +97121,6 @@ type VsanHostRuntimeInfo struct { func init() { t["VsanHostRuntimeInfo"] = reflect.TypeOf((*VsanHostRuntimeInfo)(nil)).Elem() - minAPIVersionForType["VsanHostRuntimeInfo"] = "5.5" } // Data structure of reporting a disk issue. 
@@ -97700,7 +97137,6 @@ type VsanHostRuntimeInfoDiskIssue struct { func init() { t["VsanHostRuntimeInfoDiskIssue"] = reflect.TypeOf((*VsanHostRuntimeInfoDiskIssue)(nil)).Elem() - minAPIVersionForType["VsanHostRuntimeInfoDiskIssue"] = "5.5" } // A VsanDiskInfo represents the additional detailed @@ -97719,7 +97155,6 @@ type VsanHostVsanDiskInfo struct { func init() { t["VsanHostVsanDiskInfo"] = reflect.TypeOf((*VsanHostVsanDiskInfo)(nil)).Elem() - minAPIVersionForType["VsanHostVsanDiskInfo"] = "6.0" } // Fault used for the add operation which will result in incompatible @@ -97732,7 +97167,6 @@ type VsanIncompatibleDiskMapping struct { func init() { t["VsanIncompatibleDiskMapping"] = reflect.TypeOf((*VsanIncompatibleDiskMapping)(nil)).Elem() - minAPIVersionForType["VsanIncompatibleDiskMapping"] = "6.0" } type VsanIncompatibleDiskMappingFault VsanIncompatibleDiskMapping @@ -97755,7 +97189,6 @@ type VsanNewPolicyBatch struct { func init() { t["VsanNewPolicyBatch"] = reflect.TypeOf((*VsanNewPolicyBatch)(nil)).Elem() - minAPIVersionForType["VsanNewPolicyBatch"] = "5.5" } // PolicyChangeBatch -- @@ -97772,7 +97205,6 @@ type VsanPolicyChangeBatch struct { func init() { t["VsanPolicyChangeBatch"] = reflect.TypeOf((*VsanPolicyChangeBatch)(nil)).Elem() - minAPIVersionForType["VsanPolicyChangeBatch"] = "5.5" } // PolicyCost -- @@ -97806,7 +97238,7 @@ type VsanPolicyCost struct { // // For eg. an object of size 1GB with two copies of the // data has two 1GB replicas and so this ratio is 2. - CurrentDiskSpaceToAddressSpaceRatio float32 `xml:"currentDiskSpaceToAddressSpaceRatio,omitempty" json:"currentDiskSpaceToAddressSpaceRatio,omitempty" vim:"6.0"` + CurrentDiskSpaceToAddressSpaceRatio float32 `xml:"currentDiskSpaceToAddressSpaceRatio,omitempty" json:"currentDiskSpaceToAddressSpaceRatio,omitempty"` // Ratio of physical disk space of an object to the logical VSAN // address space after new policy is applied. // @@ -97818,7 +97250,6 @@ type VsanPolicyCost struct { func init() { t["VsanPolicyCost"] = reflect.TypeOf((*VsanPolicyCost)(nil)).Elem() - minAPIVersionForType["VsanPolicyCost"] = "5.5" } // PolicySatisfiablity -- @@ -97843,7 +97274,6 @@ type VsanPolicySatisfiability struct { func init() { t["VsanPolicySatisfiability"] = reflect.TypeOf((*VsanPolicySatisfiability)(nil)).Elem() - minAPIVersionForType["VsanPolicySatisfiability"] = "5.5" } // Pre-flight check encountered a VC plumbing issue. @@ -97858,7 +97288,6 @@ type VsanUpgradeSystemAPIBrokenIssue struct { func init() { t["VsanUpgradeSystemAPIBrokenIssue"] = reflect.TypeOf((*VsanUpgradeSystemAPIBrokenIssue)(nil)).Elem() - minAPIVersionForType["VsanUpgradeSystemAPIBrokenIssue"] = "6.0" } // Pre-flight check encountered at least one host with auto-claim enabled. 
@@ -97873,7 +97302,6 @@ type VsanUpgradeSystemAutoClaimEnabledOnHostsIssue struct { func init() { t["VsanUpgradeSystemAutoClaimEnabledOnHostsIssue"] = reflect.TypeOf((*VsanUpgradeSystemAutoClaimEnabledOnHostsIssue)(nil)).Elem() - minAPIVersionForType["VsanUpgradeSystemAutoClaimEnabledOnHostsIssue"] = "6.0" } // Pre-flight check encountered at least one host that is disconnected @@ -97889,7 +97317,6 @@ type VsanUpgradeSystemHostsDisconnectedIssue struct { func init() { t["VsanUpgradeSystemHostsDisconnectedIssue"] = reflect.TypeOf((*VsanUpgradeSystemHostsDisconnectedIssue)(nil)).Elem() - minAPIVersionForType["VsanUpgradeSystemHostsDisconnectedIssue"] = "6.0" } // Pre-flight check encountered at least one host that is part of the @@ -97905,7 +97332,6 @@ type VsanUpgradeSystemMissingHostsInClusterIssue struct { func init() { t["VsanUpgradeSystemMissingHostsInClusterIssue"] = reflect.TypeOf((*VsanUpgradeSystemMissingHostsInClusterIssue)(nil)).Elem() - minAPIVersionForType["VsanUpgradeSystemMissingHostsInClusterIssue"] = "6.0" } // Information about a particular group of hosts making up a network partition. @@ -97920,7 +97346,6 @@ type VsanUpgradeSystemNetworkPartitionInfo struct { func init() { t["VsanUpgradeSystemNetworkPartitionInfo"] = reflect.TypeOf((*VsanUpgradeSystemNetworkPartitionInfo)(nil)).Elem() - minAPIVersionForType["VsanUpgradeSystemNetworkPartitionInfo"] = "6.0" } // Pre-flight check encountered a network partition. @@ -97936,7 +97361,6 @@ type VsanUpgradeSystemNetworkPartitionIssue struct { func init() { t["VsanUpgradeSystemNetworkPartitionIssue"] = reflect.TypeOf((*VsanUpgradeSystemNetworkPartitionIssue)(nil)).Elem() - minAPIVersionForType["VsanUpgradeSystemNetworkPartitionIssue"] = "6.0" } // Pre-flight check encountered not enough free disk capacity to maintain policy compliance. @@ -97950,7 +97374,6 @@ type VsanUpgradeSystemNotEnoughFreeCapacityIssue struct { func init() { t["VsanUpgradeSystemNotEnoughFreeCapacityIssue"] = reflect.TypeOf((*VsanUpgradeSystemNotEnoughFreeCapacityIssue)(nil)).Elem() - minAPIVersionForType["VsanUpgradeSystemNotEnoughFreeCapacityIssue"] = "6.0" } // Base class for a pre-flight check issue. @@ -97966,7 +97389,6 @@ type VsanUpgradeSystemPreflightCheckIssue struct { func init() { t["VsanUpgradeSystemPreflightCheckIssue"] = reflect.TypeOf((*VsanUpgradeSystemPreflightCheckIssue)(nil)).Elem() - minAPIVersionForType["VsanUpgradeSystemPreflightCheckIssue"] = "6.0" } // Captures the result of a VSAN upgrade pre-flight check. @@ -97991,7 +97413,6 @@ type VsanUpgradeSystemPreflightCheckResult struct { func init() { t["VsanUpgradeSystemPreflightCheckResult"] = reflect.TypeOf((*VsanUpgradeSystemPreflightCheckResult)(nil)).Elem() - minAPIVersionForType["VsanUpgradeSystemPreflightCheckResult"] = "6.0" } // Pre-flight check encountered at least one host that is part of the VSAN @@ -98005,7 +97426,6 @@ type VsanUpgradeSystemRogueHostsInClusterIssue struct { func init() { t["VsanUpgradeSystemRogueHostsInClusterIssue"] = reflect.TypeOf((*VsanUpgradeSystemRogueHostsInClusterIssue)(nil)).Elem() - minAPIVersionForType["VsanUpgradeSystemRogueHostsInClusterIssue"] = "6.0" } // The upgrade process removed or added VSAN from/to a disk group. 
@@ -98027,7 +97447,6 @@ type VsanUpgradeSystemUpgradeHistoryDiskGroupOp struct { func init() { t["VsanUpgradeSystemUpgradeHistoryDiskGroupOp"] = reflect.TypeOf((*VsanUpgradeSystemUpgradeHistoryDiskGroupOp)(nil)).Elem() - minAPIVersionForType["VsanUpgradeSystemUpgradeHistoryDiskGroupOp"] = "6.0" } // Captures one "log entry" of an upgrade process. @@ -98056,7 +97475,6 @@ type VsanUpgradeSystemUpgradeHistoryItem struct { func init() { t["VsanUpgradeSystemUpgradeHistoryItem"] = reflect.TypeOf((*VsanUpgradeSystemUpgradeHistoryItem)(nil)).Elem() - minAPIVersionForType["VsanUpgradeSystemUpgradeHistoryItem"] = "6.0" } // Upgrade process encountered a pre-flight check failure. @@ -98072,7 +97490,6 @@ type VsanUpgradeSystemUpgradeHistoryPreflightFail struct { func init() { t["VsanUpgradeSystemUpgradeHistoryPreflightFail"] = reflect.TypeOf((*VsanUpgradeSystemUpgradeHistoryPreflightFail)(nil)).Elem() - minAPIVersionForType["VsanUpgradeSystemUpgradeHistoryPreflightFail"] = "6.0" } // Captures the status of a VSAN cluster on-disk format upgrade. @@ -98103,7 +97520,6 @@ type VsanUpgradeSystemUpgradeStatus struct { func init() { t["VsanUpgradeSystemUpgradeStatus"] = reflect.TypeOf((*VsanUpgradeSystemUpgradeStatus)(nil)).Elem() - minAPIVersionForType["VsanUpgradeSystemUpgradeStatus"] = "6.0" } // Pre-flight check encountered v2 objects preventing a downgrade. @@ -98116,7 +97532,6 @@ type VsanUpgradeSystemV2ObjectsPresentDuringDowngradeIssue struct { func init() { t["VsanUpgradeSystemV2ObjectsPresentDuringDowngradeIssue"] = reflect.TypeOf((*VsanUpgradeSystemV2ObjectsPresentDuringDowngradeIssue)(nil)).Elem() - minAPIVersionForType["VsanUpgradeSystemV2ObjectsPresentDuringDowngradeIssue"] = "6.0" } // Pre-flight check encountered at least one host with wrong ESX version. @@ -98133,7 +97548,6 @@ type VsanUpgradeSystemWrongEsxVersionIssue struct { func init() { t["VsanUpgradeSystemWrongEsxVersionIssue"] = reflect.TypeOf((*VsanUpgradeSystemWrongEsxVersionIssue)(nil)).Elem() - minAPIVersionForType["VsanUpgradeSystemWrongEsxVersionIssue"] = "6.0" } // Specification of cloning a virtual storage object. @@ -98145,7 +97559,7 @@ type VslmCloneSpec struct { // Choice of the deletion behavior of this virtual storage object. // // If not set, the default value is false. - KeepAfterDeleteVm *bool `xml:"keepAfterDeleteVm" json:"keepAfterDeleteVm,omitempty" vim:"6.7"` + KeepAfterDeleteVm *bool `xml:"keepAfterDeleteVm" json:"keepAfterDeleteVm,omitempty"` // The metadata KV pairs that are supposed to be updated on the destination // virtual storage object. // @@ -98153,12 +97567,11 @@ type VslmCloneSpec struct { // said, failing to update the specified metadata pairs leads to the failure // of the clone task. If unset, no metadata will be updated. An empty string // value is indicative of a vcenter tag. - Metadata []KeyValue `xml:"metadata,omitempty" json:"metadata,omitempty" vim:"6.7.2"` + Metadata []KeyValue `xml:"metadata,omitempty" json:"metadata,omitempty"` } func init() { t["VslmCloneSpec"] = reflect.TypeOf((*VslmCloneSpec)(nil)).Elem() - minAPIVersionForType["VslmCloneSpec"] = "6.5" } // Specification to create a virtual storage object. @@ -98170,7 +97583,7 @@ type VslmCreateSpec struct { // Choice of the deletion behavior of this virtual storage object. // // If not set, the default value is true. 
- KeepAfterDeleteVm *bool `xml:"keepAfterDeleteVm" json:"keepAfterDeleteVm,omitempty" vim:"6.7"` + KeepAfterDeleteVm *bool `xml:"keepAfterDeleteVm" json:"keepAfterDeleteVm,omitempty"` // Specification of the backings of the virtual storage object. BackingSpec BaseVslmCreateSpecBackingSpec `xml:"backingSpec,typeattr" json:"backingSpec"` // Size in MB of the virtual storage object. @@ -98179,14 +97592,14 @@ type VslmCreateSpec struct { // // If unset, // the default behavior will apply. - Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr" json:"profile,omitempty" vim:"6.7"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr" json:"profile,omitempty"` // Crypto operation of the disk. // // If unset and if `VslmCreateSpec.profile` contains an encryption iofilter, // then crypto will be of type CryptoSpecEncrypt, and filled with // keyId that is automatically generated and keyProviderId that is the // default kms cluster. - Crypto BaseCryptoSpec `xml:"crypto,omitempty,typeattr" json:"crypto,omitempty" vim:"7.0"` + Crypto BaseCryptoSpec `xml:"crypto,omitempty,typeattr" json:"crypto,omitempty"` // The metadata KV pairs that are supposed to be created on the newly created // virtual storage object. // @@ -98194,12 +97607,11 @@ type VslmCreateSpec struct { // said, failing to add the specified metadata pairs leads to the failure // of the create task. If unset, no metadata will be added. An empty string // value is indicative of a vcenter tag. - Metadata []KeyValue `xml:"metadata,omitempty" json:"metadata,omitempty" vim:"6.7.2"` + Metadata []KeyValue `xml:"metadata,omitempty" json:"metadata,omitempty"` } func init() { t["VslmCreateSpec"] = reflect.TypeOf((*VslmCreateSpec)(nil)).Elem() - minAPIVersionForType["VslmCreateSpec"] = "6.5" } // Specification of the backing of a virtual @@ -98216,12 +97628,11 @@ type VslmCreateSpecBackingSpec struct { // // If not specified disk gets created at the defualt // VStorageObject location on the specified datastore. - Path string `xml:"path,omitempty" json:"path,omitempty" vim:"6.7"` + Path string `xml:"path,omitempty" json:"path,omitempty"` } func init() { t["VslmCreateSpecBackingSpec"] = reflect.TypeOf((*VslmCreateSpecBackingSpec)(nil)).Elem() - minAPIVersionForType["VslmCreateSpecBackingSpec"] = "6.5" } // Specification of the disk file backing of a virtual @@ -98242,7 +97653,6 @@ type VslmCreateSpecDiskFileBackingSpec struct { func init() { t["VslmCreateSpecDiskFileBackingSpec"] = reflect.TypeOf((*VslmCreateSpecDiskFileBackingSpec)(nil)).Elem() - minAPIVersionForType["VslmCreateSpecDiskFileBackingSpec"] = "6.5" } // Specification of the rdm backing of a virtual @@ -98263,7 +97673,6 @@ type VslmCreateSpecRawDiskMappingBackingSpec struct { func init() { t["VslmCreateSpecRawDiskMappingBackingSpec"] = reflect.TypeOf((*VslmCreateSpecRawDiskMappingBackingSpec)(nil)).Elem() - minAPIVersionForType["VslmCreateSpecRawDiskMappingBackingSpec"] = "6.5" } // Base specification of moving or copying a virtual storage object. @@ -98276,7 +97685,7 @@ type VslmMigrateSpec struct { // // If unset, // the default behavior will apply. - Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr" json:"profile,omitempty" vim:"6.7"` + Profile []BaseVirtualMachineProfileSpec `xml:"profile,omitempty,typeattr" json:"profile,omitempty"` // Flag indicates any delta disk backings will be consolidated // during migration. 
// @@ -98300,12 +97709,16 @@ type VslmMigrateSpec struct { // source VStorageObject is encrypted, then disksCyrpto is treated as // CryptoSpecDecrypt, during migration, the object will be decrypted. // To recrypt the disk during migration, disksCrypto has to be present. - DisksCrypto *DiskCryptoSpec `xml:"disksCrypto,omitempty" json:"disksCrypto,omitempty" vim:"7.0"` + DisksCrypto *DiskCryptoSpec `xml:"disksCrypto,omitempty" json:"disksCrypto,omitempty"` + // The service endpoint of vCenter where the FCD should be located. + // + // If + // not specified the current vCenter service is used. + Service *ServiceLocator `xml:"service,omitempty" json:"service,omitempty" vim:"8.0.3.0"` } func init() { t["VslmMigrateSpec"] = reflect.TypeOf((*VslmMigrateSpec)(nil)).Elem() - minAPIVersionForType["VslmMigrateSpec"] = "6.5" } // Specification for relocating a virtual storage object. @@ -98315,7 +97728,6 @@ type VslmRelocateSpec struct { func init() { t["VslmRelocateSpec"] = reflect.TypeOf((*VslmRelocateSpec)(nil)).Elem() - minAPIVersionForType["VslmRelocateSpec"] = "6.5" } // Specification of the Tag-Association tuple of Dataservice Tagging package. @@ -98332,7 +97744,6 @@ type VslmTagEntry struct { func init() { t["VslmTagEntry"] = reflect.TypeOf((*VslmTagEntry)(nil)).Elem() - minAPIVersionForType["VslmTagEntry"] = "6.5" } // Thrown if a dvPort is used as destination in multiple Distributed Port Mirroring sessions. @@ -98351,7 +97762,6 @@ type VspanDestPortConflict struct { func init() { t["VspanDestPortConflict"] = reflect.TypeOf((*VspanDestPortConflict)(nil)).Elem() - minAPIVersionForType["VspanDestPortConflict"] = "5.0" } type VspanDestPortConflictFault VspanDestPortConflict @@ -98375,7 +97785,6 @@ type VspanPortConflict struct { func init() { t["VspanPortConflict"] = reflect.TypeOf((*VspanPortConflict)(nil)).Elem() - minAPIVersionForType["VspanPortConflict"] = "5.0" } type VspanPortConflictFault VspanPortConflict @@ -98400,7 +97809,6 @@ type VspanPortMoveFault struct { func init() { t["VspanPortMoveFault"] = reflect.TypeOf((*VspanPortMoveFault)(nil)).Elem() - minAPIVersionForType["VspanPortMoveFault"] = "5.0" } type VspanPortMoveFaultFault VspanPortMoveFault @@ -98420,7 +97828,6 @@ type VspanPortPromiscChangeFault struct { func init() { t["VspanPortPromiscChangeFault"] = reflect.TypeOf((*VspanPortPromiscChangeFault)(nil)).Elem() - minAPIVersionForType["VspanPortPromiscChangeFault"] = "5.0" } type VspanPortPromiscChangeFaultFault VspanPortPromiscChangeFault @@ -98441,7 +97848,6 @@ type VspanPortgroupPromiscChangeFault struct { func init() { t["VspanPortgroupPromiscChangeFault"] = reflect.TypeOf((*VspanPortgroupPromiscChangeFault)(nil)).Elem() - minAPIVersionForType["VspanPortgroupPromiscChangeFault"] = "5.0" } type VspanPortgroupPromiscChangeFaultFault VspanPortgroupPromiscChangeFault @@ -98462,7 +97868,6 @@ type VspanPortgroupTypeChangeFault struct { func init() { t["VspanPortgroupTypeChangeFault"] = reflect.TypeOf((*VspanPortgroupTypeChangeFault)(nil)).Elem() - minAPIVersionForType["VspanPortgroupTypeChangeFault"] = "5.0" } type VspanPortgroupTypeChangeFaultFault VspanPortgroupTypeChangeFault @@ -98486,7 +97891,6 @@ type VspanPromiscuousPortNotSupported struct { func init() { t["VspanPromiscuousPortNotSupported"] = reflect.TypeOf((*VspanPromiscuousPortNotSupported)(nil)).Elem() - minAPIVersionForType["VspanPromiscuousPortNotSupported"] = "5.0" } type VspanPromiscuousPortNotSupportedFault VspanPromiscuousPortNotSupported @@ -98509,7 +97913,6 @@ type VspanSameSessionPortConflict struct { func 
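Aside from the dropped vim version tags, the functional change to the vslm specs above is the new Service field on VslmMigrateSpec, which lets a first-class disk (FCD) relocation target a different vCenter service endpoint. The sketch below fills it in, assuming the ServiceLocator and ServiceLocatorNamePassword shapes defined elsewhere in this generated package and that VslmRelocateSpec embeds VslmMigrateSpec as the other relocate specs do; all endpoint and credential values are placeholders.

// Sketch of a cross-vCenter FCD relocation spec using the new Service field.
// The ServiceLocator field names and the embedding of VslmMigrateSpec in
// VslmRelocateSpec follow the conventions of this generated package and are
// assumed here rather than shown in this diff hunk.
package main

import (
	"fmt"

	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	spec := types.VslmRelocateSpec{
		VslmMigrateSpec: types.VslmMigrateSpec{
			// If Service is nil, the current vCenter service is used; set it
			// to relocate the FCD under a different vCenter endpoint.
			Service: &types.ServiceLocator{
				Url:          "https://other-vc.example.com/sdk",
				InstanceUuid: "00000000-0000-0000-0000-000000000000",
				Credential: &types.ServiceLocatorNamePassword{
					Username: "administrator@vsphere.local",
					Password: "password-placeholder",
				},
			},
		},
	}
	fmt.Printf("%+v\n", spec)
}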
init() { t["VspanSameSessionPortConflict"] = reflect.TypeOf((*VspanSameSessionPortConflict)(nil)).Elem() - minAPIVersionForType["VspanSameSessionPortConflict"] = "5.0" } type VspanSameSessionPortConflictFault VspanSameSessionPortConflict @@ -98574,7 +97977,6 @@ type VvolDatastoreInfo struct { func init() { t["VvolDatastoreInfo"] = reflect.TypeOf((*VvolDatastoreInfo)(nil)).Elem() - minAPIVersionForType["VvolDatastoreInfo"] = "6.0" } type WaitForUpdates WaitForUpdatesRequestType @@ -98594,10 +97996,10 @@ type WaitForUpdatesExRequestType struct { This ManagedObjectReference `xml:"_this" json:"-"` // The data version currently known to the client. The value must be // either - // - the special initial data version (an empty string), - // - a data version returned from `PropertyCollector.CheckForUpdates` or `PropertyCollector.WaitForUpdates` - // - a non-truncated data version returned from `PropertyCollector.WaitForUpdatesEx` - // - a truncated data version returned from the last call to `PropertyCollector.WaitForUpdatesEx` with no intervening calls to `PropertyCollector.WaitForUpdates` or `PropertyCollector.CheckForUpdates`. + // - the special initial data version (an empty string), + // - a data version returned from `PropertyCollector.CheckForUpdates` or `PropertyCollector.WaitForUpdates` + // - a non-truncated data version returned from `PropertyCollector.WaitForUpdatesEx` + // - a truncated data version returned from the last call to `PropertyCollector.WaitForUpdatesEx` with no intervening calls to `PropertyCollector.WaitForUpdates` or `PropertyCollector.CheckForUpdates`. Version string `xml:"version,omitempty" json:"version,omitempty"` // Additional options controlling the change calculation. If omitted, // equivalent to an options argument with no fields set. @@ -98617,10 +98019,10 @@ type WaitForUpdatesRequestType struct { This ManagedObjectReference `xml:"_this" json:"-"` // The data version currently known to the client. The value // must be either - // - the special initial version (an empty string) - // - a data version returned from `PropertyCollector.CheckForUpdates` or `PropertyCollector.WaitForUpdates` by the same `PropertyCollector` on the same session - // - a non-truncated data version returned from `PropertyCollector.WaitForUpdatesEx` by the same `PropertyCollector` on the same - // session. + // - the special initial version (an empty string) + // - a data version returned from `PropertyCollector.CheckForUpdates` or `PropertyCollector.WaitForUpdates` by the same `PropertyCollector` on the same session + // - a non-truncated data version returned from `PropertyCollector.WaitForUpdatesEx` by the same `PropertyCollector` on the same + // session. Version string `xml:"version,omitempty" json:"version,omitempty"` } @@ -98679,7 +98081,6 @@ type WaitOptions struct { func init() { t["WaitOptions"] = reflect.TypeOf((*WaitOptions)(nil)).Elem() - minAPIVersionForType["WaitOptions"] = "4.1" } // The virtual machine and at least one of its virtual NICs are configured to @@ -98691,7 +98092,6 @@ type WakeOnLanNotSupported struct { func init() { t["WakeOnLanNotSupported"] = reflect.TypeOf((*WakeOnLanNotSupported)(nil)).Elem() - minAPIVersionForType["WakeOnLanNotSupported"] = "2.5" } // This fault is thrown when Wake-on-LAN isn't supported by the Vmotion NIC on the host. 
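The reflowed comments above restate the PropertyCollector version contract: an empty string requests the initial snapshot, and each later call passes back the version returned by the previous result, including a truncated version as long as no WaitForUpdates or CheckForUpdates call intervenes. A rough polling loop along those lines is sketched below, assuming the generated methods.WaitForUpdatesEx wrapper and the WaitOptions and UpdateSet field names used elsewhere in this package; filter creation and real error handling are elided.

// Sketch of an incremental-update loop against the PropertyCollector using
// the request types above. Property filter creation and consumption of the
// returned FilterSet are elided; field names outside this diff hunk
// (WaitOptions.MaxWaitSeconds, UpdateSet.Version) are assumed from the rest
// of this generated package.
package main

import (
	"context"

	"github.com/vmware/govmomi/vim25"
	"github.com/vmware/govmomi/vim25/methods"
	"github.com/vmware/govmomi/vim25/types"
)

func watch(ctx context.Context, c *vim25.Client) error {
	maxWait := int32(60)
	req := types.WaitForUpdatesEx{
		This:    c.ServiceContent.PropertyCollector,
		Version: "", // special initial version: the server returns a full snapshot first
		Options: &types.WaitOptions{MaxWaitSeconds: &maxWait},
	}

	for {
		res, err := methods.WaitForUpdatesEx(ctx, c, &req)
		if err != nil {
			return err
		}
		set := res.Returnval
		if set == nil {
			continue // the wait timed out with no changes; retry with the same version
		}

		// ... process set.FilterSet here ...

		// Pass the returned version (truncated or not) back on the next call.
		req.Version = set.Version
	}
}

func main() {} // connection setup to obtain a *vim25.Client is not shown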
@@ -98701,7 +98101,6 @@ type WakeOnLanNotSupportedByVmotionNIC struct { func init() { t["WakeOnLanNotSupportedByVmotionNIC"] = reflect.TypeOf((*WakeOnLanNotSupportedByVmotionNIC)(nil)).Elem() - minAPIVersionForType["WakeOnLanNotSupportedByVmotionNIC"] = "2.5" } type WakeOnLanNotSupportedByVmotionNICFault WakeOnLanNotSupportedByVmotionNIC @@ -98779,7 +98178,6 @@ type WillLoseHAProtection struct { func init() { t["WillLoseHAProtection"] = reflect.TypeOf((*WillLoseHAProtection)(nil)).Elem() - minAPIVersionForType["WillLoseHAProtection"] = "5.0" } type WillLoseHAProtectionFault WillLoseHAProtection @@ -98834,7 +98232,6 @@ type WillResetSnapshotDirectory struct { func init() { t["WillResetSnapshotDirectory"] = reflect.TypeOf((*WillResetSnapshotDirectory)(nil)).Elem() - minAPIVersionForType["WillResetSnapshotDirectory"] = "5.0" } type WillResetSnapshotDirectoryFault WillResetSnapshotDirectory @@ -98856,7 +98253,6 @@ type WinNetBIOSConfigInfo struct { func init() { t["WinNetBIOSConfigInfo"] = reflect.TypeOf((*WinNetBIOSConfigInfo)(nil)).Elem() - minAPIVersionForType["WinNetBIOSConfigInfo"] = "4.1" } // This exception is thrown when VirtualMachine.wipeDisk @@ -98867,7 +98263,6 @@ type WipeDiskFault struct { func init() { t["WipeDiskFault"] = reflect.TypeOf((*WipeDiskFault)(nil)).Elem() - minAPIVersionForType["WipeDiskFault"] = "5.1" } type WipeDiskFaultFault WipeDiskFault @@ -98892,7 +98287,6 @@ type WitnessNodeInfo struct { func init() { t["WitnessNodeInfo"] = reflect.TypeOf((*WitnessNodeInfo)(nil)).Elem() - minAPIVersionForType["WitnessNodeInfo"] = "6.5" } type XmlToCustomizationSpecItem XmlToCustomizationSpecItemRequestType @@ -99226,6 +98620,25 @@ func init() { type SetCustomValueResponse struct { } +type StartDpuFailover StartDpuFailoverRequestType + +func init() { + t["startDpuFailover"] = reflect.TypeOf((*StartDpuFailover)(nil)).Elem() +} + +type StartDpuFailoverRequestType struct { + This ManagedObjectReference `xml:"_this" json:"-"` + DvsName string `xml:"dvsName" json:"dvsName"` + TargetDpuAlias string `xml:"targetDpuAlias,omitempty" json:"targetDpuAlias,omitempty"` +} + +func init() { + t["startDpuFailoverRequestType"] = reflect.TypeOf((*StartDpuFailoverRequestType)(nil)).Elem() +} + +type StartDpuFailoverResponse struct { +} + type UnregisterVAppRequestType struct { This ManagedObjectReference `xml:"_this" json:"-"` } diff --git a/vendor/golang.org/x/tools/cmd/stringer/stringer.go b/vendor/golang.org/x/tools/cmd/stringer/stringer.go deleted file mode 100644 index 2b19c93e8..000000000 --- a/vendor/golang.org/x/tools/cmd/stringer/stringer.go +++ /dev/null @@ -1,660 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Stringer is a tool to automate the creation of methods that satisfy the fmt.Stringer -// interface. Given the name of a (signed or unsigned) integer type T that has constants -// defined, stringer will create a new self-contained Go source file implementing -// -// func (t T) String() string -// -// The file is created in the same package and directory as the package that defines T. -// It has helpful defaults designed for use with go generate. -// -// Stringer works best with constants that are consecutive values such as created using iota, -// but creates good code regardless. In the future it might also provide custom support for -// constant sets that are bit patterns. 
-// -// For example, given this snippet, -// -// package painkiller -// -// type Pill int -// -// const ( -// Placebo Pill = iota -// Aspirin -// Ibuprofen -// Paracetamol -// Acetaminophen = Paracetamol -// ) -// -// running this command -// -// stringer -type=Pill -// -// in the same directory will create the file pill_string.go, in package painkiller, -// containing a definition of -// -// func (Pill) String() string -// -// That method will translate the value of a Pill constant to the string representation -// of the respective constant name, so that the call fmt.Print(painkiller.Aspirin) will -// print the string "Aspirin". -// -// Typically this process would be run using go generate, like this: -// -// //go:generate stringer -type=Pill -// -// If multiple constants have the same value, the lexically first matching name will -// be used (in the example, Acetaminophen will print as "Paracetamol"). -// -// With no arguments, it processes the package in the current directory. -// Otherwise, the arguments must name a single directory holding a Go package -// or a set of Go source files that represent a single Go package. -// -// The -type flag accepts a comma-separated list of types so a single run can -// generate methods for multiple types. The default output file is t_string.go, -// where t is the lower-cased name of the first type listed. It can be overridden -// with the -output flag. -// -// The -linecomment flag tells stringer to generate the text of any line comment, trimmed -// of leading spaces, instead of the constant name. For instance, if the constants above had a -// Pill prefix, one could write -// -// PillAspirin // Aspirin -// -// to suppress it in the output. -package main // import "golang.org/x/tools/cmd/stringer" - -import ( - "bytes" - "flag" - "fmt" - "go/ast" - "go/constant" - "go/format" - "go/token" - "go/types" - "log" - "os" - "path/filepath" - "sort" - "strings" - - "golang.org/x/tools/go/packages" -) - -var ( - typeNames = flag.String("type", "", "comma-separated list of type names; must be set") - output = flag.String("output", "", "output file name; default srcdir/_string.go") - trimprefix = flag.String("trimprefix", "", "trim the `prefix` from the generated constant names") - linecomment = flag.Bool("linecomment", false, "use line comment text as printed text when present") - buildTags = flag.String("tags", "", "comma-separated list of build tags to apply") -) - -// Usage is a replacement usage function for the flags package. -func Usage() { - fmt.Fprintf(os.Stderr, "Usage of stringer:\n") - fmt.Fprintf(os.Stderr, "\tstringer [flags] -type T [directory]\n") - fmt.Fprintf(os.Stderr, "\tstringer [flags] -type T files... # Must be a single package\n") - fmt.Fprintf(os.Stderr, "For more information, see:\n") - fmt.Fprintf(os.Stderr, "\thttps://pkg.go.dev/golang.org/x/tools/cmd/stringer\n") - fmt.Fprintf(os.Stderr, "Flags:\n") - flag.PrintDefaults() -} - -func main() { - log.SetFlags(0) - log.SetPrefix("stringer: ") - flag.Usage = Usage - flag.Parse() - if len(*typeNames) == 0 { - flag.Usage() - os.Exit(2) - } - types := strings.Split(*typeNames, ",") - var tags []string - if len(*buildTags) > 0 { - tags = strings.Split(*buildTags, ",") - } - - // We accept either one directory or a list of files. Which do we have? - args := flag.Args() - if len(args) == 0 { - // Default: process whole package in current directory. - args = []string{"."} - } - - // Parse the package once. 
- var dir string - g := Generator{ - trimPrefix: *trimprefix, - lineComment: *linecomment, - } - // TODO(suzmue): accept other patterns for packages (directories, list of files, import paths, etc). - if len(args) == 1 && isDirectory(args[0]) { - dir = args[0] - } else { - if len(tags) != 0 { - log.Fatal("-tags option applies only to directories, not when files are specified") - } - dir = filepath.Dir(args[0]) - } - - g.parsePackage(args, tags) - - // Print the header and package clause. - g.Printf("// Code generated by \"stringer %s\"; DO NOT EDIT.\n", strings.Join(os.Args[1:], " ")) - g.Printf("\n") - g.Printf("package %s", g.pkg.name) - g.Printf("\n") - g.Printf("import \"strconv\"\n") // Used by all methods. - - // Run generate for each type. - for _, typeName := range types { - g.generate(typeName) - } - - // Format the output. - src := g.format() - - // Write to file. - outputName := *output - if outputName == "" { - baseName := fmt.Sprintf("%s_string.go", types[0]) - outputName = filepath.Join(dir, strings.ToLower(baseName)) - } - err := os.WriteFile(outputName, src, 0644) - if err != nil { - log.Fatalf("writing output: %s", err) - } -} - -// isDirectory reports whether the named file is a directory. -func isDirectory(name string) bool { - info, err := os.Stat(name) - if err != nil { - log.Fatal(err) - } - return info.IsDir() -} - -// Generator holds the state of the analysis. Primarily used to buffer -// the output for format.Source. -type Generator struct { - buf bytes.Buffer // Accumulated output. - pkg *Package // Package we are scanning. - - trimPrefix string - lineComment bool - - logf func(format string, args ...interface{}) // test logging hook; nil when not testing -} - -func (g *Generator) Printf(format string, args ...interface{}) { - fmt.Fprintf(&g.buf, format, args...) -} - -// File holds a single parsed file and associated data. -type File struct { - pkg *Package // Package to which this file belongs. - file *ast.File // Parsed AST. - // These fields are reset for each type being generated. - typeName string // Name of the constant type. - values []Value // Accumulator for constant values of that type. - - trimPrefix string - lineComment bool -} - -type Package struct { - name string - defs map[*ast.Ident]types.Object - files []*File -} - -// parsePackage analyzes the single package constructed from the patterns and tags. -// parsePackage exits if there is an error. -func (g *Generator) parsePackage(patterns []string, tags []string) { - cfg := &packages.Config{ - Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax, - // TODO: Need to think about constants in test files. Maybe write type_string_test.go - // in a separate pass? For later. - Tests: false, - BuildFlags: []string{fmt.Sprintf("-tags=%s", strings.Join(tags, " "))}, - Logf: g.logf, - } - pkgs, err := packages.Load(cfg, patterns...) - if err != nil { - log.Fatal(err) - } - if len(pkgs) != 1 { - log.Fatalf("error: %d packages matching %v", len(pkgs), strings.Join(patterns, " ")) - } - g.addPackage(pkgs[0]) -} - -// addPackage adds a type checked Package and its syntax files to the generator. 
-func (g *Generator) addPackage(pkg *packages.Package) { - g.pkg = &Package{ - name: pkg.Name, - defs: pkg.TypesInfo.Defs, - files: make([]*File, len(pkg.Syntax)), - } - - for i, file := range pkg.Syntax { - g.pkg.files[i] = &File{ - file: file, - pkg: g.pkg, - trimPrefix: g.trimPrefix, - lineComment: g.lineComment, - } - } -} - -// generate produces the String method for the named type. -func (g *Generator) generate(typeName string) { - values := make([]Value, 0, 100) - for _, file := range g.pkg.files { - // Set the state for this run of the walker. - file.typeName = typeName - file.values = nil - if file.file != nil { - ast.Inspect(file.file, file.genDecl) - values = append(values, file.values...) - } - } - - if len(values) == 0 { - log.Fatalf("no values defined for type %s", typeName) - } - // Generate code that will fail if the constants change value. - g.Printf("func _() {\n") - g.Printf("\t// An \"invalid array index\" compiler error signifies that the constant values have changed.\n") - g.Printf("\t// Re-run the stringer command to generate them again.\n") - g.Printf("\tvar x [1]struct{}\n") - for _, v := range values { - g.Printf("\t_ = x[%s - %s]\n", v.originalName, v.str) - } - g.Printf("}\n") - runs := splitIntoRuns(values) - // The decision of which pattern to use depends on the number of - // runs in the numbers. If there's only one, it's easy. For more than - // one, there's a tradeoff between complexity and size of the data - // and code vs. the simplicity of a map. A map takes more space, - // but so does the code. The decision here (crossover at 10) is - // arbitrary, but considers that for large numbers of runs the cost - // of the linear scan in the switch might become important, and - // rather than use yet another algorithm such as binary search, - // we punt and use a map. In any case, the likelihood of a map - // being necessary for any realistic example other than bitmasks - // is very low. And bitmasks probably deserve their own analysis, - // to be done some other day. - switch { - case len(runs) == 1: - g.buildOneRun(runs, typeName) - case len(runs) <= 10: - g.buildMultipleRuns(runs, typeName) - default: - g.buildMap(runs, typeName) - } -} - -// splitIntoRuns breaks the values into runs of contiguous sequences. -// For example, given 1,2,3,5,6,7 it returns {1,2,3},{5,6,7}. -// The input slice is known to be non-empty. -func splitIntoRuns(values []Value) [][]Value { - // We use stable sort so the lexically first name is chosen for equal elements. - sort.Stable(byValue(values)) - // Remove duplicates. Stable sort has put the one we want to print first, - // so use that one. The String method won't care about which named constant - // was the argument, so the first name for the given value is the only one to keep. - // We need to do this because identical values would cause the switch or map - // to fail to compile. - j := 1 - for i := 1; i < len(values); i++ { - if values[i].value != values[i-1].value { - values[j] = values[i] - j++ - } - } - values = values[:j] - runs := make([][]Value, 0, 10) - for len(values) > 0 { - // One contiguous sequence per outer loop. - i := 1 - for i < len(values) && values[i].value == values[i-1].value+1 { - i++ - } - runs = append(runs, values[:i]) - values = values[i:] - } - return runs -} - -// format returns the gofmt-ed contents of the Generator's buffer. -func (g *Generator) format() []byte { - src, err := format.Source(g.buf.Bytes()) - if err != nil { - // Should never happen, but can arise when developing this code. 
- // The user can compile the output to see the error. - log.Printf("warning: internal error: invalid Go generated: %s", err) - log.Printf("warning: compile the package to analyze the error") - return g.buf.Bytes() - } - return src -} - -// Value represents a declared constant. -type Value struct { - originalName string // The name of the constant. - name string // The name with trimmed prefix. - // The value is stored as a bit pattern alone. The boolean tells us - // whether to interpret it as an int64 or a uint64; the only place - // this matters is when sorting. - // Much of the time the str field is all we need; it is printed - // by Value.String. - value uint64 // Will be converted to int64 when needed. - signed bool // Whether the constant is a signed type. - str string // The string representation given by the "go/constant" package. -} - -func (v *Value) String() string { - return v.str -} - -// byValue lets us sort the constants into increasing order. -// We take care in the Less method to sort in signed or unsigned order, -// as appropriate. -type byValue []Value - -func (b byValue) Len() int { return len(b) } -func (b byValue) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b byValue) Less(i, j int) bool { - if b[i].signed { - return int64(b[i].value) < int64(b[j].value) - } - return b[i].value < b[j].value -} - -// genDecl processes one declaration clause. -func (f *File) genDecl(node ast.Node) bool { - decl, ok := node.(*ast.GenDecl) - if !ok || decl.Tok != token.CONST { - // We only care about const declarations. - return true - } - // The name of the type of the constants we are declaring. - // Can change if this is a multi-element declaration. - typ := "" - // Loop over the elements of the declaration. Each element is a ValueSpec: - // a list of names possibly followed by a type, possibly followed by values. - // If the type and value are both missing, we carry down the type (and value, - // but the "go/types" package takes care of that). - for _, spec := range decl.Specs { - vspec := spec.(*ast.ValueSpec) // Guaranteed to succeed as this is CONST. - if vspec.Type == nil && len(vspec.Values) > 0 { - // "X = 1". With no type but a value. If the constant is untyped, - // skip this vspec and reset the remembered type. - typ = "" - - // If this is a simple type conversion, remember the type. - // We don't mind if this is actually a call; a qualified call won't - // be matched (that will be SelectorExpr, not Ident), and only unusual - // situations will result in a function call that appears to be - // a type conversion. - ce, ok := vspec.Values[0].(*ast.CallExpr) - if !ok { - continue - } - id, ok := ce.Fun.(*ast.Ident) - if !ok { - continue - } - typ = id.Name - } - if vspec.Type != nil { - // "X T". We have a type. Remember it. - ident, ok := vspec.Type.(*ast.Ident) - if !ok { - continue - } - typ = ident.Name - } - if typ != f.typeName { - // This is not the type we're looking for. - continue - } - // We now have a list of names (from one line of source code) all being - // declared with the desired type. - // Grab their names and actual values and store them in f.values. - for _, name := range vspec.Names { - if name.Name == "_" { - continue - } - // This dance lets the type checker find the values for us. It's a - // bit tricky: look up the object declared by the name, find its - // types.Const, and extract its value. 
- obj, ok := f.pkg.defs[name] - if !ok { - log.Fatalf("no value for constant %s", name) - } - info := obj.Type().Underlying().(*types.Basic).Info() - if info&types.IsInteger == 0 { - log.Fatalf("can't handle non-integer constant type %s", typ) - } - value := obj.(*types.Const).Val() // Guaranteed to succeed as this is CONST. - if value.Kind() != constant.Int { - log.Fatalf("can't happen: constant is not an integer %s", name) - } - i64, isInt := constant.Int64Val(value) - u64, isUint := constant.Uint64Val(value) - if !isInt && !isUint { - log.Fatalf("internal error: value of %s is not an integer: %s", name, value.String()) - } - if !isInt { - u64 = uint64(i64) - } - v := Value{ - originalName: name.Name, - value: u64, - signed: info&types.IsUnsigned == 0, - str: value.String(), - } - if c := vspec.Comment; f.lineComment && c != nil && len(c.List) == 1 { - v.name = strings.TrimSpace(c.Text()) - } else { - v.name = strings.TrimPrefix(v.originalName, f.trimPrefix) - } - f.values = append(f.values, v) - } - } - return false -} - -// Helpers - -// usize returns the number of bits of the smallest unsigned integer -// type that will hold n. Used to create the smallest possible slice of -// integers to use as indexes into the concatenated strings. -func usize(n int) int { - switch { - case n < 1<<8: - return 8 - case n < 1<<16: - return 16 - default: - // 2^32 is enough constants for anyone. - return 32 - } -} - -// declareIndexAndNameVars declares the index slices and concatenated names -// strings representing the runs of values. -func (g *Generator) declareIndexAndNameVars(runs [][]Value, typeName string) { - var indexes, names []string - for i, run := range runs { - index, name := g.createIndexAndNameDecl(run, typeName, fmt.Sprintf("_%d", i)) - if len(run) != 1 { - indexes = append(indexes, index) - } - names = append(names, name) - } - g.Printf("const (\n") - for _, name := range names { - g.Printf("\t%s\n", name) - } - g.Printf(")\n\n") - - if len(indexes) > 0 { - g.Printf("var (") - for _, index := range indexes { - g.Printf("\t%s\n", index) - } - g.Printf(")\n\n") - } -} - -// declareIndexAndNameVar is the single-run version of declareIndexAndNameVars -func (g *Generator) declareIndexAndNameVar(run []Value, typeName string) { - index, name := g.createIndexAndNameDecl(run, typeName, "") - g.Printf("const %s\n", name) - g.Printf("var %s\n", index) -} - -// createIndexAndNameDecl returns the pair of declarations for the run. The caller will add "const" and "var". -func (g *Generator) createIndexAndNameDecl(run []Value, typeName string, suffix string) (string, string) { - b := new(bytes.Buffer) - indexes := make([]int, len(run)) - for i := range run { - b.WriteString(run[i].name) - indexes[i] = b.Len() - } - nameConst := fmt.Sprintf("_%s_name%s = %q", typeName, suffix, b.String()) - nameLen := b.Len() - b.Reset() - fmt.Fprintf(b, "_%s_index%s = [...]uint%d{0, ", typeName, suffix, usize(nameLen)) - for i, v := range indexes { - if i > 0 { - fmt.Fprintf(b, ", ") - } - fmt.Fprintf(b, "%d", v) - } - fmt.Fprintf(b, "}") - return b.String(), nameConst -} - -// declareNameVars declares the concatenated names string representing all the values in the runs. -func (g *Generator) declareNameVars(runs [][]Value, typeName string, suffix string) { - g.Printf("const _%s_name%s = \"", typeName, suffix) - for _, run := range runs { - for i := range run { - g.Printf("%s", run[i].name) - } - } - g.Printf("\"\n") -} - -// buildOneRun generates the variables and String method for a single run of contiguous values. 
-func (g *Generator) buildOneRun(runs [][]Value, typeName string) { - values := runs[0] - g.Printf("\n") - g.declareIndexAndNameVar(values, typeName) - // The generated code is simple enough to write as a Printf format. - lessThanZero := "" - if values[0].signed { - lessThanZero = "i < 0 || " - } - if values[0].value == 0 { // Signed or unsigned, 0 is still 0. - g.Printf(stringOneRun, typeName, usize(len(values)), lessThanZero) - } else { - g.Printf(stringOneRunWithOffset, typeName, values[0].String(), usize(len(values)), lessThanZero) - } -} - -// Arguments to format are: -// -// [1]: type name -// [2]: size of index element (8 for uint8 etc.) -// [3]: less than zero check (for signed types) -const stringOneRun = `func (i %[1]s) String() string { - if %[3]si >= %[1]s(len(_%[1]s_index)-1) { - return "%[1]s(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _%[1]s_name[_%[1]s_index[i]:_%[1]s_index[i+1]] -} -` - -// Arguments to format are: -// [1]: type name -// [2]: lowest defined value for type, as a string -// [3]: size of index element (8 for uint8 etc.) -// [4]: less than zero check (for signed types) -/* - */ -const stringOneRunWithOffset = `func (i %[1]s) String() string { - i -= %[2]s - if %[4]si >= %[1]s(len(_%[1]s_index)-1) { - return "%[1]s(" + strconv.FormatInt(int64(i + %[2]s), 10) + ")" - } - return _%[1]s_name[_%[1]s_index[i] : _%[1]s_index[i+1]] -} -` - -// buildMultipleRuns generates the variables and String method for multiple runs of contiguous values. -// For this pattern, a single Printf format won't do. -func (g *Generator) buildMultipleRuns(runs [][]Value, typeName string) { - g.Printf("\n") - g.declareIndexAndNameVars(runs, typeName) - g.Printf("func (i %s) String() string {\n", typeName) - g.Printf("\tswitch {\n") - for i, values := range runs { - if len(values) == 1 { - g.Printf("\tcase i == %s:\n", &values[0]) - g.Printf("\t\treturn _%s_name_%d\n", typeName, i) - continue - } - if values[0].value == 0 && !values[0].signed { - // For an unsigned lower bound of 0, "0 <= i" would be redundant. - g.Printf("\tcase i <= %s:\n", &values[len(values)-1]) - } else { - g.Printf("\tcase %s <= i && i <= %s:\n", &values[0], &values[len(values)-1]) - } - if values[0].value != 0 { - g.Printf("\t\ti -= %s\n", &values[0]) - } - g.Printf("\t\treturn _%s_name_%d[_%s_index_%d[i]:_%s_index_%d[i+1]]\n", - typeName, i, typeName, i, typeName, i) - } - g.Printf("\tdefault:\n") - g.Printf("\t\treturn \"%s(\" + strconv.FormatInt(int64(i), 10) + \")\"\n", typeName) - g.Printf("\t}\n") - g.Printf("}\n") -} - -// buildMap handles the case where the space is so sparse a map is a reasonable fallback. -// It's a rare situation but has simple code. -func (g *Generator) buildMap(runs [][]Value, typeName string) { - g.Printf("\n") - g.declareNameVars(runs, typeName, "") - g.Printf("\nvar _%s_map = map[%s]string{\n", typeName, typeName) - n := 0 - for _, values := range runs { - for _, value := range values { - g.Printf("\t%s: _%s_name[%d:%d],\n", &value, typeName, n, n+len(value.name)) - n += len(value.name) - } - } - g.Printf("}\n\n") - g.Printf(stringMap, typeName) -} - -// Argument to format is the type name. 
-const stringMap = `func (i %[1]s) String() string { - if str, ok := _%[1]s_map[i]; ok { - return str - } - return "%[1]s(" + strconv.FormatInt(int64(i), 10) + ")" -} -` diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go deleted file mode 100644 index 137cc8df1..000000000 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package gcexportdata provides functions for locating, reading, and -// writing export data files containing type information produced by the -// gc compiler. This package supports go1.7 export data format and all -// later versions. -// -// Although it might seem convenient for this package to live alongside -// go/types in the standard library, this would cause version skew -// problems for developer tools that use it, since they must be able to -// consume the outputs of the gc compiler both before and after a Go -// update such as from Go 1.7 to Go 1.8. Because this package lives in -// golang.org/x/tools, sites can update their version of this repo some -// time before the Go 1.8 release and rebuild and redeploy their -// developer tools, which will then be able to consume both Go 1.7 and -// Go 1.8 export data files, so they will work before and after the -// Go update. (See discussion at https://golang.org/issue/15651.) -package gcexportdata // import "golang.org/x/tools/go/gcexportdata" - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "go/token" - "go/types" - "io" - "os/exec" - - "golang.org/x/tools/internal/gcimporter" -) - -// Find returns the name of an object (.o) or archive (.a) file -// containing type information for the specified import path, -// using the go command. -// If no file was found, an empty filename is returned. -// -// A relative srcDir is interpreted relative to the current working directory. -// -// Find also returns the package's resolved (canonical) import path, -// reflecting the effects of srcDir and vendoring on importPath. -// -// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages, -// which is more efficient. -func Find(importPath, srcDir string) (filename, path string) { - cmd := exec.Command("go", "list", "-json", "-export", "--", importPath) - cmd.Dir = srcDir - out, err := cmd.Output() - if err != nil { - return "", "" - } - var data struct { - ImportPath string - Export string - } - json.Unmarshal(out, &data) - return data.Export, data.ImportPath -} - -// NewReader returns a reader for the export data section of an object -// (.o) or archive (.a) file read from r. The new reader may provide -// additional trailing data beyond the end of the export data. -func NewReader(r io.Reader) (io.Reader, error) { - buf := bufio.NewReader(r) - _, size, err := gcimporter.FindExportData(buf) - if err != nil { - return nil, err - } - - if size >= 0 { - // We were given an archive and found the __.PKGDEF in it. - // This tells us the size of the export data, and we don't - // need to return the entire file. - return &io.LimitedReader{ - R: buf, - N: size, - }, nil - } else { - // We were given an object file. As such, we don't know how large - // the export data is and must return the entire file. 
- return buf, nil - } -} - -// readAll works the same way as io.ReadAll, but avoids allocations and copies -// by preallocating a byte slice of the necessary size if the size is known up -// front. This is always possible when the input is an archive. In that case, -// NewReader will return the known size using an io.LimitedReader. -func readAll(r io.Reader) ([]byte, error) { - if lr, ok := r.(*io.LimitedReader); ok { - data := make([]byte, lr.N) - _, err := io.ReadFull(lr, data) - return data, err - } - return io.ReadAll(r) -} - -// Read reads export data from in, decodes it, and returns type -// information for the package. -// -// The package path (effectively its linker symbol prefix) is -// specified by path, since unlike the package name, this information -// may not be recorded in the export data. -// -// File position information is added to fset. -// -// Read may inspect and add to the imports map to ensure that references -// within the export data to other packages are consistent. The caller -// must ensure that imports[path] does not exist, or exists but is -// incomplete (see types.Package.Complete), and Read inserts the -// resulting package into this map entry. -// -// On return, the state of the reader is undefined. -func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) { - data, err := readAll(in) - if err != nil { - return nil, fmt.Errorf("reading export data for %q: %v", path, err) - } - - if bytes.HasPrefix(data, []byte("!")) { - return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path) - } - - // The indexed export format starts with an 'i'; the older - // binary export format starts with a 'c', 'd', or 'v' - // (from "version"). Select appropriate importer. - if len(data) > 0 { - switch data[0] { - case 'v', 'c', 'd': // binary, till go1.10 - return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - - case 'i': // indexed, till go1.19 - _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) - return pkg, err - - case 'u': // unified, from go1.20 - _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) - return pkg, err - - default: - l := len(data) - if l > 10 { - l = 10 - } - return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), path) - } - } - return nil, fmt.Errorf("empty export data for %s", path) -} - -// Write writes encoded type information for the specified package to out. -// The FileSet provides file position information for named objects. -func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error { - if _, err := io.WriteString(out, "i"); err != nil { - return err - } - return gcimporter.IExportData(out, fset, pkg) -} - -// ReadBundle reads an export bundle from in, decodes it, and returns type -// information for the packages. -// File position information is added to fset. -// -// ReadBundle may inspect and add to the imports map to ensure that references -// within the export bundle to other packages are consistent. -// -// On return, the state of the reader is undefined. -// -// Experimental: This API is experimental and may change in the future. 
-func ReadBundle(in io.Reader, fset *token.FileSet, imports map[string]*types.Package) ([]*types.Package, error) { - data, err := readAll(in) - if err != nil { - return nil, fmt.Errorf("reading export bundle: %v", err) - } - return gcimporter.IImportBundle(fset, imports, data) -} - -// WriteBundle writes encoded type information for the specified packages to out. -// The FileSet provides file position information for named objects. -// -// Experimental: This API is experimental and may change in the future. -func WriteBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { - return gcimporter.IExportBundle(out, fset, pkgs) -} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/importer.go b/vendor/golang.org/x/tools/go/gcexportdata/importer.go deleted file mode 100644 index 37a7247e2..000000000 --- a/vendor/golang.org/x/tools/go/gcexportdata/importer.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gcexportdata - -import ( - "fmt" - "go/token" - "go/types" - "os" -) - -// NewImporter returns a new instance of the types.Importer interface -// that reads type information from export data files written by gc. -// The Importer also satisfies types.ImporterFrom. -// -// Export data files are located using "go build" workspace conventions -// and the build.Default context. -// -// Use this importer instead of go/importer.For("gc", ...) to avoid the -// version-skew problems described in the documentation of this package, -// or to control the FileSet or access the imports map populated during -// package loading. -// -// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages, -// which is more efficient. -func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom { - return importer{fset, imports} -} - -type importer struct { - fset *token.FileSet - imports map[string]*types.Package -} - -func (imp importer) Import(importPath string) (*types.Package, error) { - return imp.ImportFrom(importPath, "", 0) -} - -func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) { - filename, path := Find(importPath, srcDir) - if filename == "" { - if importPath == "unsafe" { - // Even for unsafe, call Find first in case - // the package was vendored. - return types.Unsafe, nil - } - return nil, fmt.Errorf("can't find import: %s", importPath) - } - - if pkg, ok := imp.imports[path]; ok && pkg.Complete() { - return pkg, nil // cache hit - } - - // open file - f, err := os.Open(filename) - if err != nil { - return nil, err - } - defer func() { - f.Close() - if err != nil { - // add file name to error - err = fmt.Errorf("reading export data: %s: %v", filename, err) - } - }() - - r, err := NewReader(f) - if err != nil { - return nil, err - } - - return Read(r, imp.fset, imp.imports, path) -} diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go deleted file mode 100644 index 333676b7c..000000000 --- a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package packagesdriver fetches type sizes for go/packages and go/analysis. 
-package packagesdriver - -import ( - "context" - "fmt" - "strings" - - "golang.org/x/tools/internal/gocommand" -) - -func GetSizesForArgsGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) { - inv.Verb = "list" - inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} - stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) - var goarch, compiler string - if rawErr != nil { - rawErrMsg := rawErr.Error() - if strings.Contains(rawErrMsg, "cannot find main module") || - strings.Contains(rawErrMsg, "go.mod file not found") { - // User's running outside of a module. - // All bets are off. Get GOARCH and guess compiler is gc. - // TODO(matloob): Is this a problem in practice? - inv.Verb = "env" - inv.Args = []string{"GOARCH"} - envout, enverr := gocmdRunner.Run(ctx, inv) - if enverr != nil { - return "", "", enverr - } - goarch = strings.TrimSpace(envout.String()) - compiler = "gc" - } else if friendlyErr != nil { - return "", "", friendlyErr - } else { - // This should be unreachable, but be defensive - // in case RunRaw's error results are inconsistent. - return "", "", rawErr - } - } else { - fields := strings.Fields(stdout.String()) - if len(fields) < 2 { - return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", - stdout.String(), stderr.String()) - } - goarch = fields[0] - compiler = fields[1] - } - return compiler, goarch, nil -} diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go deleted file mode 100644 index a8d7b06ac..000000000 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ /dev/null @@ -1,250 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package packages loads Go packages for inspection and analysis. - -The [Load] function takes as input a list of patterns and returns a -list of [Package] values describing individual packages matched by those -patterns. -A [Config] specifies configuration options, the most important of which is -the [LoadMode], which controls the amount of detail in the loaded packages. - -Load passes most patterns directly to the underlying build tool. -The default build tool is the go command. -Its supported patterns are described at -https://pkg.go.dev/cmd/go#hdr-Package_lists_and_patterns. -Other build systems may be supported by providing a "driver"; -see [The driver protocol]. - -All patterns with the prefix "query=", where query is a -non-empty string of letters from [a-z], are reserved and may be -interpreted as query operators. - -Two query operators are currently supported: "file" and "pattern". - -The query "file=path/to/file.go" matches the package or packages enclosing -the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go" -might return the packages "fmt" and "fmt [fmt.test]". - -The query "pattern=string" causes "string" to be passed directly to -the underlying build tool. In most cases this is unnecessary, -but an application can use Load("pattern=" + x) as an escaping mechanism -to ensure that x is not interpreted as a query operator if it contains '='. - -All other query operators are reserved for future use and currently -cause Load to report an error. 
- -The Package struct provides basic information about the package, including - - - ID, a unique identifier for the package in the returned set; - - GoFiles, the names of the package's Go source files; - - Imports, a map from source import strings to the Packages they name; - - Types, the type information for the package's exported symbols; - - Syntax, the parsed syntax trees for the package's source code; and - - TypesInfo, the result of a complete type-check of the package syntax trees. - -(See the documentation for type Package for the complete list of fields -and more detailed descriptions.) - -For example, - - Load(nil, "bytes", "unicode...") - -returns four Package structs describing the standard library packages -bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern -can match multiple packages and that a package might be matched by -multiple patterns: in general it is not possible to determine which -packages correspond to which patterns. - -Note that the list returned by Load contains only the packages matched -by the patterns. Their dependencies can be found by walking the import -graph using the Imports fields. - -The Load function can be configured by passing a pointer to a Config as -the first argument. A nil Config is equivalent to the zero Config, which -causes Load to run in LoadFiles mode, collecting minimal information. -See the documentation for type Config for details. - -As noted earlier, the Config.Mode controls the amount of detail -reported about the loaded packages. See the documentation for type LoadMode -for details. - -Most tools should pass their command-line arguments (after any flags) -uninterpreted to [Load], so that it can interpret them -according to the conventions of the underlying build system. - -See the Example function for typical usage. - -# The driver protocol - -[Load] may be used to load Go packages even in Go projects that use -alternative build systems, by installing an appropriate "driver" -program for the build system and specifying its location in the -GOPACKAGESDRIVER environment variable. -For example, -https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration -explains how to use the driver for Bazel. - -The driver program is responsible for interpreting patterns in its -preferred notation and reporting information about the packages that -those patterns identify. Drivers must also support the special "file=" -and "pattern=" patterns described above. - -The patterns are provided as positional command-line arguments. A -JSON-encoded [DriverRequest] message providing additional information -is written to the driver's standard input. The driver must write a -JSON-encoded [DriverResponse] message to its standard output. (This -message differs from the JSON schema produced by 'go list'.) -*/ -package packages // import "golang.org/x/tools/go/packages" - -/* - -Motivation and design considerations - -The new package's design solves problems addressed by two existing -packages: go/build, which locates and describes packages, and -golang.org/x/tools/go/loader, which loads, parses and type-checks them. -The go/build.Package structure encodes too much of the 'go build' way -of organizing projects, leaving us in need of a data type that describes a -package of Go source code independent of the underlying build system. 
-We wanted something that works equally well with go build and vgo, and -also other build systems such as Bazel and Blaze, making it possible to -construct analysis tools that work in all these environments. -Tools such as errcheck and staticcheck were essentially unavailable to -the Go community at Google, and some of Google's internal tools for Go -are unavailable externally. -This new package provides a uniform way to obtain package metadata by -querying each of these build systems, optionally supporting their -preferred command-line notations for packages, so that tools integrate -neatly with users' build environments. The Metadata query function -executes an external query tool appropriate to the current workspace. - -Loading packages always returns the complete import graph "all the way down", -even if all you want is information about a single package, because the query -mechanisms of all the build systems we currently support ({go,vgo} list, and -blaze/bazel aspect-based query) cannot provide detailed information -about one package without visiting all its dependencies too, so there is -no additional asymptotic cost to providing transitive information. -(This property might not be true of a hypothetical 5th build system.) - -In calls to TypeCheck, all initial packages, and any package that -transitively depends on one of them, must be loaded from source. -Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from -source; D may be loaded from export data, and E may not be loaded at all -(though it's possible that D's export data mentions it, so a -types.Package may be created for it and exposed.) - -The old loader had a feature to suppress type-checking of function -bodies on a per-package basis, primarily intended to reduce the work of -obtaining type information for imported packages. Now that imports are -satisfied by export data, the optimization no longer seems necessary. - -Despite some early attempts, the old loader did not exploit export data, -instead always using the equivalent of WholeProgram mode. This was due -to the complexity of mixing source and export data packages (now -resolved by the upward traversal mentioned above), and because export data -files were nearly always missing or stale. Now that 'go build' supports -caching, all the underlying build systems can guarantee to produce -export data in a reasonable (amortized) time. - -Test "main" packages synthesized by the build system are now reported as -first-class packages, avoiding the need for clients (such as go/ssa) to -reinvent this generation logic. - -One way in which go/packages is simpler than the old loader is in its -treatment of in-package tests. In-package tests are packages that -consist of all the files of the library under test, plus the test files. -The old loader constructed in-package tests by a two-phase process of -mutation called "augmentation": first it would construct and type check -all the ordinary library packages and type-check the packages that -depend on them; then it would add more (test) files to the package and -type-check again. This two-phase approach had four major problems: -1) in processing the tests, the loader modified the library package, - leaving no way for a client application to see both the test - package and the library package; one would mutate into the other. 
-2) because test files can declare additional methods on types defined in - the library portion of the package, the dispatch of method calls in - the library portion was affected by the presence of the test files. - This should have been a clue that the packages were logically - different. -3) this model of "augmentation" assumed at most one in-package test - per library package, which is true of projects using 'go build', - but not other build systems. -4) because of the two-phase nature of test processing, all packages that - import the library package had to be processed before augmentation, - forcing a "one-shot" API and preventing the client from calling Load - in several times in sequence as is now possible in WholeProgram mode. - (TypeCheck mode has a similar one-shot restriction for a different reason.) - -Early drafts of this package supported "multi-shot" operation. -Although it allowed clients to make a sequence of calls (or concurrent -calls) to Load, building up the graph of Packages incrementally, -it was of marginal value: it complicated the API -(since it allowed some options to vary across calls but not others), -it complicated the implementation, -it cannot be made to work in Types mode, as explained above, -and it was less efficient than making one combined call (when this is possible). -Among the clients we have inspected, none made multiple calls to load -but could not be easily and satisfactorily modified to make only a single call. -However, applications changes may be required. -For example, the ssadump command loads the user-specified packages -and in addition the runtime package. It is tempting to simply append -"runtime" to the user-provided list, but that does not work if the user -specified an ad-hoc package such as [a.go b.go]. -Instead, ssadump no longer requests the runtime package, -but seeks it among the dependencies of the user-specified packages, -and emits an error if it is not found. - -Overlays: The Overlay field in the Config allows providing alternate contents -for Go source files, by providing a mapping from file path to contents. -go/packages will pull in new imports added in overlay files when go/packages -is run in LoadImports mode or greater. -Overlay support for the go list driver isn't complete yet: if the file doesn't -exist on disk, it will only be recognized in an overlay if it is a non-test file -and the package would be reported even without the overlay. - -Questions & Tasks - -- Add GOARCH/GOOS? - They are not portable concepts, but could be made portable. - Our goal has been to allow users to express themselves using the conventions - of the underlying build system: if the build system honors GOARCH - during a build and during a metadata query, then so should - applications built atop that query mechanism. - Conversely, if the target architecture of the build is determined by - command-line flags, the application can pass the relevant - flags through to the build system using a command such as: - myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin" - However, this approach is low-level, unwieldy, and non-portable. - GOOS and GOARCH seem important enough to warrant a dedicated option. - -- How should we handle partial failures such as a mixture of good and - malformed patterns, existing and non-existent packages, successful and - failed builds, import failures, import cycles, and so on, in a call to - Load? - -- Support bazel, blaze, and go1.10 list, not just go1.11 list. - -- Handle (and test) various partial success cases, e.g. 
- a mixture of good packages and: - invalid patterns - nonexistent packages - empty packages - packages with malformed package or import declarations - unreadable files - import cycles - other parse errors - type errors - Make sure we record errors at the correct place in the graph. - -- Missing packages among initial arguments are not reported. - Return bogus packages for them, like golist does. - -- "undeclared name" errors (for example) are reported out of source file - order. I suspect this is due to the breadth-first resolution now used - by go/types. Is that a bug? Discuss with gri. - -*/ diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go deleted file mode 100644 index 4335c1eb1..000000000 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packages - -// This file defines the protocol that enables an external "driver" -// tool to supply package metadata in place of 'go list'. - -import ( - "bytes" - "encoding/json" - "fmt" - "os" - "os/exec" - "strings" -) - -// DriverRequest defines the schema of a request for package metadata -// from an external driver program. The JSON-encoded DriverRequest -// message is provided to the driver program's standard input. The -// query patterns are provided as command-line arguments. -// -// See the package documentation for an overview. -type DriverRequest struct { - Mode LoadMode `json:"mode"` - - // Env specifies the environment the underlying build system should be run in. - Env []string `json:"env"` - - // BuildFlags are flags that should be passed to the underlying build system. - BuildFlags []string `json:"build_flags"` - - // Tests specifies whether the patterns should also return test packages. - Tests bool `json:"tests"` - - // Overlay maps file paths (relative to the driver's working directory) to the byte contents - // of overlay files. - Overlay map[string][]byte `json:"overlay"` -} - -// DriverResponse defines the schema of a response from an external -// driver program, providing the results of a query for package -// metadata. The driver program must write a JSON-encoded -// DriverResponse message to its standard output. -// -// See the package documentation for an overview. -type DriverResponse struct { - // NotHandled is returned if the request can't be handled by the current - // driver. If an external driver returns a response with NotHandled, the - // rest of the DriverResponse is ignored, and go/packages will fallback - // to the next driver. If go/packages is extended in the future to support - // lists of multiple drivers, go/packages will fall back to the next driver. - NotHandled bool - - // Compiler and Arch are the arguments pass of types.SizesFor - // to get a types.Sizes to use when type checking. - Compiler string - Arch string - - // Roots is the set of package IDs that make up the root packages. - // We have to encode this separately because when we encode a single package - // we cannot know if it is one of the roots as that requires knowledge of the - // graph it is part of. - Roots []string `json:",omitempty"` - - // Packages is the full set of packages in the graph. - // The packages are not connected into a graph. - // The Imports if populated will be stubs that only have their ID set. 
- // Imports will be connected and then type and syntax information added in a - // later pass (see refine). - Packages []*Package - - // GoVersion is the minor version number used by the driver - // (e.g. the go command on the PATH) when selecting .go files. - // Zero means unknown. - GoVersion int -} - -// driver is the type for functions that query the build system for the -// packages named by the patterns. -type driver func(cfg *Config, patterns ...string) (*DriverResponse, error) - -// findExternalDriver returns the file path of a tool that supplies -// the build system package structure, or "" if not found." -// If GOPACKAGESDRIVER is set in the environment findExternalTool returns its -// value, otherwise it searches for a binary named gopackagesdriver on the PATH. -func findExternalDriver(cfg *Config) driver { - const toolPrefix = "GOPACKAGESDRIVER=" - tool := "" - for _, env := range cfg.Env { - if val := strings.TrimPrefix(env, toolPrefix); val != env { - tool = val - } - } - if tool != "" && tool == "off" { - return nil - } - if tool == "" { - var err error - tool, err = exec.LookPath("gopackagesdriver") - if err != nil { - return nil - } - } - return func(cfg *Config, words ...string) (*DriverResponse, error) { - req, err := json.Marshal(DriverRequest{ - Mode: cfg.Mode, - Env: cfg.Env, - BuildFlags: cfg.BuildFlags, - Tests: cfg.Tests, - Overlay: cfg.Overlay, - }) - if err != nil { - return nil, fmt.Errorf("failed to encode message to driver tool: %v", err) - } - - buf := new(bytes.Buffer) - stderr := new(bytes.Buffer) - cmd := exec.CommandContext(cfg.Context, tool, words...) - cmd.Dir = cfg.Dir - cmd.Env = cfg.Env - cmd.Stdin = bytes.NewReader(req) - cmd.Stdout = buf - cmd.Stderr = stderr - - if err := cmd.Run(); err != nil { - return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr) - } - if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" { - fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd), stderr) - } - - var response DriverResponse - if err := json.Unmarshal(buf.Bytes(), &response); err != nil { - return nil, err - } - return &response, nil - } -} diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go deleted file mode 100644 index 22305d9c9..000000000 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ /dev/null @@ -1,1106 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packages - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "log" - "os" - "os/exec" - "path" - "path/filepath" - "reflect" - "sort" - "strconv" - "strings" - "sync" - "unicode" - - "golang.org/x/tools/go/internal/packagesdriver" - "golang.org/x/tools/internal/gocommand" - "golang.org/x/tools/internal/packagesinternal" -) - -// debug controls verbose logging. -var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG")) - -// A goTooOldError reports that the go command -// found by exec.LookPath is too old to use the new go list behavior. -type goTooOldError struct { - error -} - -// responseDeduper wraps a DriverResponse, deduplicating its contents. 
-type responseDeduper struct { - seenRoots map[string]bool - seenPackages map[string]*Package - dr *DriverResponse -} - -func newDeduper() *responseDeduper { - return &responseDeduper{ - dr: &DriverResponse{}, - seenRoots: map[string]bool{}, - seenPackages: map[string]*Package{}, - } -} - -// addAll fills in r with a DriverResponse. -func (r *responseDeduper) addAll(dr *DriverResponse) { - for _, pkg := range dr.Packages { - r.addPackage(pkg) - } - for _, root := range dr.Roots { - r.addRoot(root) - } - r.dr.GoVersion = dr.GoVersion -} - -func (r *responseDeduper) addPackage(p *Package) { - if r.seenPackages[p.ID] != nil { - return - } - r.seenPackages[p.ID] = p - r.dr.Packages = append(r.dr.Packages, p) -} - -func (r *responseDeduper) addRoot(id string) { - if r.seenRoots[id] { - return - } - r.seenRoots[id] = true - r.dr.Roots = append(r.dr.Roots, id) -} - -type golistState struct { - cfg *Config - ctx context.Context - - envOnce sync.Once - goEnvError error - goEnv map[string]string - - rootsOnce sync.Once - rootDirsError error - rootDirs map[string]string - - goVersionOnce sync.Once - goVersionError error - goVersion int // The X in Go 1.X. - - // vendorDirs caches the (non)existence of vendor directories. - vendorDirs map[string]bool -} - -// getEnv returns Go environment variables. Only specific variables are -// populated -- computing all of them is slow. -func (state *golistState) getEnv() (map[string]string, error) { - state.envOnce.Do(func() { - var b *bytes.Buffer - b, state.goEnvError = state.invokeGo("env", "-json", "GOMOD", "GOPATH") - if state.goEnvError != nil { - return - } - - state.goEnv = make(map[string]string) - decoder := json.NewDecoder(b) - if state.goEnvError = decoder.Decode(&state.goEnv); state.goEnvError != nil { - return - } - }) - return state.goEnv, state.goEnvError -} - -// mustGetEnv is a convenience function that can be used if getEnv has already succeeded. -func (state *golistState) mustGetEnv() map[string]string { - env, err := state.getEnv() - if err != nil { - panic(fmt.Sprintf("mustGetEnv: %v", err)) - } - return env -} - -// goListDriver uses the go list command to interpret the patterns and produce -// the build system package structure. -// See driver for more details. -func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) { - // Make sure that any asynchronous go commands are killed when we return. - parentCtx := cfg.Context - if parentCtx == nil { - parentCtx = context.Background() - } - ctx, cancel := context.WithCancel(parentCtx) - defer cancel() - - response := newDeduper() - - state := &golistState{ - cfg: cfg, - ctx: ctx, - vendorDirs: map[string]bool{}, - } - - // Fill in response.Sizes asynchronously if necessary. - if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { - errCh := make(chan error) - go func() { - compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) - response.dr.Compiler = compiler - response.dr.Arch = arch - errCh <- err - }() - defer func() { - if sizesErr := <-errCh; sizesErr != nil { - err = sizesErr - } - }() - } - - // Determine files requested in contains patterns - var containFiles []string - restPatterns := make([]string, 0, len(patterns)) - // Extract file= and other [querytype]= patterns. Report an error if querytype - // doesn't exist. 
-extractQueries: - for _, pattern := range patterns { - eqidx := strings.Index(pattern, "=") - if eqidx < 0 { - restPatterns = append(restPatterns, pattern) - } else { - query, value := pattern[:eqidx], pattern[eqidx+len("="):] - switch query { - case "file": - containFiles = append(containFiles, value) - case "pattern": - restPatterns = append(restPatterns, value) - case "": // not a reserved query - restPatterns = append(restPatterns, pattern) - default: - for _, rune := range query { - if rune < 'a' || rune > 'z' { // not a reserved query - restPatterns = append(restPatterns, pattern) - continue extractQueries - } - } - // Reject all other patterns containing "=" - return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern) - } - } - } - - // See if we have any patterns to pass through to go list. Zero initial - // patterns also requires a go list call, since it's the equivalent of - // ".". - if len(restPatterns) > 0 || len(patterns) == 0 { - dr, err := state.createDriverResponse(restPatterns...) - if err != nil { - return nil, err - } - response.addAll(dr) - } - - if len(containFiles) != 0 { - if err := state.runContainsQueries(response, containFiles); err != nil { - return nil, err - } - } - - // (We may yet return an error due to defer.) - return response.dr, nil -} - -func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error { - for _, query := range queries { - // TODO(matloob): Do only one query per directory. - fdir := filepath.Dir(query) - // Pass absolute path of directory to go list so that it knows to treat it as a directory, - // not a package path. - pattern, err := filepath.Abs(fdir) - if err != nil { - return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err) - } - dirResponse, err := state.createDriverResponse(pattern) - - // If there was an error loading the package, or no packages are returned, - // or the package is returned with errors, try to load the file as an - // ad-hoc package. - // Usually the error will appear in a returned package, but may not if we're - // in module mode and the ad-hoc is located outside a module. - if err != nil || len(dirResponse.Packages) == 0 || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 && - len(dirResponse.Packages[0].Errors) == 1 { - var queryErr error - if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil { - return err // return the original error - } - } - isRoot := make(map[string]bool, len(dirResponse.Roots)) - for _, root := range dirResponse.Roots { - isRoot[root] = true - } - for _, pkg := range dirResponse.Packages { - // Add any new packages to the main set - // We don't bother to filter packages that will be dropped by the changes of roots, - // that will happen anyway during graph construction outside this function. - // Over-reporting packages is not a problem. - response.addPackage(pkg) - // if the package was not a root one, it cannot have the file - if !isRoot[pkg.ID] { - continue - } - for _, pkgFile := range pkg.GoFiles { - if filepath.Base(query) == filepath.Base(pkgFile) { - response.addRoot(pkg.ID) - break - } - } - } - } - return nil -} - -// adhocPackage attempts to load or construct an ad-hoc package for a given -// query, if the original call to the driver produced inadequate results. 
-func (state *golistState) adhocPackage(pattern, query string) (*DriverResponse, error) { - response, err := state.createDriverResponse(query) - if err != nil { - return nil, err - } - // If we get nothing back from `go list`, - // try to make this file into its own ad-hoc package. - // TODO(rstambler): Should this check against the original response? - if len(response.Packages) == 0 { - response.Packages = append(response.Packages, &Package{ - ID: "command-line-arguments", - PkgPath: query, - GoFiles: []string{query}, - CompiledGoFiles: []string{query}, - Imports: make(map[string]*Package), - }) - response.Roots = append(response.Roots, "command-line-arguments") - } - // Handle special cases. - if len(response.Packages) == 1 { - // golang/go#33482: If this is a file= query for ad-hoc packages where - // the file only exists on an overlay, and exists outside of a module, - // add the file to the package and remove the errors. - if response.Packages[0].ID == "command-line-arguments" || - filepath.ToSlash(response.Packages[0].PkgPath) == filepath.ToSlash(query) { - if len(response.Packages[0].GoFiles) == 0 { - filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath - // TODO(matloob): check if the file is outside of a root dir? - for path := range state.cfg.Overlay { - if path == filename { - response.Packages[0].Errors = nil - response.Packages[0].GoFiles = []string{path} - response.Packages[0].CompiledGoFiles = []string{path} - } - } - } - } - } - return response, nil -} - -// Fields must match go list; -// see $GOROOT/src/cmd/go/internal/load/pkg.go. -type jsonPackage struct { - ImportPath string - Dir string - Name string - Export string - GoFiles []string - CompiledGoFiles []string - IgnoredGoFiles []string - IgnoredOtherFiles []string - EmbedPatterns []string - EmbedFiles []string - CFiles []string - CgoFiles []string - CXXFiles []string - MFiles []string - HFiles []string - FFiles []string - SFiles []string - SwigFiles []string - SwigCXXFiles []string - SysoFiles []string - Imports []string - ImportMap map[string]string - Deps []string - Module *Module - TestGoFiles []string - TestImports []string - XTestGoFiles []string - XTestImports []string - ForTest string // q in a "p [q.test]" package, else "" - DepOnly bool - - Error *packagesinternal.PackageError - DepsErrors []*packagesinternal.PackageError -} - -type jsonPackageError struct { - ImportStack []string - Pos string - Err string -} - -func otherFiles(p *jsonPackage) [][]string { - return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles} -} - -// createDriverResponse uses the "go list" command to expand the pattern -// words and return a response for the specified packages. -func (state *golistState) createDriverResponse(words ...string) (*DriverResponse, error) { - // go list uses the following identifiers in ImportPath and Imports: - // - // "p" -- importable package or main (command) - // "q.test" -- q's test executable - // "p [q.test]" -- variant of p as built for q's test executable - // "q_test [q.test]" -- q's external test package - // - // The packages p that are built differently for a test q.test - // are q itself, plus any helpers used by the external test q_test, - // typically including "testing" and all its dependencies. - - // Run "go list" for complete - // information on the specified packages. 
- goVersion, err := state.getGoVersion() - if err != nil { - return nil, err - } - buf, err := state.invokeGo("list", golistargs(state.cfg, words, goVersion)...) - if err != nil { - return nil, err - } - - seen := make(map[string]*jsonPackage) - pkgs := make(map[string]*Package) - additionalErrors := make(map[string][]Error) - // Decode the JSON and convert it to Package form. - response := &DriverResponse{ - GoVersion: goVersion, - } - for dec := json.NewDecoder(buf); dec.More(); { - p := new(jsonPackage) - if err := dec.Decode(p); err != nil { - return nil, fmt.Errorf("JSON decoding failed: %v", err) - } - - if p.ImportPath == "" { - // The documentation for go list says that “[e]rroneous packages will have - // a non-empty ImportPath”. If for some reason it comes back empty, we - // prefer to error out rather than silently discarding data or handing - // back a package without any way to refer to it. - if p.Error != nil { - return nil, Error{ - Pos: p.Error.Pos, - Msg: p.Error.Err, - } - } - return nil, fmt.Errorf("package missing import path: %+v", p) - } - - // Work around https://golang.org/issue/33157: - // go list -e, when given an absolute path, will find the package contained at - // that directory. But when no package exists there, it will return a fake package - // with an error and the ImportPath set to the absolute path provided to go list. - // Try to convert that absolute path to what its package path would be if it's - // contained in a known module or GOPATH entry. This will allow the package to be - // properly "reclaimed" when overlays are processed. - if filepath.IsAbs(p.ImportPath) && p.Error != nil { - pkgPath, ok, err := state.getPkgPath(p.ImportPath) - if err != nil { - return nil, err - } - if ok { - p.ImportPath = pkgPath - } - } - - if old, found := seen[p.ImportPath]; found { - // If one version of the package has an error, and the other doesn't, assume - // that this is a case where go list is reporting a fake dependency variant - // of the imported package: When a package tries to invalidly import another - // package, go list emits a variant of the imported package (with the same - // import path, but with an error on it, and the package will have a - // DepError set on it). An example of when this can happen is for imports of - // main packages: main packages can not be imported, but they may be - // separately matched and listed by another pattern. - // See golang.org/issue/36188 for more details. - - // The plan is that eventually, hopefully in Go 1.15, the error will be - // reported on the importing package rather than the duplicate "fake" - // version of the imported package. Once all supported versions of Go - // have the new behavior this logic can be deleted. - // TODO(matloob): delete the workaround logic once all supported versions of - // Go return the errors on the proper package. - - // There should be exactly one version of a package that doesn't have an - // error. - if old.Error == nil && p.Error == nil { - if !reflect.DeepEqual(p, old) { - return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) - } - continue - } - - // Determine if this package's error needs to be bubbled up. - // This is a hack, and we expect for go list to eventually set the error - // on the package. 
- if old.Error != nil { - var errkind string - if strings.Contains(old.Error.Err, "not an importable package") { - errkind = "not an importable package" - } else if strings.Contains(old.Error.Err, "use of internal package") && strings.Contains(old.Error.Err, "not allowed") { - errkind = "use of internal package not allowed" - } - if errkind != "" { - if len(old.Error.ImportStack) < 1 { - return nil, fmt.Errorf(`internal error: go list gave a %q error with empty import stack`, errkind) - } - importingPkg := old.Error.ImportStack[len(old.Error.ImportStack)-1] - if importingPkg == old.ImportPath { - // Using an older version of Go which put this package itself on top of import - // stack, instead of the importer. Look for importer in second from top - // position. - if len(old.Error.ImportStack) < 2 { - return nil, fmt.Errorf(`internal error: go list gave a %q error with an import stack without importing package`, errkind) - } - importingPkg = old.Error.ImportStack[len(old.Error.ImportStack)-2] - } - additionalErrors[importingPkg] = append(additionalErrors[importingPkg], Error{ - Pos: old.Error.Pos, - Msg: old.Error.Err, - Kind: ListError, - }) - } - } - - // Make sure that if there's a version of the package without an error, - // that's the one reported to the user. - if old.Error == nil { - continue - } - - // This package will replace the old one at the end of the loop. - } - seen[p.ImportPath] = p - - pkg := &Package{ - Name: p.Name, - ID: p.ImportPath, - GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), - CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), - OtherFiles: absJoin(p.Dir, otherFiles(p)...), - EmbedFiles: absJoin(p.Dir, p.EmbedFiles), - EmbedPatterns: absJoin(p.Dir, p.EmbedPatterns), - IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles), - forTest: p.ForTest, - depsErrors: p.DepsErrors, - Module: p.Module, - } - - if (state.cfg.Mode&typecheckCgo) != 0 && len(p.CgoFiles) != 0 { - if len(p.CompiledGoFiles) > len(p.GoFiles) { - // We need the cgo definitions, which are in the first - // CompiledGoFile after the non-cgo ones. This is a hack but there - // isn't currently a better way to find it. We also need the pure - // Go files and unprocessed cgo files, all of which are already - // in pkg.GoFiles. - cgoTypes := p.CompiledGoFiles[len(p.GoFiles)] - pkg.CompiledGoFiles = append([]string{cgoTypes}, pkg.GoFiles...) - } else { - // golang/go#38990: go list silently fails to do cgo processing - pkg.CompiledGoFiles = nil - pkg.Errors = append(pkg.Errors, Error{ - Msg: "go list failed to return CompiledGoFiles. This may indicate failure to perform cgo processing; try building at the command line. See https://golang.org/issue/38990.", - Kind: ListError, - }) - } - } - - // Work around https://golang.org/issue/28749: - // cmd/go puts assembly, C, and C++ files in CompiledGoFiles. - // Remove files from CompiledGoFiles that are non-go files - // (or are not files that look like they are from the cache). - if len(pkg.CompiledGoFiles) > 0 { - out := pkg.CompiledGoFiles[:0] - for _, f := range pkg.CompiledGoFiles { - if ext := filepath.Ext(f); ext != ".go" && ext != "" { // ext == "" means the file is from the cache, so probably cgo-processed file - continue - } - out = append(out, f) - } - pkg.CompiledGoFiles = out - } - - // Extract the PkgPath from the package's ID. 
- if i := strings.IndexByte(pkg.ID, ' '); i >= 0 { - pkg.PkgPath = pkg.ID[:i] - } else { - pkg.PkgPath = pkg.ID - } - - if pkg.PkgPath == "unsafe" { - pkg.CompiledGoFiles = nil // ignore fake unsafe.go file (#59929) - } else if len(pkg.CompiledGoFiles) == 0 { - // Work around for pre-go.1.11 versions of go list. - // TODO(matloob): they should be handled by the fallback. - // Can we delete this? - pkg.CompiledGoFiles = pkg.GoFiles - } - - // Assume go list emits only absolute paths for Dir. - if p.Dir != "" && !filepath.IsAbs(p.Dir) { - log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir) - } - - if p.Export != "" && !filepath.IsAbs(p.Export) { - pkg.ExportFile = filepath.Join(p.Dir, p.Export) - } else { - pkg.ExportFile = p.Export - } - - // imports - // - // Imports contains the IDs of all imported packages. - // ImportsMap records (path, ID) only where they differ. - ids := make(map[string]bool) - for _, id := range p.Imports { - ids[id] = true - } - pkg.Imports = make(map[string]*Package) - for path, id := range p.ImportMap { - pkg.Imports[path] = &Package{ID: id} // non-identity import - delete(ids, id) - } - for id := range ids { - if id == "C" { - continue - } - - pkg.Imports[id] = &Package{ID: id} // identity import - } - if !p.DepOnly { - response.Roots = append(response.Roots, pkg.ID) - } - - // Temporary work-around for golang/go#39986. Parse filenames out of - // error messages. This happens if there are unrecoverable syntax - // errors in the source, so we can't match on a specific error message. - // - // TODO(rfindley): remove this heuristic, in favor of considering - // InvalidGoFiles from the list driver. - if err := p.Error; err != nil && state.shouldAddFilenameFromError(p) { - addFilenameFromPos := func(pos string) bool { - split := strings.Split(pos, ":") - if len(split) < 1 { - return false - } - filename := strings.TrimSpace(split[0]) - if filename == "" { - return false - } - if !filepath.IsAbs(filename) { - filename = filepath.Join(state.cfg.Dir, filename) - } - info, _ := os.Stat(filename) - if info == nil { - return false - } - pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename) - pkg.GoFiles = append(pkg.GoFiles, filename) - return true - } - found := addFilenameFromPos(err.Pos) - // In some cases, go list only reports the error position in the - // error text, not the error position. One such case is when the - // file's package name is a keyword (see golang.org/issue/39763). - if !found { - addFilenameFromPos(err.Err) - } - } - - if p.Error != nil { - msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363. - // Address golang.org/issue/35964 by appending import stack to error message. - if msg == "import cycle not allowed" && len(p.Error.ImportStack) != 0 { - msg += fmt.Sprintf(": import stack: %v", p.Error.ImportStack) - } - pkg.Errors = append(pkg.Errors, Error{ - Pos: p.Error.Pos, - Msg: msg, - Kind: ListError, - }) - } - - pkgs[pkg.ID] = pkg - } - - for id, errs := range additionalErrors { - if p, ok := pkgs[id]; ok { - p.Errors = append(p.Errors, errs...) 
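[Editorial note] The ID-to-PkgPath split above relies on the "p [q.test]" ImportPath form that go list uses for packages rebuilt for a test binary. A tiny sketch of the same rule, outside the driver:

package main

import (
	"fmt"
	"strings"
)

// pkgPathFromID strips the " [q.test]" suffix from a go list package ID.
func pkgPathFromID(id string) string {
	if i := strings.IndexByte(id, ' '); i >= 0 {
		return id[:i]
	}
	return id
}

func main() {
	fmt.Println(pkgPathFromID("fmt"))            // fmt
	fmt.Println(pkgPathFromID("fmt [fmt.test]")) // fmt
}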
- } - } - for _, pkg := range pkgs { - response.Packages = append(response.Packages, pkg) - } - sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID }) - - return response, nil -} - -func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool { - if len(p.GoFiles) > 0 || len(p.CompiledGoFiles) > 0 { - return false - } - - goV, err := state.getGoVersion() - if err != nil { - return false - } - - // On Go 1.14 and earlier, only add filenames from errors if the import stack is empty. - // The import stack behaves differently for these versions than newer Go versions. - if goV < 15 { - return len(p.Error.ImportStack) == 0 - } - - // On Go 1.15 and later, only parse filenames out of error if there's no import stack, - // or the current package is at the top of the import stack. This is not guaranteed - // to work perfectly, but should avoid some cases where files in errors don't belong to this - // package. - return len(p.Error.ImportStack) == 0 || p.Error.ImportStack[len(p.Error.ImportStack)-1] == p.ImportPath -} - -// getGoVersion returns the effective minor version of the go command. -func (state *golistState) getGoVersion() (int, error) { - state.goVersionOnce.Do(func() { - state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner) - }) - return state.goVersion, state.goVersionError -} - -// getPkgPath finds the package path of a directory if it's relative to a root -// directory. -func (state *golistState) getPkgPath(dir string) (string, bool, error) { - absDir, err := filepath.Abs(dir) - if err != nil { - return "", false, err - } - roots, err := state.determineRootDirs() - if err != nil { - return "", false, err - } - - for rdir, rpath := range roots { - // Make sure that the directory is in the module, - // to avoid creating a path relative to another module. - if !strings.HasPrefix(absDir, rdir) { - continue - } - // TODO(matloob): This doesn't properly handle symlinks. - r, err := filepath.Rel(rdir, dir) - if err != nil { - continue - } - if rpath != "" { - // We choose only one root even though the directory even it can belong in multiple modules - // or GOPATH entries. This is okay because we only need to work with absolute dirs when a - // file is missing from disk, for instance when gopls calls go/packages in an overlay. - // Once the file is saved, gopls, or the next invocation of the tool will get the correct - // result straight from golist. - // TODO(matloob): Implement module tiebreaking? - return path.Join(rpath, filepath.ToSlash(r)), true, nil - } - return filepath.ToSlash(r), true, nil - } - return "", false, nil -} - -// absJoin absolutizes and flattens the lists of files. 
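[Editorial note] getPkgPath turns an absolute directory into an import path by making it relative to a known root and joining with the root's import prefix. A sketch under assumed, hypothetical Unix-style paths and module path:

package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	// Hypothetical module root and module path; in the driver these come
	// from determineRootDirs.
	rootDir := "/home/user/proj"
	rootPath := "example.com/proj"

	dir := "/home/user/proj/internal/render"
	rel, err := filepath.Rel(rootDir, dir)
	if err != nil {
		panic(err)
	}
	// Join with path.Join (not filepath.Join) so the result always uses
	// forward slashes, as import paths do.
	fmt.Println(path.Join(rootPath, filepath.ToSlash(rel)))
	// Output: example.com/proj/internal/render
}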
-func absJoin(dir string, fileses ...[]string) (res []string) { - for _, files := range fileses { - for _, file := range files { - if !filepath.IsAbs(file) { - file = filepath.Join(dir, file) - } - res = append(res, file) - } - } - return res -} - -func jsonFlag(cfg *Config, goVersion int) string { - if goVersion < 19 { - return "-json" - } - var fields []string - added := make(map[string]bool) - addFields := func(fs ...string) { - for _, f := range fs { - if !added[f] { - added[f] = true - fields = append(fields, f) - } - } - } - addFields("Name", "ImportPath", "Error") // These fields are always needed - if cfg.Mode&NeedFiles != 0 || cfg.Mode&NeedTypes != 0 { - addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles", - "CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles", - "SwigFiles", "SwigCXXFiles", "SysoFiles") - if cfg.Tests { - addFields("TestGoFiles", "XTestGoFiles") - } - } - if cfg.Mode&NeedTypes != 0 { - // CompiledGoFiles seems to be required for the test case TestCgoNoSyntax, - // even when -compiled isn't passed in. - // TODO(#52435): Should we make the test ask for -compiled, or automatically - // request CompiledGoFiles in certain circumstances? - addFields("Dir", "CompiledGoFiles") - } - if cfg.Mode&NeedCompiledGoFiles != 0 { - addFields("Dir", "CompiledGoFiles", "Export") - } - if cfg.Mode&NeedImports != 0 { - // When imports are requested, DepOnly is used to distinguish between packages - // explicitly requested and transitive imports of those packages. - addFields("DepOnly", "Imports", "ImportMap") - if cfg.Tests { - addFields("TestImports", "XTestImports") - } - } - if cfg.Mode&NeedDeps != 0 { - addFields("DepOnly") - } - if usesExportData(cfg) { - // Request Dir in the unlikely case Export is not absolute. - addFields("Dir", "Export") - } - if cfg.Mode&needInternalForTest != 0 { - addFields("ForTest") - } - if cfg.Mode&needInternalDepsErrors != 0 { - addFields("DepsErrors") - } - if cfg.Mode&NeedModule != 0 { - addFields("Module") - } - if cfg.Mode&NeedEmbedFiles != 0 { - addFields("EmbedFiles") - } - if cfg.Mode&NeedEmbedPatterns != 0 { - addFields("EmbedPatterns") - } - return "-json=" + strings.Join(fields, ",") -} - -func golistargs(cfg *Config, words []string, goVersion int) []string { - const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo - fullargs := []string{ - "-e", jsonFlag(cfg, goVersion), - fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypes|NeedTypesInfo|NeedTypesSizes) != 0), - fmt.Sprintf("-test=%t", cfg.Tests), - fmt.Sprintf("-export=%t", usesExportData(cfg)), - fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0), - // go list doesn't let you pass -test and -find together, - // probably because you'd just get the TestMain. - fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)), - } - - // golang/go#60456: with go1.21 and later, go list serves pgo variants, which - // can be costly to compute and may result in redundant processing for the - // caller. Disable these variants. If someone wants to add e.g. a NeedPGO - // mode flag, that should be a separate proposal. - if goVersion >= 21 { - fullargs = append(fullargs, "-pgo=off") - } - - fullargs = append(fullargs, cfg.BuildFlags...) - fullargs = append(fullargs, "--") - fullargs = append(fullargs, words...) - return fullargs -} - -// cfgInvocation returns an Invocation that reflects cfg's settings. 
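[Editorial note] jsonFlag and golistargs assemble an invocation along the lines of `go list -e -json=Name,ImportPath,... -compiled=... -- <patterns>`. The field-restricted -json form is accepted by the go command since Go 1.19, as the version check above reflects. A direct sketch of an equivalent invocation:

package main

import (
	"fmt"
	"log"
	"os/exec"
)

func main() {
	// Restricting -json to the needed fields keeps go list's output (and
	// the work it has to do) small.
	args := []string{"list", "-e", "-json=Name,ImportPath,Dir,GoFiles", "--", "./..."}
	out, err := exec.Command("go", args...).Output()
	if err != nil {
		log.Fatalf("go %v: %v", args, err)
	}
	fmt.Printf("%s", out)
}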
-func (state *golistState) cfgInvocation() gocommand.Invocation { - cfg := state.cfg - return gocommand.Invocation{ - BuildFlags: cfg.BuildFlags, - ModFile: cfg.modFile, - ModFlag: cfg.modFlag, - CleanEnv: cfg.Env != nil, - Env: cfg.Env, - Logf: cfg.Logf, - WorkingDir: cfg.Dir, - } -} - -// invokeGo returns the stdout of a go command invocation. -func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, error) { - cfg := state.cfg - - inv := state.cfgInvocation() - - // For Go versions 1.16 and above, `go list` accepts overlays directly via - // the -overlay flag. Set it, if it's available. - // - // The check for "list" is not necessarily required, but we should avoid - // getting the go version if possible. - if verb == "list" { - goVersion, err := state.getGoVersion() - if err != nil { - return nil, err - } - if goVersion >= 16 { - filename, cleanup, err := state.writeOverlays() - if err != nil { - return nil, err - } - defer cleanup() - inv.Overlay = filename - } - } - inv.Verb = verb - inv.Args = args - gocmdRunner := cfg.gocmdRunner - if gocmdRunner == nil { - gocmdRunner = &gocommand.Runner{} - } - stdout, stderr, friendlyErr, err := gocmdRunner.RunRaw(cfg.Context, inv) - if err != nil { - // Check for 'go' executable not being found. - if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { - return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound) - } - - exitErr, ok := err.(*exec.ExitError) - if !ok { - // Catastrophic error: - // - context cancellation - return nil, fmt.Errorf("couldn't run 'go': %w", err) - } - - // Old go version? - if strings.Contains(stderr.String(), "flag provided but not defined") { - return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)} - } - - // Related to #24854 - if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "unexpected directory layout") { - return nil, friendlyErr - } - - // Is there an error running the C compiler in cgo? This will be reported in the "Error" field - // and should be suppressed by go list -e. - // - // This condition is not perfect yet because the error message can include other error messages than runtime/cgo. - isPkgPathRune := func(r rune) bool { - // From https://golang.org/ref/spec#Import_declarations: - // Implementation restriction: A compiler may restrict ImportPaths to non-empty strings - // using only characters belonging to Unicode's L, M, N, P, and S general categories - // (the Graphic characters without spaces) and may also exclude the - // characters !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character U+FFFD. - return unicode.IsOneOf([]*unicode.RangeTable{unicode.L, unicode.M, unicode.N, unicode.P, unicode.S}, r) && - !strings.ContainsRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r) - } - // golang/go#36770: Handle case where cmd/go prints module download messages before the error. - msg := stderr.String() - for strings.HasPrefix(msg, "go: downloading") { - msg = msg[strings.IndexRune(msg, '\n')+1:] - } - if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") { - msg := msg[len("# "):] - if strings.HasPrefix(strings.TrimLeftFunc(msg, isPkgPathRune), "\n") { - return stdout, nil - } - // Treat pkg-config errors as a special case (golang.org/issue/36770). - if strings.HasPrefix(msg, "pkg-config") { - return stdout, nil - } - } - - // This error only appears in stderr. 
See golang.org/cl/166398 for a fix in go list to show - // the error in the Err section of stdout in case -e option is provided. - // This fix is provided for backwards compatibility. - if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") { - output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, - strings.Trim(stderr.String(), "\n")) - return bytes.NewBufferString(output), nil - } - - // Similar to the previous error, but currently lacks a fix in Go. - if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must all be in one directory") { - output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, - strings.Trim(stderr.String(), "\n")) - return bytes.NewBufferString(output), nil - } - - // Backwards compatibility for Go 1.11 because 1.12 and 1.13 put the directory in the ImportPath. - // If the package doesn't exist, put the absolute path of the directory into the error message, - // as Go 1.13 list does. - const noSuchDirectory = "no such directory" - if len(stderr.String()) > 0 && strings.Contains(stderr.String(), noSuchDirectory) { - errstr := stderr.String() - abspath := strings.TrimSpace(errstr[strings.Index(errstr, noSuchDirectory)+len(noSuchDirectory):]) - output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, - abspath, strings.Trim(stderr.String(), "\n")) - return bytes.NewBufferString(output), nil - } - - // Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist. - // Note that the error message we look for in this case is different that the one looked for above. - if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") { - output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, - strings.Trim(stderr.String(), "\n")) - return bytes.NewBufferString(output), nil - } - - // Workaround for #34273. go list -e with GO111MODULE=on has incorrect behavior when listing a - // directory outside any module. - if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside available modules") { - output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, - // TODO(matloob): command-line-arguments isn't correct here. - "command-line-arguments", strings.Trim(stderr.String(), "\n")) - return bytes.NewBufferString(output), nil - } - - // Another variation of the previous error - if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside module root") { - output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, - // TODO(matloob): command-line-arguments isn't correct here. - "command-line-arguments", strings.Trim(stderr.String(), "\n")) - return bytes.NewBufferString(output), nil - } - - // Workaround for an instance of golang.org/issue/26755: go list -e will return a non-zero exit - // status if there's a dependency on a package that doesn't exist. But it should return - // a zero exit status and set an error on that package. - if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no Go files in") { - // Don't clobber stdout if `go list` actually returned something. 
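[Editorial note] invokeGo above distinguishes a missing go executable (*exec.Error wrapping exec.ErrNotFound) from a non-zero exit (*exec.ExitError) before applying its stderr workarounds. A standalone sketch of that classification using errors.As:

package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	_, err := exec.Command("go", "list", "./...").Output()
	var execErr *exec.Error
	var exitErr *exec.ExitError
	switch {
	case err == nil:
		fmt.Println("go list succeeded")
	case errors.As(err, &execErr) && errors.Is(execErr.Err, exec.ErrNotFound):
		fmt.Println("the go tool is not on PATH")
	case errors.As(err, &exitErr):
		// Output() captures stderr in ExitError.Stderr for inspection.
		fmt.Printf("go list failed: %s\n", exitErr.Stderr)
	default:
		fmt.Println("could not run go:", err) // e.g. context cancellation
	}
}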
- if len(stdout.String()) > 0 { - return stdout, nil - } - // try to extract package name from string - stderrStr := stderr.String() - var importPath string - colon := strings.Index(stderrStr, ":") - if colon > 0 && strings.HasPrefix(stderrStr, "go build ") { - importPath = stderrStr[len("go build "):colon] - } - output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, - importPath, strings.Trim(stderrStr, "\n")) - return bytes.NewBufferString(output), nil - } - - // Export mode entails a build. - // If that build fails, errors appear on stderr - // (despite the -e flag) and the Export field is blank. - // Do not fail in that case. - // The same is true if an ad-hoc package given to go list doesn't exist. - // TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when - // packages don't exist or a build fails. - if !usesExportData(cfg) && !containsGoFile(args) { - return nil, friendlyErr - } - } - return stdout, nil -} - -// OverlayJSON is the format overlay files are expected to be in. -// The Replace map maps from overlaid paths to replacement paths: -// the Go command will forward all reads trying to open -// each overlaid path to its replacement path, or consider the overlaid -// path not to exist if the replacement path is empty. -// -// From golang/go#39958. -type OverlayJSON struct { - Replace map[string]string `json:"replace,omitempty"` -} - -// writeOverlays writes out files for go list's -overlay flag, as described -// above. -func (state *golistState) writeOverlays() (filename string, cleanup func(), err error) { - // Do nothing if there are no overlays in the config. - if len(state.cfg.Overlay) == 0 { - return "", func() {}, nil - } - dir, err := os.MkdirTemp("", "gopackages-*") - if err != nil { - return "", nil, err - } - // The caller must clean up this directory, unless this function returns an - // error. - cleanup = func() { - os.RemoveAll(dir) - } - defer func() { - if err != nil { - cleanup() - } - }() - overlays := map[string]string{} - for k, v := range state.cfg.Overlay { - // Create a unique filename for the overlaid files, to avoid - // creating nested directories. - noSeparator := strings.Join(strings.Split(filepath.ToSlash(k), "/"), "") - f, err := os.CreateTemp(dir, fmt.Sprintf("*-%s", noSeparator)) - if err != nil { - return "", func() {}, err - } - if _, err := f.Write(v); err != nil { - return "", func() {}, err - } - if err := f.Close(); err != nil { - return "", func() {}, err - } - overlays[k] = f.Name() - } - b, err := json.Marshal(OverlayJSON{Replace: overlays}) - if err != nil { - return "", func() {}, err - } - // Write out the overlay file that contains the filepath mappings. 
- filename = filepath.Join(dir, "overlay.json") - if err := os.WriteFile(filename, b, 0665); err != nil { - return "", func() {}, err - } - return filename, cleanup, nil -} - -func containsGoFile(s []string) bool { - for _, f := range s { - if strings.HasSuffix(f, ".go") { - return true - } - } - return false -} - -func cmdDebugStr(cmd *exec.Cmd) string { - env := make(map[string]string) - for _, kv := range cmd.Env { - split := strings.SplitN(kv, "=", 2) - k, v := split[0], split[1] - env[k] = v - } - - var args []string - for _, arg := range cmd.Args { - quoted := strconv.Quote(arg) - if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") { - args = append(args, quoted) - } else { - args = append(args, arg) - } - } - return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) -} diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go deleted file mode 100644 index d823c474a..000000000 --- a/vendor/golang.org/x/tools/go/packages/golist_overlay.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packages - -import ( - "encoding/json" - "path/filepath" - - "golang.org/x/tools/internal/gocommand" -) - -// determineRootDirs returns a mapping from absolute directories that could -// contain code to their corresponding import path prefixes. -func (state *golistState) determineRootDirs() (map[string]string, error) { - env, err := state.getEnv() - if err != nil { - return nil, err - } - if env["GOMOD"] != "" { - state.rootsOnce.Do(func() { - state.rootDirs, state.rootDirsError = state.determineRootDirsModules() - }) - } else { - state.rootsOnce.Do(func() { - state.rootDirs, state.rootDirsError = state.determineRootDirsGOPATH() - }) - } - return state.rootDirs, state.rootDirsError -} - -func (state *golistState) determineRootDirsModules() (map[string]string, error) { - // List all of the modules--the first will be the directory for the main - // module. Any replaced modules will also need to be treated as roots. - // Editing files in the module cache isn't a great idea, so we don't - // plan to ever support that. - out, err := state.invokeGo("list", "-m", "-json", "all") - if err != nil { - // 'go list all' will fail if we're outside of a module and - // GO111MODULE=on. Try falling back without 'all'. - var innerErr error - out, innerErr = state.invokeGo("list", "-m", "-json") - if innerErr != nil { - return nil, err - } - } - roots := map[string]string{} - modules := map[string]string{} - var i int - for dec := json.NewDecoder(out); dec.More(); { - mod := new(gocommand.ModuleJSON) - if err := dec.Decode(mod); err != nil { - return nil, err - } - if mod.Dir != "" && mod.Path != "" { - // This is a valid module; add it to the map. - absDir, err := filepath.Abs(mod.Dir) - if err != nil { - return nil, err - } - modules[absDir] = mod.Path - // The first result is the main module. 
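[Editorial note] writeOverlays hands the go command a JSON file mapping real paths to replacement files, via the -overlay flag available since Go 1.16. A sketch of the same mechanism with a hypothetical overlaid path; overlayJSON below mirrors the OverlayJSON type above:

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
	"os/exec"
	"path/filepath"
)

// overlayJSON maps overlaid paths to the files to read instead.
type overlayJSON struct {
	Replace map[string]string `json:"replace,omitempty"`
}

func main() {
	dir, err := os.MkdirTemp("", "overlay-demo-*")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	// Replacement contents for a hypothetical file /work/proj/gen.go.
	replacement := filepath.Join(dir, "gen.go")
	if err := os.WriteFile(replacement, []byte("package proj\n"), 0o600); err != nil {
		log.Fatal(err)
	}

	b, err := json.Marshal(overlayJSON{Replace: map[string]string{
		"/work/proj/gen.go": replacement, // hypothetical overlaid path
	}})
	if err != nil {
		log.Fatal(err)
	}
	overlayFile := filepath.Join(dir, "overlay.json")
	if err := os.WriteFile(overlayFile, b, 0o600); err != nil {
		log.Fatal(err)
	}

	out, err := exec.Command("go", "list", "-overlay="+overlayFile, "-json", "./...").CombinedOutput()
	if err != nil {
		log.Printf("go list: %v", err)
	}
	fmt.Printf("%s", out)
}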
- if i == 0 || mod.Replace != nil && mod.Replace.Path != "" { - roots[absDir] = mod.Path - } - } - i++ - } - return roots, nil -} - -func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) { - m := map[string]string{} - for _, dir := range filepath.SplitList(state.mustGetEnv()["GOPATH"]) { - absDir, err := filepath.Abs(dir) - if err != nil { - return nil, err - } - m[filepath.Join(absDir, "src")] = "" - } - return m, nil -} diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go deleted file mode 100644 index 5c080d21b..000000000 --- a/vendor/golang.org/x/tools/go/packages/loadmode_string.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packages - -import ( - "fmt" - "strings" -) - -var allModes = []LoadMode{ - NeedName, - NeedFiles, - NeedCompiledGoFiles, - NeedImports, - NeedDeps, - NeedExportFile, - NeedTypes, - NeedSyntax, - NeedTypesInfo, - NeedTypesSizes, -} - -var modeStrings = []string{ - "NeedName", - "NeedFiles", - "NeedCompiledGoFiles", - "NeedImports", - "NeedDeps", - "NeedExportFile", - "NeedTypes", - "NeedSyntax", - "NeedTypesInfo", - "NeedTypesSizes", -} - -func (mod LoadMode) String() string { - m := mod - if m == 0 { - return "LoadMode(0)" - } - var out []string - for i, x := range allModes { - if x > m { - break - } - if (m & x) != 0 { - out = append(out, modeStrings[i]) - m = m ^ x - } - } - if m != 0 { - out = append(out, "Unknown") - } - return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|")) -} diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go deleted file mode 100644 index 3ea1b3fa4..000000000 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ /dev/null @@ -1,1445 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packages - -// See doc.go for package documentation and implementation notes. - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "go/ast" - "go/parser" - "go/scanner" - "go/token" - "go/types" - "io" - "log" - "os" - "path/filepath" - "runtime" - "strings" - "sync" - "time" - - "golang.org/x/sync/errgroup" - - "golang.org/x/tools/go/gcexportdata" - "golang.org/x/tools/internal/gocommand" - "golang.org/x/tools/internal/packagesinternal" - "golang.org/x/tools/internal/typesinternal" - "golang.org/x/tools/internal/versions" -) - -// A LoadMode controls the amount of detail to return when loading. -// The bits below can be combined to specify which fields should be -// filled in the result packages. -// The zero value is a special case, equivalent to combining -// the NeedName, NeedFiles, and NeedCompiledGoFiles bits. -// ID and Errors (if present) will always be filled. -// Load may return more information than requested. -type LoadMode int - -const ( - // NeedName adds Name and PkgPath. - NeedName LoadMode = 1 << iota - - // NeedFiles adds GoFiles and OtherFiles. - NeedFiles - - // NeedCompiledGoFiles adds CompiledGoFiles. - NeedCompiledGoFiles - - // NeedImports adds Imports. If NeedDeps is not set, the Imports field will contain - // "placeholder" Packages with only the ID set. - NeedImports - - // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. 
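[Editorial note] determineRootDirsModules derives root directories from `go list -m -json all`: the first entry is the main module, and replaced modules also count as roots. A reduced sketch of that decoding; modJSON below is a stand-in for gocommand.ModuleJSON:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"os/exec"
)

// modJSON mirrors the handful of module fields used here.
type modJSON struct {
	Path    string
	Dir     string
	Main    bool
	Replace *modJSON
}

func main() {
	out, err := exec.Command("go", "list", "-m", "-json", "all").Output()
	if err != nil {
		log.Fatal(err) // e.g. outside a module with GO111MODULE=on
	}
	roots := map[string]string{}
	first := true
	for dec := json.NewDecoder(bytes.NewReader(out)); dec.More(); {
		var m modJSON
		if err := dec.Decode(&m); err != nil {
			log.Fatal(err)
		}
		// Main module and replaced modules become roots.
		if m.Dir != "" && (first || m.Replace != nil) {
			roots[m.Dir] = m.Path
		}
		first = false
	}
	fmt.Println(roots)
}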
- NeedDeps - - // NeedExportFile adds ExportFile. - NeedExportFile - - // NeedTypes adds Types, Fset, and IllTyped. - NeedTypes - - // NeedSyntax adds Syntax. - NeedSyntax - - // NeedTypesInfo adds TypesInfo. - NeedTypesInfo - - // NeedTypesSizes adds TypesSizes. - NeedTypesSizes - - // needInternalDepsErrors adds the internal deps errors field for use by gopls. - needInternalDepsErrors - - // needInternalForTest adds the internal forTest field. - // Tests must also be set on the context for this field to be populated. - needInternalForTest - - // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+. - // Modifies CompiledGoFiles and Types, and has no effect on its own. - typecheckCgo - - // NeedModule adds Module. - NeedModule - - // NeedEmbedFiles adds EmbedFiles. - NeedEmbedFiles - - // NeedEmbedPatterns adds EmbedPatterns. - NeedEmbedPatterns -) - -const ( - // Deprecated: LoadFiles exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. - LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles - - // Deprecated: LoadImports exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. - LoadImports = LoadFiles | NeedImports - - // Deprecated: LoadTypes exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. - LoadTypes = LoadImports | NeedTypes | NeedTypesSizes - - // Deprecated: LoadSyntax exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. - LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo - - // Deprecated: LoadAllSyntax exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. - LoadAllSyntax = LoadSyntax | NeedDeps - - // Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile. - NeedExportsFile = NeedExportFile -) - -// A Config specifies details about how packages should be loaded. -// The zero value is a valid configuration. -// Calls to Load do not modify this struct. -type Config struct { - // Mode controls the level of information returned for each package. - Mode LoadMode - - // Context specifies the context for the load operation. - // Cancelling the context may cause [Load] to abort and - // return an error. - Context context.Context - - // Logf is the logger for the config. - // If the user provides a logger, debug logging is enabled. - // If the GOPACKAGESDEBUG environment variable is set to true, - // but the logger is nil, default to log.Printf. - Logf func(format string, args ...interface{}) - - // Dir is the directory in which to run the build system's query tool - // that provides information about the packages. - // If Dir is empty, the tool is run in the current directory. - Dir string - - // Env is the environment to use when invoking the build system's query tool. - // If Env is nil, the current environment is used. - // As in os/exec's Cmd, only the last value in the slice for - // each environment key is used. To specify the setting of only - // a few variables, append to the current environment, as in: - // - // opt.Env = append(os.Environ(), "GOOS=plan9", "GOARCH=386") - // - Env []string - - // gocmdRunner guards go command calls from concurrency errors. 
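[Editorial note] The deprecated LoadFiles/LoadImports/LoadTypes/LoadSyntax aliases above expand to combinations of the Need bits; spelling the bits out is the recommended replacement. A small sketch showing the equivalence, and that LoadMode's String method prints the set bits by name:

package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
)

func main() {
	// Explicit Need bits instead of the deprecated LoadSyntax alias.
	mode := packages.NeedName | packages.NeedFiles | packages.NeedCompiledGoFiles |
		packages.NeedImports | packages.NeedTypes | packages.NeedTypesSizes |
		packages.NeedSyntax | packages.NeedTypesInfo

	fmt.Println(mode) // e.g. LoadMode(NeedName|NeedFiles|...)

	// Only to demonstrate the equivalence with the deprecated alias:
	fmt.Println(mode == packages.LoadSyntax) // true
}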
- gocmdRunner *gocommand.Runner - - // BuildFlags is a list of command-line flags to be passed through to - // the build system's query tool. - BuildFlags []string - - // modFile will be used for -modfile in go command invocations. - modFile string - - // modFlag will be used for -modfile in go command invocations. - modFlag string - - // Fset provides source position information for syntax trees and types. - // If Fset is nil, Load will use a new fileset, but preserve Fset's value. - Fset *token.FileSet - - // ParseFile is called to read and parse each file - // when preparing a package's type-checked syntax tree. - // It must be safe to call ParseFile simultaneously from multiple goroutines. - // If ParseFile is nil, the loader will uses parser.ParseFile. - // - // ParseFile should parse the source from src and use filename only for - // recording position information. - // - // An application may supply a custom implementation of ParseFile - // to change the effective file contents or the behavior of the parser, - // or to modify the syntax tree. For example, selectively eliminating - // unwanted function bodies can significantly accelerate type checking. - ParseFile func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) - - // If Tests is set, the loader includes not just the packages - // matching a particular pattern but also any related test packages, - // including test-only variants of the package and the test executable. - // - // For example, when using the go command, loading "fmt" with Tests=true - // returns four packages, with IDs "fmt" (the standard package), - // "fmt [fmt.test]" (the package as compiled for the test), - // "fmt_test" (the test functions from source files in package fmt_test), - // and "fmt.test" (the test binary). - // - // In build systems with explicit names for tests, - // setting Tests may have no effect. - Tests bool - - // Overlay provides a mapping of absolute file paths to file contents. - // If the file with the given path already exists, the parser will use the - // alternative file contents provided by the map. - // - // Overlays provide incomplete support for when a given file doesn't - // already exist on disk. See the package doc above for more details. - Overlay map[string][]byte -} - -// Load loads and returns the Go packages named by the given patterns. -// -// Config specifies loading options; -// nil behaves the same as an empty Config. -// -// If any of the patterns was invalid as defined by the -// underlying build system, Load returns an error. -// It may return an empty list of packages without an error, -// for instance for an empty expansion of a valid wildcard. -// Errors associated with a particular package are recorded in the -// corresponding Package's Errors list, and do not cause Load to -// return an error. Clients may need to handle such errors before -// proceeding with further analysis. The PrintErrors function is -// provided for convenient display of all errors. -func Load(cfg *Config, patterns ...string) ([]*Package, error) { - ld := newLoader(cfg) - response, external, err := defaultDriver(&ld.Config, patterns...) - if err != nil { - return nil, err - } - - ld.sizes = types.SizesFor(response.Compiler, response.Arch) - if ld.sizes == nil && ld.Config.Mode&(NeedTypes|NeedTypesSizes|NeedTypesInfo) != 0 { - // Type size information is needed but unavailable. 
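[Editorial note] A minimal end-to-end use of the Config and Load described above. The extra environment entry is a hypothetical example; per-package problems are reported via pkg.Errors (and PrintErrors), not via the error returned by Load:

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		Mode:  packages.NeedName | packages.NeedImports | packages.NeedTypes,
		Dir:   ".",                                      // run the query tool here
		Env:   append(os.Environ(), "GOFLAGS=-mod=mod"), // hypothetical extra setting
		Tests: true,                                     // also load test variants
	}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		log.Fatal(err) // invalid patterns, driver failure, etc.
	}
	if packages.PrintErrors(pkgs) > 0 {
		log.Fatal("packages contain errors")
	}
	for _, p := range pkgs {
		fmt.Println(p.ID, len(p.Imports), "imports")
	}
}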
- if external { - // An external driver may fail to populate the Compiler/GOARCH fields, - // especially since they are relatively new (see #63700). - // Provide a sensible fallback in this case. - ld.sizes = types.SizesFor("gc", runtime.GOARCH) - if ld.sizes == nil { // gccgo-only arch - ld.sizes = types.SizesFor("gc", "amd64") - } - } else { - // Go list should never fail to deliver accurate size information. - // Reject the whole Load since the error is the same for every package. - return nil, fmt.Errorf("can't determine type sizes for compiler %q on GOARCH %q", - response.Compiler, response.Arch) - } - } - - return ld.refine(response) -} - -// defaultDriver is a driver that implements go/packages' fallback behavior. -// It will try to request to an external driver, if one exists. If there's -// no external driver, or the driver returns a response with NotHandled set, -// defaultDriver will fall back to the go list driver. -// The boolean result indicates that an external driver handled the request. -func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, error) { - const ( - // windowsArgMax specifies the maximum command line length for - // the Windows' CreateProcess function. - windowsArgMax = 32767 - // maxEnvSize is a very rough estimation of the maximum environment - // size of a user. - maxEnvSize = 16384 - // safeArgMax specifies the maximum safe command line length to use - // by the underlying driver excl. the environment. We choose the Windows' - // ARG_MAX as the starting point because it's one of the lowest ARG_MAX - // constants out of the different supported platforms, - // e.g., https://www.in-ulm.de/~mascheck/various/argmax/#results. - safeArgMax = windowsArgMax - maxEnvSize - ) - chunks, err := splitIntoChunks(patterns, safeArgMax) - if err != nil { - return nil, false, err - } - - if driver := findExternalDriver(cfg); driver != nil { - response, err := callDriverOnChunks(driver, cfg, chunks) - if err != nil { - return nil, false, err - } else if !response.NotHandled { - return response, true, nil - } - // (fall through) - } - - response, err := callDriverOnChunks(goListDriver, cfg, chunks) - if err != nil { - return nil, false, err - } - return response, false, err -} - -// splitIntoChunks chunks the slice so that the total number of characters -// in a chunk is no longer than argMax. 
-func splitIntoChunks(patterns []string, argMax int) ([][]string, error) { - if argMax <= 0 { - return nil, errors.New("failed to split patterns into chunks, negative safe argMax value") - } - var chunks [][]string - charsInChunk := 0 - nextChunkStart := 0 - for i, v := range patterns { - vChars := len(v) - if vChars > argMax { - // a single pattern is longer than the maximum safe ARG_MAX, hardly should happen - return nil, errors.New("failed to split patterns into chunks, a pattern is too long") - } - charsInChunk += vChars + 1 // +1 is for a whitespace between patterns that has to be counted too - if charsInChunk > argMax { - chunks = append(chunks, patterns[nextChunkStart:i]) - nextChunkStart = i - charsInChunk = vChars - } - } - // add the last chunk - if nextChunkStart < len(patterns) { - chunks = append(chunks, patterns[nextChunkStart:]) - } - return chunks, nil -} - -func callDriverOnChunks(driver driver, cfg *Config, chunks [][]string) (*DriverResponse, error) { - if len(chunks) == 0 { - return driver(cfg) - } - responses := make([]*DriverResponse, len(chunks)) - errNotHandled := errors.New("driver returned NotHandled") - var g errgroup.Group - for i, chunk := range chunks { - i := i - chunk := chunk - g.Go(func() (err error) { - responses[i], err = driver(cfg, chunk...) - if responses[i] != nil && responses[i].NotHandled { - err = errNotHandled - } - return err - }) - } - if err := g.Wait(); err != nil { - if errors.Is(err, errNotHandled) { - return &DriverResponse{NotHandled: true}, nil - } - return nil, err - } - return mergeResponses(responses...), nil -} - -func mergeResponses(responses ...*DriverResponse) *DriverResponse { - if len(responses) == 0 { - return nil - } - response := newDeduper() - response.dr.NotHandled = false - response.dr.Compiler = responses[0].Compiler - response.dr.Arch = responses[0].Arch - response.dr.GoVersion = responses[0].GoVersion - for _, v := range responses { - response.addAll(v) - } - return response.dr -} - -// A Package describes a loaded Go package. -type Package struct { - // ID is a unique identifier for a package, - // in a syntax provided by the underlying build system. - // - // Because the syntax varies based on the build system, - // clients should treat IDs as opaque and not attempt to - // interpret them. - ID string - - // Name is the package name as it appears in the package source code. - Name string - - // PkgPath is the package path as used by the go/types package. - PkgPath string - - // Errors contains any errors encountered querying the metadata - // of the package, or while parsing or type-checking its files. - Errors []Error - - // TypeErrors contains the subset of errors produced during type checking. - TypeErrors []types.Error - - // GoFiles lists the absolute file paths of the package's Go source files. - // It may include files that should not be compiled, for example because - // they contain non-matching build tags, are documentary pseudo-files such as - // unsafe/unsafe.go or builtin/builtin.go, or are subject to cgo preprocessing. - GoFiles []string - - // CompiledGoFiles lists the absolute file paths of the package's source - // files that are suitable for type checking. - // This may differ from GoFiles if files are processed before compilation. - CompiledGoFiles []string - - // OtherFiles lists the absolute file paths of the package's non-Go source files, - // including assembly, C, C++, Fortran, Objective-C, SWIG, and so on. 
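[Editorial note] callDriverOnChunks fans the pattern chunks out to the driver concurrently with errgroup and then merges the responses. A stripped-down sketch of the same fan-out/collect shape, with hypothetical chunks and a stand-in for the driver call:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sync/errgroup"
)

func main() {
	chunks := [][]string{{"./a/...", "./b/..."}, {"./c/..."}} // hypothetical pattern chunks
	results := make([]int, len(chunks))

	var g errgroup.Group
	for i, chunk := range chunks {
		i, chunk := i, chunk // pin loop variables (pre-Go 1.22 semantics)
		g.Go(func() error {
			// Stand-in for invoking the driver on one chunk; each goroutine
			// writes only to its own slot, so no extra locking is needed.
			results[i] = len(chunk)
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(results) // [2 1]
}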
- OtherFiles []string - - // EmbedFiles lists the absolute file paths of the package's files - // embedded with go:embed. - EmbedFiles []string - - // EmbedPatterns lists the absolute file patterns of the package's - // files embedded with go:embed. - EmbedPatterns []string - - // IgnoredFiles lists source files that are not part of the package - // using the current build configuration but that might be part of - // the package using other build configurations. - IgnoredFiles []string - - // ExportFile is the absolute path to a file containing type - // information for the package as provided by the build system. - ExportFile string - - // Imports maps import paths appearing in the package's Go source files - // to corresponding loaded Packages. - Imports map[string]*Package - - // Types provides type information for the package. - // The NeedTypes LoadMode bit sets this field for packages matching the - // patterns; type information for dependencies may be missing or incomplete, - // unless NeedDeps and NeedImports are also set. - // - // Each call to [Load] returns a consistent set of type - // symbols, as defined by the comment at [types.Identical]. - // Avoid mixing type information from two or more calls to [Load]. - Types *types.Package - - // Fset provides position information for Types, TypesInfo, and Syntax. - // It is set only when Types is set. - Fset *token.FileSet - - // IllTyped indicates whether the package or any dependency contains errors. - // It is set only when Types is set. - IllTyped bool - - // Syntax is the package's syntax trees, for the files listed in CompiledGoFiles. - // - // The NeedSyntax LoadMode bit populates this field for packages matching the patterns. - // If NeedDeps and NeedImports are also set, this field will also be populated - // for dependencies. - // - // Syntax is kept in the same order as CompiledGoFiles, with the caveat that nils are - // removed. If parsing returned nil, Syntax may be shorter than CompiledGoFiles. - Syntax []*ast.File - - // TypesInfo provides type information about the package's syntax trees. - // It is set only when Syntax is set. - TypesInfo *types.Info - - // TypesSizes provides the effective size function for types in TypesInfo. - TypesSizes types.Sizes - - // forTest is the package under test, if any. - forTest string - - // depsErrors is the DepsErrors field from the go list response, if any. - depsErrors []*packagesinternal.PackageError - - // module is the module information for the package if it exists. - Module *Module -} - -// Module provides module information for a package. -type Module struct { - Path string // module path - Version string // module version - Replace *Module // replaced by this module - Time *time.Time // time version was created - Main bool // is this the main module? - Indirect bool // is this module only an indirect dependency of main module? - Dir string // directory holding files for this module, if any - GoMod string // path to go.mod file used when loading this module, if any - GoVersion string // go version used in module - Error *ModuleError // error loading module -} - -// ModuleError holds errors loading a module. 
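[Editorial note] Once Load returns, the Package graph described above is usually walked with packages.Visit, which visits every package once in dependency-first order. A short sketch that also reads the Module field:

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		Mode: packages.NeedName | packages.NeedImports | packages.NeedDeps | packages.NeedModule,
	}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		log.Fatal(err)
	}
	// Visit walks the import graph; the post function runs after a
	// package's dependencies have been visited.
	packages.Visit(pkgs, nil, func(p *packages.Package) {
		mod := "<none>"
		if p.Module != nil {
			mod = p.Module.Path
		}
		fmt.Println(p.PkgPath, "module:", mod)
	})
}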
-type ModuleError struct { - Err string // the error itself -} - -func init() { - packagesinternal.GetForTest = func(p interface{}) string { - return p.(*Package).forTest - } - packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { - return p.(*Package).depsErrors - } - packagesinternal.SetModFile = func(config interface{}, value string) { - config.(*Config).modFile = value - } - packagesinternal.SetModFlag = func(config interface{}, value string) { - config.(*Config).modFlag = value - } - packagesinternal.TypecheckCgo = int(typecheckCgo) - packagesinternal.DepsErrors = int(needInternalDepsErrors) - packagesinternal.ForTest = int(needInternalForTest) -} - -// An Error describes a problem with a package's metadata, syntax, or types. -type Error struct { - Pos string // "file:line:col" or "file:line" or "" or "-" - Msg string - Kind ErrorKind -} - -// ErrorKind describes the source of the error, allowing the user to -// differentiate between errors generated by the driver, the parser, or the -// type-checker. -type ErrorKind int - -const ( - UnknownError ErrorKind = iota - ListError - ParseError - TypeError -) - -func (err Error) Error() string { - pos := err.Pos - if pos == "" { - pos = "-" // like token.Position{}.String() - } - return pos + ": " + err.Msg -} - -// flatPackage is the JSON form of Package -// It drops all the type and syntax fields, and transforms the Imports -// -// TODO(adonovan): identify this struct with Package, effectively -// publishing the JSON protocol. -type flatPackage struct { - ID string - Name string `json:",omitempty"` - PkgPath string `json:",omitempty"` - Errors []Error `json:",omitempty"` - GoFiles []string `json:",omitempty"` - CompiledGoFiles []string `json:",omitempty"` - OtherFiles []string `json:",omitempty"` - EmbedFiles []string `json:",omitempty"` - EmbedPatterns []string `json:",omitempty"` - IgnoredFiles []string `json:",omitempty"` - ExportFile string `json:",omitempty"` - Imports map[string]string `json:",omitempty"` -} - -// MarshalJSON returns the Package in its JSON form. -// For the most part, the structure fields are written out unmodified, and -// the type and syntax fields are skipped. -// The imports are written out as just a map of path to package id. -// The errors are written using a custom type that tries to preserve the -// structure of error types we know about. -// -// This method exists to enable support for additional build systems. It is -// not intended for use by clients of the API and we may change the format. -func (p *Package) MarshalJSON() ([]byte, error) { - flat := &flatPackage{ - ID: p.ID, - Name: p.Name, - PkgPath: p.PkgPath, - Errors: p.Errors, - GoFiles: p.GoFiles, - CompiledGoFiles: p.CompiledGoFiles, - OtherFiles: p.OtherFiles, - EmbedFiles: p.EmbedFiles, - EmbedPatterns: p.EmbedPatterns, - IgnoredFiles: p.IgnoredFiles, - ExportFile: p.ExportFile, - } - if len(p.Imports) > 0 { - flat.Imports = make(map[string]string, len(p.Imports)) - for path, ipkg := range p.Imports { - flat.Imports[path] = ipkg.ID - } - } - return json.Marshal(flat) -} - -// UnmarshalJSON reads in a Package from its JSON format. -// See MarshalJSON for details about the format accepted. 
-func (p *Package) UnmarshalJSON(b []byte) error { - flat := &flatPackage{} - if err := json.Unmarshal(b, &flat); err != nil { - return err - } - *p = Package{ - ID: flat.ID, - Name: flat.Name, - PkgPath: flat.PkgPath, - Errors: flat.Errors, - GoFiles: flat.GoFiles, - CompiledGoFiles: flat.CompiledGoFiles, - OtherFiles: flat.OtherFiles, - EmbedFiles: flat.EmbedFiles, - EmbedPatterns: flat.EmbedPatterns, - ExportFile: flat.ExportFile, - } - if len(flat.Imports) > 0 { - p.Imports = make(map[string]*Package, len(flat.Imports)) - for path, id := range flat.Imports { - p.Imports[path] = &Package{ID: id} - } - } - return nil -} - -func (p *Package) String() string { return p.ID } - -// loaderPackage augments Package with state used during the loading phase -type loaderPackage struct { - *Package - importErrors map[string]error // maps each bad import to its error - loadOnce sync.Once - color uint8 // for cycle detection - needsrc bool // load from source (Mode >= LoadTypes) - needtypes bool // type information is either requested or depended on - initial bool // package was matched by a pattern - goVersion int // minor version number of go command on PATH -} - -// loader holds the working state of a single call to load. -type loader struct { - pkgs map[string]*loaderPackage - Config - sizes types.Sizes // non-nil if needed by mode - parseCache map[string]*parseValue - parseCacheMu sync.Mutex - exportMu sync.Mutex // enforces mutual exclusion of exportdata operations - - // Config.Mode contains the implied mode (see impliedLoadMode). - // Implied mode contains all the fields we need the data for. - // In requestedMode there are the actually requested fields. - // We'll zero them out before returning packages to the user. - // This makes it easier for us to get the conditions where - // we need certain modes right. - requestedMode LoadMode -} - -type parseValue struct { - f *ast.File - err error - ready chan struct{} -} - -func newLoader(cfg *Config) *loader { - ld := &loader{ - parseCache: map[string]*parseValue{}, - } - if cfg != nil { - ld.Config = *cfg - // If the user has provided a logger, use it. - ld.Config.Logf = cfg.Logf - } - if ld.Config.Logf == nil { - // If the GOPACKAGESDEBUG environment variable is set to true, - // but the user has not provided a logger, default to log.Printf. - if debug { - ld.Config.Logf = log.Printf - } else { - ld.Config.Logf = func(format string, args ...interface{}) {} - } - } - if ld.Config.Mode == 0 { - ld.Config.Mode = NeedName | NeedFiles | NeedCompiledGoFiles // Preserve zero behavior of Mode for backwards compatibility. - } - if ld.Config.Env == nil { - ld.Config.Env = os.Environ() - } - if ld.Config.gocmdRunner == nil { - ld.Config.gocmdRunner = &gocommand.Runner{} - } - if ld.Context == nil { - ld.Context = context.Background() - } - if ld.Dir == "" { - if dir, err := os.Getwd(); err == nil { - ld.Dir = dir - } - } - - // Save the actually requested fields. We'll zero them out before returning packages to the user. - ld.requestedMode = ld.Mode - ld.Mode = impliedLoadMode(ld.Mode) - - if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { - if ld.Fset == nil { - ld.Fset = token.NewFileSet() - } - - // ParseFile is required even in LoadTypes mode - // because we load source if export data is missing. 
- if ld.ParseFile == nil { - ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) { - const mode = parser.AllErrors | parser.ParseComments - return parser.ParseFile(fset, filename, src, mode) - } - } - } - - return ld -} - -// refine connects the supplied packages into a graph and then adds type -// and syntax information as requested by the LoadMode. -func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { - roots := response.Roots - rootMap := make(map[string]int, len(roots)) - for i, root := range roots { - rootMap[root] = i - } - ld.pkgs = make(map[string]*loaderPackage) - // first pass, fixup and build the map and roots - var initial = make([]*loaderPackage, len(roots)) - for _, pkg := range response.Packages { - rootIndex := -1 - if i, found := rootMap[pkg.ID]; found { - rootIndex = i - } - - // Overlays can invalidate export data. - // TODO(matloob): make this check fine-grained based on dependencies on overlaid files - exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe" - // This package needs type information if the caller requested types and the package is - // either a root, or it's a non-root and the user requested dependencies ... - needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) - // This package needs source if the call requested source (or types info, which implies source) - // and the package is either a root, or itas a non- root and the user requested dependencies... - needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) || - // ... or if we need types and the exportData is invalid. We fall back to (incompletely) - // typechecking packages from source if they fail to compile. - (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe" - lpkg := &loaderPackage{ - Package: pkg, - needtypes: needtypes, - needsrc: needsrc, - goVersion: response.GoVersion, - } - ld.pkgs[lpkg.ID] = lpkg - if rootIndex >= 0 { - initial[rootIndex] = lpkg - lpkg.initial = true - } - } - for i, root := range roots { - if initial[i] == nil { - return nil, fmt.Errorf("root package %v is missing", root) - } - } - - if ld.Mode&NeedImports != 0 { - // Materialize the import graph. - - const ( - white = 0 // new - grey = 1 // in progress - black = 2 // complete - ) - - // visit traverses the import graph, depth-first, - // and materializes the graph as Packages.Imports. - // - // Valid imports are saved in the Packages.Import map. - // Invalid imports (cycles and missing nodes) are saved in the importErrors map. - // Thus, even in the presence of both kinds of errors, - // the Import graph remains a DAG. - // - // visit returns whether the package needs src or has a transitive - // dependency on a package that does. These are the only packages - // for which we load source code. 
- var stack []*loaderPackage - var visit func(lpkg *loaderPackage) bool - visit = func(lpkg *loaderPackage) bool { - switch lpkg.color { - case black: - return lpkg.needsrc - case grey: - panic("internal error: grey node") - } - lpkg.color = grey - stack = append(stack, lpkg) // push - stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports - lpkg.Imports = make(map[string]*Package, len(stubs)) - for importPath, ipkg := range stubs { - var importErr error - imp := ld.pkgs[ipkg.ID] - if imp == nil { - // (includes package "C" when DisableCgo) - importErr = fmt.Errorf("missing package: %q", ipkg.ID) - } else if imp.color == grey { - importErr = fmt.Errorf("import cycle: %s", stack) - } - if importErr != nil { - if lpkg.importErrors == nil { - lpkg.importErrors = make(map[string]error) - } - lpkg.importErrors[importPath] = importErr - continue - } - - if visit(imp) { - lpkg.needsrc = true - } - lpkg.Imports[importPath] = imp.Package - } - - // Complete type information is required for the - // immediate dependencies of each source package. - if lpkg.needsrc && ld.Mode&NeedTypes != 0 { - for _, ipkg := range lpkg.Imports { - ld.pkgs[ipkg.ID].needtypes = true - } - } - - // NeedTypeSizes causes TypeSizes to be set even - // on packages for which types aren't needed. - if ld.Mode&NeedTypesSizes != 0 { - lpkg.TypesSizes = ld.sizes - } - stack = stack[:len(stack)-1] // pop - lpkg.color = black - - return lpkg.needsrc - } - - // For each initial package, create its import DAG. - for _, lpkg := range initial { - visit(lpkg) - } - - } else { - // !NeedImports: drop the stub (ID-only) import packages - // that we are not even going to try to resolve. - for _, lpkg := range initial { - lpkg.Imports = nil - } - } - - // Load type data and syntax if needed, starting at - // the initial packages (roots of the import DAG). - if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { - var wg sync.WaitGroup - for _, lpkg := range initial { - wg.Add(1) - go func(lpkg *loaderPackage) { - ld.loadRecursive(lpkg) - wg.Done() - }(lpkg) - } - wg.Wait() - } - - // If the context is done, return its error and - // throw out [likely] incomplete packages. - if err := ld.Context.Err(); err != nil { - return nil, err - } - - result := make([]*Package, len(initial)) - for i, lpkg := range initial { - result[i] = lpkg.Package - } - for i := range ld.pkgs { - // Clear all unrequested fields, - // to catch programs that use more than they request. 
- if ld.requestedMode&NeedName == 0 { - ld.pkgs[i].Name = "" - ld.pkgs[i].PkgPath = "" - } - if ld.requestedMode&NeedFiles == 0 { - ld.pkgs[i].GoFiles = nil - ld.pkgs[i].OtherFiles = nil - ld.pkgs[i].IgnoredFiles = nil - } - if ld.requestedMode&NeedEmbedFiles == 0 { - ld.pkgs[i].EmbedFiles = nil - } - if ld.requestedMode&NeedEmbedPatterns == 0 { - ld.pkgs[i].EmbedPatterns = nil - } - if ld.requestedMode&NeedCompiledGoFiles == 0 { - ld.pkgs[i].CompiledGoFiles = nil - } - if ld.requestedMode&NeedImports == 0 { - ld.pkgs[i].Imports = nil - } - if ld.requestedMode&NeedExportFile == 0 { - ld.pkgs[i].ExportFile = "" - } - if ld.requestedMode&NeedTypes == 0 { - ld.pkgs[i].Types = nil - ld.pkgs[i].Fset = nil - ld.pkgs[i].IllTyped = false - } - if ld.requestedMode&NeedSyntax == 0 { - ld.pkgs[i].Syntax = nil - } - if ld.requestedMode&NeedTypesInfo == 0 { - ld.pkgs[i].TypesInfo = nil - } - if ld.requestedMode&NeedTypesSizes == 0 { - ld.pkgs[i].TypesSizes = nil - } - if ld.requestedMode&NeedModule == 0 { - ld.pkgs[i].Module = nil - } - } - - return result, nil -} - -// loadRecursive loads the specified package and its dependencies, -// recursively, in parallel, in topological order. -// It is atomic and idempotent. -// Precondition: ld.Mode&NeedTypes. -func (ld *loader) loadRecursive(lpkg *loaderPackage) { - lpkg.loadOnce.Do(func() { - // Load the direct dependencies, in parallel. - var wg sync.WaitGroup - for _, ipkg := range lpkg.Imports { - imp := ld.pkgs[ipkg.ID] - wg.Add(1) - go func(imp *loaderPackage) { - ld.loadRecursive(imp) - wg.Done() - }(imp) - } - wg.Wait() - ld.loadPackage(lpkg) - }) -} - -// loadPackage loads the specified package. -// It must be called only once per Package, -// after immediate dependencies are loaded. -// Precondition: ld.Mode & NeedTypes. -func (ld *loader) loadPackage(lpkg *loaderPackage) { - if lpkg.PkgPath == "unsafe" { - // Fill in the blanks to avoid surprises. - lpkg.Types = types.Unsafe - lpkg.Fset = ld.Fset - lpkg.Syntax = []*ast.File{} - lpkg.TypesInfo = new(types.Info) - lpkg.TypesSizes = ld.sizes - return - } - - // Call NewPackage directly with explicit name. - // This avoids skew between golist and go/types when the files' - // package declarations are inconsistent. - lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name) - lpkg.Fset = ld.Fset - - // Start shutting down if the context is done and do not load - // source or export data files. - // Packages that import this one will have ld.Context.Err() != nil. - // ld.Context.Err() will be returned later by refine. - if ld.Context.Err() != nil { - return - } - - // Subtle: we populate all Types fields with an empty Package - // before loading export data so that export data processing - // never has to create a types.Package for an indirect dependency, - // which would then require that such created packages be explicitly - // inserted back into the Import graph as a final step after export data loading. - // (Hence this return is after the Types assignment.) - // The Diamond test exercises this case. - if !lpkg.needtypes && !lpkg.needsrc { - return - } - if !lpkg.needsrc { - if err := ld.loadFromExportData(lpkg); err != nil { - lpkg.Errors = append(lpkg.Errors, Error{ - Pos: "-", - Msg: err.Error(), - Kind: UnknownError, // e.g. can't find/open/parse export data - }) - } - return // not a source package, don't get syntax trees - } - - appendError := func(err error) { - // Convert various error types into the one true Error. 
- var errs []Error - switch err := err.(type) { - case Error: - // from driver - errs = append(errs, err) - - case *os.PathError: - // from parser - errs = append(errs, Error{ - Pos: err.Path + ":1", - Msg: err.Err.Error(), - Kind: ParseError, - }) - - case scanner.ErrorList: - // from parser - for _, err := range err { - errs = append(errs, Error{ - Pos: err.Pos.String(), - Msg: err.Msg, - Kind: ParseError, - }) - } - - case types.Error: - // from type checker - lpkg.TypeErrors = append(lpkg.TypeErrors, err) - errs = append(errs, Error{ - Pos: err.Fset.Position(err.Pos).String(), - Msg: err.Msg, - Kind: TypeError, - }) - - default: - // unexpected impoverished error from parser? - errs = append(errs, Error{ - Pos: "-", - Msg: err.Error(), - Kind: UnknownError, - }) - - // If you see this error message, please file a bug. - log.Printf("internal error: error %q (%T) without position", err, err) - } - - lpkg.Errors = append(lpkg.Errors, errs...) - } - - // If the go command on the PATH is newer than the runtime, - // then the go/{scanner,ast,parser,types} packages from the - // standard library may be unable to process the files - // selected by go list. - // - // There is currently no way to downgrade the effective - // version of the go command (see issue 52078), so we proceed - // with the newer go command but, in case of parse or type - // errors, we emit an additional diagnostic. - // - // See: - // - golang.org/issue/52078 (flag to set release tags) - // - golang.org/issue/50825 (gopls legacy version support) - // - golang.org/issue/55883 (go/packages confusing error) - // - // Should we assert a hard minimum of (currently) go1.16 here? - var runtimeVersion int - if _, err := fmt.Sscanf(runtime.Version(), "go1.%d", &runtimeVersion); err == nil && runtimeVersion < lpkg.goVersion { - defer func() { - if len(lpkg.Errors) > 0 { - appendError(Error{ - Pos: "-", - Msg: fmt.Sprintf("This application uses version go1.%d of the source-processing packages but runs version go1.%d of 'go list'. It may fail to process source files that rely on newer language features. If so, rebuild the application using a newer version of Go.", runtimeVersion, lpkg.goVersion), - Kind: UnknownError, - }) - } - }() - } - - if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" { - // The config requested loading sources and types, but sources are missing. - // Add an error to the package and fall back to loading from export data. - appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError}) - _ = ld.loadFromExportData(lpkg) // ignore any secondary errors - - return // can't get syntax trees for this package - } - - files, errs := ld.parseFiles(lpkg.CompiledGoFiles) - for _, err := range errs { - appendError(err) - } - - lpkg.Syntax = files - if ld.Config.Mode&NeedTypes == 0 { - return - } - - // Start shutting down if the context is done and do not type check. - // Packages that import this one will have ld.Context.Err() != nil. - // ld.Context.Err() will be returned later by refine. 
- if ld.Context.Err() != nil { - return - } - - lpkg.TypesInfo = &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Instances: make(map[*ast.Ident]types.Instance), - Scopes: make(map[ast.Node]*types.Scope), - Selections: make(map[*ast.SelectorExpr]*types.Selection), - } - versions.InitFileVersions(lpkg.TypesInfo) - lpkg.TypesSizes = ld.sizes - - importer := importerFunc(func(path string) (*types.Package, error) { - if path == "unsafe" { - return types.Unsafe, nil - } - - // The imports map is keyed by import path. - ipkg := lpkg.Imports[path] - if ipkg == nil { - if err := lpkg.importErrors[path]; err != nil { - return nil, err - } - // There was skew between the metadata and the - // import declarations, likely due to an edit - // race, or because the ParseFile feature was - // used to supply alternative file contents. - return nil, fmt.Errorf("no metadata for %s", path) - } - - if ipkg.Types != nil && ipkg.Types.Complete() { - return ipkg.Types, nil - } - log.Fatalf("internal error: package %q without types was imported from %q", path, lpkg) - panic("unreachable") - }) - - // type-check - tc := &types.Config{ - Importer: importer, - - // Type-check bodies of functions only in initial packages. - // Example: for import graph A->B->C and initial packages {A,C}, - // we can ignore function bodies in B. - IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial, - - Error: appendError, - Sizes: ld.sizes, // may be nil - } - if lpkg.Module != nil && lpkg.Module.GoVersion != "" { - tc.GoVersion = "go" + lpkg.Module.GoVersion - } - if (ld.Mode & typecheckCgo) != 0 { - if !typesinternal.SetUsesCgo(tc) { - appendError(Error{ - Msg: "typecheckCgo requires Go 1.15+", - Kind: ListError, - }) - return - } - } - - typErr := types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) - lpkg.importErrors = nil // no longer needed - - // In go/types go1.21 and go1.22, Checker.Files failed fast with a - // a "too new" error, without calling tc.Error and without - // proceeding to type-check the package (#66525). - // We rely on the runtimeVersion error to give the suggested remedy. - if typErr != nil && len(lpkg.Errors) == 0 && len(lpkg.Syntax) > 0 { - if msg := typErr.Error(); strings.HasPrefix(msg, "package requires newer Go version") { - appendError(types.Error{ - Fset: ld.Fset, - Pos: lpkg.Syntax[0].Package, - Msg: msg, - }) - } - } - - // If !Cgo, the type-checker uses FakeImportC mode, so - // it doesn't invoke the importer for import "C", - // nor report an error for the import, - // or for any undefined C.f reference. - // We must detect this explicitly and correctly - // mark the package as IllTyped (by reporting an error). - // TODO(adonovan): if these errors are annoying, - // we could just set IllTyped quietly. - if tc.FakeImportC { - outer: - for _, f := range lpkg.Syntax { - for _, imp := range f.Imports { - if imp.Path.Value == `"C"` { - err := types.Error{Fset: ld.Fset, Pos: imp.Pos(), Msg: `import "C" ignored`} - appendError(err) - break outer - } - } - } - } - - // If types.Checker.Files had an error that was unreported, - // make sure to report the unknown error so the package is illTyped. - if typErr != nil && len(lpkg.Errors) == 0 { - appendError(typErr) - } - - // Record accumulated errors. 
- illTyped := len(lpkg.Errors) > 0 - if !illTyped { - for _, imp := range lpkg.Imports { - if imp.IllTyped { - illTyped = true - break - } - } - } - lpkg.IllTyped = illTyped -} - -// An importFunc is an implementation of the single-method -// types.Importer interface based on a function value. -type importerFunc func(path string) (*types.Package, error) - -func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } - -// We use a counting semaphore to limit -// the number of parallel I/O calls per process. -var ioLimit = make(chan bool, 20) - -func (ld *loader) parseFile(filename string) (*ast.File, error) { - ld.parseCacheMu.Lock() - v, ok := ld.parseCache[filename] - if ok { - // cache hit - ld.parseCacheMu.Unlock() - <-v.ready - } else { - // cache miss - v = &parseValue{ready: make(chan struct{})} - ld.parseCache[filename] = v - ld.parseCacheMu.Unlock() - - var src []byte - for f, contents := range ld.Config.Overlay { - if sameFile(f, filename) { - src = contents - } - } - var err error - if src == nil { - ioLimit <- true // wait - src, err = os.ReadFile(filename) - <-ioLimit // signal - } - if err != nil { - v.err = err - } else { - v.f, v.err = ld.ParseFile(ld.Fset, filename, src) - } - - close(v.ready) - } - return v.f, v.err -} - -// parseFiles reads and parses the Go source files and returns the ASTs -// of the ones that could be at least partially parsed, along with a -// list of I/O and parse errors encountered. -// -// Because files are scanned in parallel, the token.Pos -// positions of the resulting ast.Files are not ordered. -func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { - var wg sync.WaitGroup - n := len(filenames) - parsed := make([]*ast.File, n) - errors := make([]error, n) - for i, file := range filenames { - wg.Add(1) - go func(i int, filename string) { - parsed[i], errors[i] = ld.parseFile(filename) - wg.Done() - }(i, file) - } - wg.Wait() - - // Eliminate nils, preserving order. - var o int - for _, f := range parsed { - if f != nil { - parsed[o] = f - o++ - } - } - parsed = parsed[:o] - - o = 0 - for _, err := range errors { - if err != nil { - errors[o] = err - o++ - } - } - errors = errors[:o] - - return parsed, errors -} - -// sameFile returns true if x and y have the same basename and denote -// the same file. -func sameFile(x, y string) bool { - if x == y { - // It could be the case that y doesn't exist. - // For instance, it may be an overlay file that - // hasn't been written to disk. To handle that case - // let x == y through. (We added the exact absolute path - // string to the CompiledGoFiles list, so the unwritten - // overlay case implies x==y.) - return true - } - if strings.EqualFold(filepath.Base(x), filepath.Base(y)) { // (optimisation) - if xi, err := os.Stat(x); err == nil { - if yi, err := os.Stat(y); err == nil { - return os.SameFile(xi, yi) - } - } - } - return false -} - -// loadFromExportData ensures that type information is present for the specified -// package, loading it from an export data file on the first request. -// On success it sets lpkg.Types to a new Package. -func (ld *loader) loadFromExportData(lpkg *loaderPackage) error { - if lpkg.PkgPath == "" { - log.Fatalf("internal error: Package %s has no PkgPath", lpkg) - } - - // Because gcexportdata.Read has the potential to create or - // modify the types.Package for each node in the transitive - // closure of dependencies of lpkg, all exportdata operations - // must be sequential. 
(Finer-grained locking would require - // changes to the gcexportdata API.) - // - // The exportMu lock guards the lpkg.Types field and the - // types.Package it points to, for each loaderPackage in the graph. - // - // Not all accesses to Package.Pkg need to be protected by exportMu: - // graph ordering ensures that direct dependencies of source - // packages are fully loaded before the importer reads their Pkg field. - ld.exportMu.Lock() - defer ld.exportMu.Unlock() - - if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() { - return nil // cache hit - } - - lpkg.IllTyped = true // fail safe - - if lpkg.ExportFile == "" { - // Errors while building export data will have been printed to stderr. - return fmt.Errorf("no export data file") - } - f, err := os.Open(lpkg.ExportFile) - if err != nil { - return err - } - defer f.Close() - - // Read gc export data. - // - // We don't currently support gccgo export data because all - // underlying workspaces use the gc toolchain. (Even build - // systems that support gccgo don't use it for workspace - // queries.) - r, err := gcexportdata.NewReader(f) - if err != nil { - return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) - } - - // Build the view. - // - // The gcexportdata machinery has no concept of package ID. - // It identifies packages by their PkgPath, which although not - // globally unique is unique within the scope of one invocation - // of the linker, type-checker, or gcexportdata. - // - // So, we must build a PkgPath-keyed view of the global - // (conceptually ID-keyed) cache of packages and pass it to - // gcexportdata. The view must contain every existing - // package that might possibly be mentioned by the - // current package---its transitive closure. - // - // In loadPackage, we unconditionally create a types.Package for - // each dependency so that export data loading does not - // create new ones. - // - // TODO(adonovan): it would be simpler and more efficient - // if the export data machinery invoked a callback to - // get-or-create a package instead of a map. - // - view := make(map[string]*types.Package) // view seen by gcexportdata - seen := make(map[*loaderPackage]bool) // all visited packages - var visit func(pkgs map[string]*Package) - visit = func(pkgs map[string]*Package) { - for _, p := range pkgs { - lpkg := ld.pkgs[p.ID] - if !seen[lpkg] { - seen[lpkg] = true - view[lpkg.PkgPath] = lpkg.Types - visit(lpkg.Imports) - } - } - } - visit(lpkg.Imports) - - viewLen := len(view) + 1 // adding the self package - // Parse the export data. - // (May modify incomplete packages in view but not create new ones.) - tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath) - if err != nil { - return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) - } - if _, ok := view["go.shape"]; ok { - // Account for the pseudopackage "go.shape" that gets - // created by generic code. - viewLen++ - } - if viewLen != len(view) { - log.Panicf("golang.org/x/tools/go/packages: unexpected new packages during load of %s", lpkg.PkgPath) - } - - lpkg.Types = tpkg - lpkg.IllTyped = false - return nil -} - -// impliedLoadMode returns loadMode with its dependencies. -func impliedLoadMode(loadMode LoadMode) LoadMode { - if loadMode&(NeedDeps|NeedTypes|NeedTypesInfo) != 0 { - // All these things require knowing the import graph. 
- loadMode |= NeedImports - } - - return loadMode -} - -func usesExportData(cfg *Config) bool { - return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 -} - -var _ interface{} = io.Discard // assert build toolchain is go1.16 or later diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go deleted file mode 100644 index a1dcc40b7..000000000 --- a/vendor/golang.org/x/tools/go/packages/visit.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packages - -import ( - "fmt" - "os" - "sort" -) - -// Visit visits all the packages in the import graph whose roots are -// pkgs, calling the optional pre function the first time each package -// is encountered (preorder), and the optional post function after a -// package's dependencies have been visited (postorder). -// The boolean result of pre(pkg) determines whether -// the imports of package pkg are visited. -func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { - seen := make(map[*Package]bool) - var visit func(*Package) - visit = func(pkg *Package) { - if !seen[pkg] { - seen[pkg] = true - - if pre == nil || pre(pkg) { - paths := make([]string, 0, len(pkg.Imports)) - for path := range pkg.Imports { - paths = append(paths, path) - } - sort.Strings(paths) // Imports is a map, this makes visit stable - for _, path := range paths { - visit(pkg.Imports[path]) - } - } - - if post != nil { - post(pkg) - } - } - } - for _, pkg := range pkgs { - visit(pkg) - } -} - -// PrintErrors prints to os.Stderr the accumulated errors of all -// packages in the import graph rooted at pkgs, dependencies first. -// PrintErrors returns the number of errors printed. -func PrintErrors(pkgs []*Package) int { - var n int - Visit(pkgs, nil, func(pkg *Package) { - for _, err := range pkg.Errors { - fmt.Fprintln(os.Stderr, err) - n++ - } - }) - return n -} diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go deleted file mode 100644 index a2386c347..000000000 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ /dev/null @@ -1,753 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package objectpath defines a naming scheme for types.Objects -// (that is, named entities in Go programs) relative to their enclosing -// package. -// -// Type-checker objects are canonical, so they are usually identified by -// their address in memory (a pointer), but a pointer has meaning only -// within one address space. By contrast, objectpath names allow the -// identity of an object to be sent from one program to another, -// establishing a correspondence between types.Object variables that are -// distinct but logically equivalent. -// -// A single object may have multiple paths. In this example, -// -// type A struct{ X int } -// type B A -// -// the field X has two paths due to its membership of both A and B. -// The For(obj) function always returns one of these paths, arbitrarily -// but consistently. 
-package objectpath - -import ( - "fmt" - "go/types" - "strconv" - "strings" - - "golang.org/x/tools/internal/aliases" - "golang.org/x/tools/internal/typesinternal" -) - -// TODO(adonovan): think about generic aliases. - -// A Path is an opaque name that identifies a types.Object -// relative to its package. Conceptually, the name consists of a -// sequence of destructuring operations applied to the package scope -// to obtain the original object. -// The name does not include the package itself. -type Path string - -// Encoding -// -// An object path is a textual and (with training) human-readable encoding -// of a sequence of destructuring operators, starting from a types.Package. -// The sequences represent a path through the package/object/type graph. -// We classify these operators by their type: -// -// PO package->object Package.Scope.Lookup -// OT object->type Object.Type -// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU] -// TO type->object Type.{At,Field,Method,Obj} [AFMO] -// -// All valid paths start with a package and end at an object -// and thus may be defined by the regular language: -// -// objectpath = PO (OT TT* TO)* -// -// The concrete encoding follows directly: -// - The only PO operator is Package.Scope.Lookup, which requires an identifier. -// - The only OT operator is Object.Type, -// which we encode as '.' because dot cannot appear in an identifier. -// - The TT operators are encoded as [EKPRUTC]; -// one of these (TypeParam) requires an integer operand, -// which is encoded as a string of decimal digits. -// - The TO operators are encoded as [AFMO]; -// three of these (At,Field,Method) require an integer operand, -// which is encoded as a string of decimal digits. -// These indices are stable across different representations -// of the same package, even source and export data. -// The indices used are implementation specific and may not correspond to -// the argument to the go/types function. -// -// In the example below, -// -// package p -// -// type T interface { -// f() (a string, b struct{ X int }) -// } -// -// field X has the path "T.UM0.RA1.F0", -// representing the following sequence of operations: -// -// p.Lookup("T") T -// .Type().Underlying().Method(0). f -// .Type().Results().At(1) b -// .Type().Field(0) X -// -// The encoding is not maximally compact---every R or P is -// followed by an A, for example---but this simplifies the -// encoder and decoder. -const ( - // object->type operators - opType = '.' // .Type() (Object) - - // type->type operators - opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) - opKey = 'K' // .Key() (Map) - opParams = 'P' // .Params() (Signature) - opResults = 'R' // .Results() (Signature) - opUnderlying = 'U' // .Underlying() (Named) - opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) - opConstraint = 'C' // .Constraint() (TypeParam) - - // type->object operators - opAt = 'A' // .At(i) (Tuple) - opField = 'F' // .Field(i) (Struct) - opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) - opObj = 'O' // .Obj() (Named, TypeParam) -) - -// For is equivalent to new(Encoder).For(obj). -// -// It may be more efficient to reuse a single Encoder across several calls. -func For(obj types.Object) (Path, error) { - return new(Encoder).For(obj) -} - -// An Encoder amortizes the cost of encoding the paths of multiple objects. -// The zero value of an Encoder is ready to use. 
-type Encoder struct { - scopeMemo map[*types.Scope][]types.Object // memoization of scopeObjects -} - -// For returns the path to an object relative to its package, -// or an error if the object is not accessible from the package's Scope. -// -// The For function guarantees to return a path only for the following objects: -// - package-level types -// - exported package-level non-types -// - methods -// - parameter and result variables -// - struct fields -// These objects are sufficient to define the API of their package. -// The objects described by a package's export data are drawn from this set. -// -// The set of objects accessible from a package's Scope depends on -// whether the package was produced by type-checking syntax, or -// reading export data; the latter may have a smaller Scope since -// export data trims objects that are not reachable from an exported -// declaration. For example, the For function will return a path for -// an exported method of an unexported type that is not reachable -// from any public declaration; this path will cause the Object -// function to fail if called on a package loaded from export data. -// TODO(adonovan): is this a bug or feature? Should this package -// compute accessibility in the same way? -// -// For does not return a path for predeclared names, imported package -// names, local names, and unexported package-level names (except -// types). -// -// Example: given this definition, -// -// package p -// -// type T interface { -// f() (a string, b struct{ X int }) -// } -// -// For(X) would return a path that denotes the following sequence of operations: -// -// p.Scope().Lookup("T") (TypeName T) -// .Type().Underlying().Method(0). (method Func f) -// .Type().Results().At(1) (field Var b) -// .Type().Field(0) (field Var X) -// -// where p is the package (*types.Package) to which X belongs. -func (enc *Encoder) For(obj types.Object) (Path, error) { - pkg := obj.Pkg() - - // This table lists the cases of interest. - // - // Object Action - // ------ ------ - // nil reject - // builtin reject - // pkgname reject - // label reject - // var - // package-level accept - // func param/result accept - // local reject - // struct field accept - // const - // package-level accept - // local reject - // func - // package-level accept - // init functions reject - // concrete method accept - // interface method accept - // type - // package-level accept - // local reject - // - // The only accessible package-level objects are members of pkg itself. - // - // The cases are handled in four steps: - // - // 1. reject nil and builtin - // 2. accept package-level objects - // 3. reject obviously invalid objects - // 4. search the API for the path to the param/result/field/method. - - // 1. reference to nil or builtin? - if pkg == nil { - return "", fmt.Errorf("predeclared %s has no path", obj) - } - scope := pkg.Scope() - - // 2. package-level object? - if scope.Lookup(obj.Name()) == obj { - // Only exported objects (and non-exported types) have a path. - // Non-exported types may be referenced by other objects. - if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() { - return "", fmt.Errorf("no path for non-exported %v", obj) - } - return Path(obj.Name()), nil - } - - // 3. Not a package-level object. - // Reject obviously non-viable cases. - switch obj := obj.(type) { - case *types.TypeName: - if _, ok := aliases.Unalias(obj.Type()).(*types.TypeParam); !ok { - // With the exception of type parameters, only package-level type names - // have a path. 
- return "", fmt.Errorf("no path for %v", obj) - } - case *types.Const, // Only package-level constants have a path. - *types.Label, // Labels are function-local. - *types.PkgName: // PkgNames are file-local. - return "", fmt.Errorf("no path for %v", obj) - - case *types.Var: - // Could be: - // - a field (obj.IsField()) - // - a func parameter or result - // - a local var. - // Sadly there is no way to distinguish - // a param/result from a local - // so we must proceed to the find. - - case *types.Func: - // A func, if not package-level, must be a method. - if recv := obj.Type().(*types.Signature).Recv(); recv == nil { - return "", fmt.Errorf("func is not a method: %v", obj) - } - - if path, ok := enc.concreteMethod(obj); ok { - // Fast path for concrete methods that avoids looping over scope. - return path, nil - } - - default: - panic(obj) - } - - // 4. Search the API for the path to the var (field/param/result) or method. - - // First inspect package-level named types. - // In the presence of path aliases, these give - // the best paths because non-types may - // refer to types, but not the reverse. - empty := make([]byte, 0, 48) // initial space - objs := enc.scopeObjects(scope) - for _, o := range objs { - tname, ok := o.(*types.TypeName) - if !ok { - continue // handle non-types in second pass - } - - path := append(empty, o.Name()...) - path = append(path, opType) - - T := o.Type() - - if tname.IsAlias() { - // type alias - if r := find(obj, T, path, nil); r != nil { - return Path(r), nil - } - } else { - if named, _ := T.(*types.Named); named != nil { - if r := findTypeParam(obj, named.TypeParams(), path, nil); r != nil { - // generic named type - return Path(r), nil - } - } - // defined (named) type - if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil { - return Path(r), nil - } - } - } - - // Then inspect everything else: - // non-types, and declared methods of defined types. - for _, o := range objs { - path := append(empty, o.Name()...) - if _, ok := o.(*types.TypeName); !ok { - if o.Exported() { - // exported non-type (const, var, func) - if r := find(obj, o.Type(), append(path, opType), nil); r != nil { - return Path(r), nil - } - } - continue - } - - // Inspect declared methods of defined types. - if T, ok := aliases.Unalias(o.Type()).(*types.Named); ok { - path = append(path, opType) - // The method index here is always with respect - // to the underlying go/types data structures, - // which ultimately derives from source order - // and must be preserved by export data. - for i := 0; i < T.NumMethods(); i++ { - m := T.Method(i) - path2 := appendOpArg(path, opMethod, i) - if m == obj { - return Path(path2), nil // found declared method - } - if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { - return Path(r), nil - } - } - } - } - - return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path()) -} - -func appendOpArg(path []byte, op byte, arg int) []byte { - path = append(path, op) - path = strconv.AppendInt(path, int64(arg), 10) - return path -} - -// concreteMethod returns the path for meth, which must have a non-nil receiver. -// The second return value indicates success and may be false if the method is -// an interface method or if it is an instantiated method. -// -// This function is just an optimization that avoids the general scope walking -// approach. You are expected to fall back to the general approach if this -// function fails. 
-func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { - // Concrete methods can only be declared on package-scoped named types. For - // that reason we can skip the expensive walk over the package scope: the - // path will always be package -> named type -> method. We can trivially get - // the type name from the receiver, and only have to look over the type's - // methods to find the method index. - // - // Methods on generic types require special consideration, however. Consider - // the following package: - // - // L1: type S[T any] struct{} - // L2: func (recv S[A]) Foo() { recv.Bar() } - // L3: func (recv S[B]) Bar() { } - // L4: type Alias = S[int] - // L5: func _[T any]() { var s S[int]; s.Foo() } - // - // The receivers of methods on generic types are instantiations. L2 and L3 - // instantiate S with the type-parameters A and B, which are scoped to the - // respective methods. L4 and L5 each instantiate S with int. Each of these - // instantiations has its own method set, full of methods (and thus objects) - // with receivers whose types are the respective instantiations. In other - // words, we have - // - // S[A].Foo, S[A].Bar - // S[B].Foo, S[B].Bar - // S[int].Foo, S[int].Bar - // - // We may thus be trying to produce object paths for any of these objects. - // - // S[A].Foo and S[B].Bar are the origin methods, and their paths are S.Foo - // and S.Bar, which are the paths that this function naturally produces. - // - // S[A].Bar, S[B].Foo, and both methods on S[int] are instantiations that - // don't correspond to the origin methods. For S[int], this is significant. - // The most precise object path for S[int].Foo, for example, is Alias.Foo, - // not S.Foo. Our function, however, would produce S.Foo, which would - // resolve to a different object. - // - // For S[A].Bar and S[B].Foo it could be argued that S.Bar and S.Foo are - // still the correct paths, since only the origin methods have meaningful - // paths. But this is likely only true for trivial cases and has edge cases. - // Since this function is only an optimization, we err on the side of giving - // up, deferring to the slower but definitely correct algorithm. Most users - // of objectpath will only be giving us origin methods, anyway, as referring - // to instantiated methods is usually not useful. - - if meth.Origin() != meth { - return "", false - } - - _, named := typesinternal.ReceiverNamed(meth.Type().(*types.Signature).Recv()) - if named == nil { - return "", false - } - - if types.IsInterface(named) { - // Named interfaces don't have to be package-scoped - // - // TODO(dominikh): opt: if scope.Lookup(name) == named, then we can apply this optimization to interface - // methods, too, I think. - return "", false - } - - // Preallocate space for the name, opType, opMethod, and some digits. - name := named.Obj().Name() - path := make([]byte, 0, len(name)+8) - path = append(path, name...) - path = append(path, opType) - - // Method indices are w.r.t. the go/types data structures, - // ultimately deriving from source order, - // which is preserved by export data. - for i := 0; i < named.NumMethods(); i++ { - if named.Method(i) == meth { - path = appendOpArg(path, opMethod, i) - return Path(path), true - } - } - - // Due to golang/go#59944, go/types fails to associate the receiver with - // certain methods on cgo types. - // - // TODO(rfindley): replace this panic once golang/go#59944 is fixed in all Go - // versions gopls supports. 
- return "", false - // panic(fmt.Sprintf("couldn't find method %s on type %s; methods: %#v", meth, named, enc.namedMethods(named))) -} - -// find finds obj within type T, returning the path to it, or nil if not found. -// -// The seen map is used to short circuit cycles through type parameters. If -// nil, it will be allocated as necessary. -func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { - switch T := T.(type) { - case *aliases.Alias: - return find(obj, aliases.Unalias(T), path, seen) - case *types.Basic, *types.Named: - // Named types belonging to pkg were handled already, - // so T must belong to another package. No path. - return nil - case *types.Pointer: - return find(obj, T.Elem(), append(path, opElem), seen) - case *types.Slice: - return find(obj, T.Elem(), append(path, opElem), seen) - case *types.Array: - return find(obj, T.Elem(), append(path, opElem), seen) - case *types.Chan: - return find(obj, T.Elem(), append(path, opElem), seen) - case *types.Map: - if r := find(obj, T.Key(), append(path, opKey), seen); r != nil { - return r - } - return find(obj, T.Elem(), append(path, opElem), seen) - case *types.Signature: - if r := findTypeParam(obj, T.TypeParams(), path, seen); r != nil { - return r - } - if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { - return r - } - return find(obj, T.Results(), append(path, opResults), seen) - case *types.Struct: - for i := 0; i < T.NumFields(); i++ { - fld := T.Field(i) - path2 := appendOpArg(path, opField, i) - if fld == obj { - return path2 // found field var - } - if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil { - return r - } - } - return nil - case *types.Tuple: - for i := 0; i < T.Len(); i++ { - v := T.At(i) - path2 := appendOpArg(path, opAt, i) - if v == obj { - return path2 // found param/result var - } - if r := find(obj, v.Type(), append(path2, opType), seen); r != nil { - return r - } - } - return nil - case *types.Interface: - for i := 0; i < T.NumMethods(); i++ { - m := T.Method(i) - path2 := appendOpArg(path, opMethod, i) - if m == obj { - return path2 // found interface method - } - if r := find(obj, m.Type(), append(path2, opType), seen); r != nil { - return r - } - } - return nil - case *types.TypeParam: - name := T.Obj() - if name == obj { - return append(path, opObj) - } - if seen[name] { - return nil - } - if seen == nil { - seen = make(map[*types.TypeName]bool) - } - seen[name] = true - if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil { - return r - } - return nil - } - panic(T) -} - -func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { - for i := 0; i < list.Len(); i++ { - tparam := list.At(i) - path2 := appendOpArg(path, opTypeParam, i) - if r := find(obj, tparam, path2, seen); r != nil { - return r - } - } - return nil -} - -// Object returns the object denoted by path p within the package pkg. -func Object(pkg *types.Package, p Path) (types.Object, error) { - pathstr := string(p) - if pathstr == "" { - return nil, fmt.Errorf("empty path") - } - - var pkgobj, suffix string - if dot := strings.IndexByte(pathstr, opType); dot < 0 { - pkgobj = pathstr - } else { - pkgobj = pathstr[:dot] - suffix = pathstr[dot:] // suffix starts with "." 
- } - - obj := pkg.Scope().Lookup(pkgobj) - if obj == nil { - return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj) - } - - // abstraction of *types.{Pointer,Slice,Array,Chan,Map} - type hasElem interface { - Elem() types.Type - } - // abstraction of *types.{Named,Signature} - type hasTypeParams interface { - TypeParams() *types.TypeParamList - } - // abstraction of *types.{Named,TypeParam} - type hasObj interface { - Obj() *types.TypeName - } - - // The loop state is the pair (t, obj), - // exactly one of which is non-nil, initially obj. - // All suffixes start with '.' (the only object->type operation), - // followed by optional type->type operations, - // then a type->object operation. - // The cycle then repeats. - var t types.Type - for suffix != "" { - code := suffix[0] - suffix = suffix[1:] - - // Codes [AFM] have an integer operand. - var index int - switch code { - case opAt, opField, opMethod, opTypeParam: - rest := strings.TrimLeft(suffix, "0123456789") - numerals := suffix[:len(suffix)-len(rest)] - suffix = rest - i, err := strconv.Atoi(numerals) - if err != nil { - return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code) - } - index = int(i) - case opObj: - // no operand - default: - // The suffix must end with a type->object operation. - if suffix == "" { - return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code) - } - } - - if code == opType { - if t != nil { - return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType) - } - t = obj.Type() - obj = nil - continue - } - - if t == nil { - return nil, fmt.Errorf("invalid path: code %q in object context", code) - } - - // Inv: t != nil, obj == nil - - t = aliases.Unalias(t) - switch code { - case opElem: - hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t) - } - t = hasElem.Elem() - - case opKey: - mapType, ok := t.(*types.Map) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t) - } - t = mapType.Key() - - case opParams: - sig, ok := t.(*types.Signature) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) - } - t = sig.Params() - - case opResults: - sig, ok := t.(*types.Signature) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) - } - t = sig.Results() - - case opUnderlying: - named, ok := t.(*types.Named) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named)", code, t, t) - } - t = named.Underlying() - - case opTypeParam: - hasTypeParams, ok := t.(hasTypeParams) // Named, Signature - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or signature)", code, t, t) - } - tparams := hasTypeParams.TypeParams() - if n := tparams.Len(); index >= n { - return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) - } - t = tparams.At(index) - - case opConstraint: - tparam, ok := t.(*types.TypeParam) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t) - } - t = tparam.Constraint() - - case opAt: - tuple, ok := t.(*types.Tuple) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want tuple)", code, t, t) - } - if n := tuple.Len(); index >= n { - return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) - } - obj = tuple.At(index) - t = nil - - 
case opField: - structType, ok := t.(*types.Struct) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t) - } - if n := structType.NumFields(); index >= n { - return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n) - } - obj = structType.Field(index) - t = nil - - case opMethod: - switch t := t.(type) { - case *types.Interface: - if index >= t.NumMethods() { - return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) - } - obj = t.Method(index) // Id-ordered - - case *types.Named: - if index >= t.NumMethods() { - return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) - } - obj = t.Method(index) - - default: - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t) - } - t = nil - - case opObj: - hasObj, ok := t.(hasObj) - if !ok { - return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or type param)", code, t, t) - } - obj = hasObj.Obj() - t = nil - - default: - return nil, fmt.Errorf("invalid path: unknown code %q", code) - } - } - - if obj.Pkg() != pkg { - return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj) - } - - return obj, nil // success -} - -// scopeObjects is a memoization of scope objects. -// Callers must not modify the result. -func (enc *Encoder) scopeObjects(scope *types.Scope) []types.Object { - m := enc.scopeMemo - if m == nil { - m = make(map[*types.Scope][]types.Object) - enc.scopeMemo = m - } - objs, ok := m[scope] - if !ok { - names := scope.Names() // allocates and sorts - objs = make([]types.Object, len(names)) - for i, name := range names { - objs[i] = scope.Lookup(name) - } - m[scope] = objs - } - return objs -} diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases.go b/vendor/golang.org/x/tools/internal/aliases/aliases.go deleted file mode 100644 index c24c2eee4..000000000 --- a/vendor/golang.org/x/tools/internal/aliases/aliases.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package aliases - -import ( - "go/token" - "go/types" -) - -// Package aliases defines backward compatible shims -// for the types.Alias type representation added in 1.22. -// This defines placeholders for x/tools until 1.26. - -// NewAlias creates a new TypeName in Package pkg that -// is an alias for the type rhs. -// -// The enabled parameter determines whether the resulting [TypeName]'s -// type is an [types.Alias]. Its value must be the result of a call to -// [Enabled], which computes the effective value of -// GODEBUG=gotypesalias=... by invoking the type checker. The Enabled -// function is expensive and should be called once per task (e.g. -// package import), not once per call to NewAlias. -func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type) *types.TypeName { - if enabled { - tname := types.NewTypeName(pos, pkg, name, nil) - newAlias(tname, rhs) - return tname - } - return types.NewTypeName(pos, pkg, name, rhs) -} diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go deleted file mode 100644 index c027b9f31..000000000 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.22 -// +build !go1.22 - -package aliases - -import ( - "go/types" -) - -// Alias is a placeholder for a go/types.Alias for <=1.21. -// It will never be created by go/types. -type Alias struct{} - -func (*Alias) String() string { panic("unreachable") } -func (*Alias) Underlying() types.Type { panic("unreachable") } -func (*Alias) Obj() *types.TypeName { panic("unreachable") } -func Rhs(alias *Alias) types.Type { panic("unreachable") } - -// Unalias returns the type t for go <=1.21. -func Unalias(t types.Type) types.Type { return t } - -func newAlias(name *types.TypeName, rhs types.Type) *Alias { panic("unreachable") } - -// Enabled reports whether [NewAlias] should create [types.Alias] types. -// -// Before go1.22, this function always returns false. -func Enabled() bool { return false } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go deleted file mode 100644 index b32995484..000000000 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.22 -// +build go1.22 - -package aliases - -import ( - "go/ast" - "go/parser" - "go/token" - "go/types" -) - -// Alias is an alias of types.Alias. -type Alias = types.Alias - -// Rhs returns the type on the right-hand side of the alias declaration. -func Rhs(alias *Alias) types.Type { - if alias, ok := any(alias).(interface{ Rhs() types.Type }); ok { - return alias.Rhs() // go1.23+ - } - - // go1.22's Alias didn't have the Rhs method, - // so Unalias is the best we can do. - return Unalias(alias) -} - -// Unalias is a wrapper of types.Unalias. -func Unalias(t types.Type) types.Type { return types.Unalias(t) } - -// newAlias is an internal alias around types.NewAlias. -// Direct usage is discouraged as the moment. -// Try to use NewAlias instead. -func newAlias(tname *types.TypeName, rhs types.Type) *Alias { - a := types.NewAlias(tname, rhs) - // TODO(go.dev/issue/65455): Remove kludgy workaround to set a.actual as a side-effect. - Unalias(a) - return a -} - -// Enabled reports whether [NewAlias] should create [types.Alias] types. -// -// This function is expensive! Call it sparingly. -func Enabled() bool { - // The only reliable way to compute the answer is to invoke go/types. - // We don't parse the GODEBUG environment variable, because - // (a) it's tricky to do so in a manner that is consistent - // with the godebug package; in particular, a simple - // substring check is not good enough. The value is a - // rightmost-wins list of options. But more importantly: - // (b) it is impossible to detect changes to the effective - // setting caused by os.Setenv("GODEBUG"), as happens in - // many tests. Therefore any attempt to cache the result - // is just incorrect. 
- fset := token.NewFileSet() - f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", 0) - pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil) - _, enabled := pkg.Scope().Lookup("A").Type().(*types.Alias) - return enabled -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go deleted file mode 100644 index d98b0db2a..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file contains the remaining vestiges of -// $GOROOT/src/go/internal/gcimporter/bimport.go. - -package gcimporter - -import ( - "fmt" - "go/token" - "go/types" - "sync" -) - -func errorf(format string, args ...interface{}) { - panic(fmt.Sprintf(format, args...)) -} - -const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go - -// Synthesize a token.Pos -type fakeFileSet struct { - fset *token.FileSet - files map[string]*fileInfo -} - -type fileInfo struct { - file *token.File - lastline int -} - -const maxlines = 64 * 1024 - -func (s *fakeFileSet) pos(file string, line, column int) token.Pos { - // TODO(mdempsky): Make use of column. - - // Since we don't know the set of needed file positions, we reserve maxlines - // positions per file. We delay calling token.File.SetLines until all - // positions have been calculated (by way of fakeFileSet.setLines), so that - // we can avoid setting unnecessary lines. See also golang/go#46586. - f := s.files[file] - if f == nil { - f = &fileInfo{file: s.fset.AddFile(file, -1, maxlines)} - s.files[file] = f - } - if line > maxlines { - line = 1 - } - if line > f.lastline { - f.lastline = line - } - - // Return a fake position assuming that f.file consists only of newlines. 
- return token.Pos(f.file.Base() + line - 1) -} - -func (s *fakeFileSet) setLines() { - fakeLinesOnce.Do(func() { - fakeLines = make([]int, maxlines) - for i := range fakeLines { - fakeLines[i] = i - } - }) - for _, f := range s.files { - f.file.SetLines(fakeLines[:f.lastline]) - } -} - -var ( - fakeLines []int - fakeLinesOnce sync.Once -) - -func chanDir(d int) types.ChanDir { - // tag values must match the constants in cmd/compile/internal/gc/go.go - switch d { - case 1 /* Crecv */ : - return types.RecvOnly - case 2 /* Csend */ : - return types.SendOnly - case 3 /* Cboth */ : - return types.SendRecv - default: - errorf("unexpected channel dir %d", d) - return 0 - } -} - -var predeclOnce sync.Once -var predecl []types.Type // initialized lazily - -func predeclared() []types.Type { - predeclOnce.Do(func() { - // initialize lazily to be sure that all - // elements have been initialized before - predecl = []types.Type{ // basic types - types.Typ[types.Bool], - types.Typ[types.Int], - types.Typ[types.Int8], - types.Typ[types.Int16], - types.Typ[types.Int32], - types.Typ[types.Int64], - types.Typ[types.Uint], - types.Typ[types.Uint8], - types.Typ[types.Uint16], - types.Typ[types.Uint32], - types.Typ[types.Uint64], - types.Typ[types.Uintptr], - types.Typ[types.Float32], - types.Typ[types.Float64], - types.Typ[types.Complex64], - types.Typ[types.Complex128], - types.Typ[types.String], - - // basic type aliases - types.Universe.Lookup("byte").Type(), - types.Universe.Lookup("rune").Type(), - - // error - types.Universe.Lookup("error").Type(), - - // untyped types - types.Typ[types.UntypedBool], - types.Typ[types.UntypedInt], - types.Typ[types.UntypedRune], - types.Typ[types.UntypedFloat], - types.Typ[types.UntypedComplex], - types.Typ[types.UntypedString], - types.Typ[types.UntypedNil], - - // package unsafe - types.Typ[types.UnsafePointer], - - // invalid type - types.Typ[types.Invalid], // only appears in packages with errors - - // used internally by gc; never used by this package or in .a files - anyType{}, - } - predecl = append(predecl, additionalPredeclared()...) - }) - return predecl -} - -type anyType struct{} - -func (t anyType) Underlying() types.Type { return t } -func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go deleted file mode 100644 index f6437feb1..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go. - -// This file implements FindExportData. - -package gcimporter - -import ( - "bufio" - "fmt" - "io" - "strconv" - "strings" -) - -func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) { - // See $GOROOT/include/ar.h. 
- hdr := make([]byte, 16+12+6+6+8+10+2) - _, err = io.ReadFull(r, hdr) - if err != nil { - return - } - // leave for debugging - if false { - fmt.Printf("header: %s", hdr) - } - s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10])) - length, err := strconv.Atoi(s) - size = int64(length) - if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' { - err = fmt.Errorf("invalid archive header") - return - } - name = strings.TrimSpace(string(hdr[:16])) - return -} - -// FindExportData positions the reader r at the beginning of the -// export data section of an underlying GC-created object/archive -// file by reading from it. The reader must be positioned at the -// start of the file before calling this function. The hdr result -// is the string before the export data, either "$$" or "$$B". -// The size result is the length of the export data in bytes, or -1 if not known. -func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) { - // Read first line to make sure this is an object file. - line, err := r.ReadSlice('\n') - if err != nil { - err = fmt.Errorf("can't find export data (%v)", err) - return - } - - if string(line) == "!\n" { - // Archive file. Scan to __.PKGDEF. - var name string - if name, size, err = readGopackHeader(r); err != nil { - return - } - - // First entry should be __.PKGDEF. - if name != "__.PKGDEF" { - err = fmt.Errorf("go archive is missing __.PKGDEF") - return - } - - // Read first line of __.PKGDEF data, so that line - // is once again the first line of the input. - if line, err = r.ReadSlice('\n'); err != nil { - err = fmt.Errorf("can't find export data (%v)", err) - return - } - size -= int64(len(line)) - } - - // Now at __.PKGDEF in archive or still at beginning of file. - // Either way, line should begin with "go object ". - if !strings.HasPrefix(string(line), "go object ") { - err = fmt.Errorf("not a Go object file") - return - } - - // Skip over object header to export data. - // Begins after first line starting with $$. - for line[0] != '$' { - if line, err = r.ReadSlice('\n'); err != nil { - err = fmt.Errorf("can't find export data (%v)", err) - return - } - size -= int64(len(line)) - } - hdr = string(line) - if size < 0 { - size = -1 - } - - return -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go deleted file mode 100644 index 39df91124..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ /dev/null @@ -1,266 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file is a reduced copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go. - -// Package gcimporter provides various functions for reading -// gc-generated object files that can be used to implement the -// Importer interface defined by the Go 1.5 standard library package. -// -// The encoding is deterministic: if the encoder is applied twice to -// the same types.Package data structure, both encodings are equal. -// This property may be important to avoid spurious changes in -// applications such as build systems. -// -// However, the encoder is not necessarily idempotent. Importing an -// exported package may yield a types.Package that, while it -// represents the same set of Go types as the original, may differ in -// the details of its internal representation. 
Because of these -// differences, re-encoding the imported package may yield a -// different, but equally valid, encoding of the package. -package gcimporter // import "golang.org/x/tools/internal/gcimporter" - -import ( - "bufio" - "bytes" - "fmt" - "go/build" - "go/token" - "go/types" - "io" - "os" - "os/exec" - "path/filepath" - "strings" - "sync" -) - -const ( - // Enable debug during development: it adds some additional checks, and - // prevents errors from being recovered. - debug = false - - // If trace is set, debugging output is printed to std out. - trace = false -) - -var exportMap sync.Map // package dir → func() (string, bool) - -// lookupGorootExport returns the location of the export data -// (normally found in the build cache, but located in GOROOT/pkg -// in prior Go releases) for the package located in pkgDir. -// -// (We use the package's directory instead of its import path -// mainly to simplify handling of the packages in src/vendor -// and cmd/vendor.) -func lookupGorootExport(pkgDir string) (string, bool) { - f, ok := exportMap.Load(pkgDir) - if !ok { - var ( - listOnce sync.Once - exportPath string - ) - f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) { - listOnce.Do(func() { - cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir) - cmd.Dir = build.Default.GOROOT - var output []byte - output, err := cmd.Output() - if err != nil { - return - } - - exports := strings.Split(string(bytes.TrimSpace(output)), "\n") - if len(exports) != 1 { - return - } - - exportPath = exports[0] - }) - - return exportPath, exportPath != "" - }) - } - - return f.(func() (string, bool))() -} - -var pkgExts = [...]string{".a", ".o"} - -// FindPkg returns the filename and unique package id for an import -// path based on package information provided by build.Import (using -// the build.Default build.Context). A relative srcDir is interpreted -// relative to the current working directory. -// If no file was found, an empty filename is returned. -func FindPkg(path, srcDir string) (filename, id string) { - if path == "" { - return - } - - var noext string - switch { - default: - // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" - // Don't require the source files to be present. 
- if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 - srcDir = abs - } - bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) - if bp.PkgObj == "" { - var ok bool - if bp.Goroot && bp.Dir != "" { - filename, ok = lookupGorootExport(bp.Dir) - } - if !ok { - id = path // make sure we have an id to print in error message - return - } - } else { - noext = strings.TrimSuffix(bp.PkgObj, ".a") - id = bp.ImportPath - } - - case build.IsLocalImport(path): - // "./x" -> "/this/directory/x.ext", "/this/directory/x" - noext = filepath.Join(srcDir, path) - id = noext - - case filepath.IsAbs(path): - // for completeness only - go/build.Import - // does not support absolute imports - // "/x" -> "/x.ext", "/x" - noext = path - id = path - } - - if false { // for debugging - if path != id { - fmt.Printf("%s -> %s\n", path, id) - } - } - - if filename != "" { - if f, err := os.Stat(filename); err == nil && !f.IsDir() { - return - } - } - - // try extensions - for _, ext := range pkgExts { - filename = noext + ext - if f, err := os.Stat(filename); err == nil && !f.IsDir() { - return - } - } - - filename = "" // not found - return -} - -// Import imports a gc-generated package given its import path and srcDir, adds -// the corresponding package object to the packages map, and returns the object. -// The packages map must contain all packages already imported. -func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { - var rc io.ReadCloser - var filename, id string - if lookup != nil { - // With custom lookup specified, assume that caller has - // converted path to a canonical import path for use in the map. - if path == "unsafe" { - return types.Unsafe, nil - } - id = path - - // No need to re-import if the package was imported completely before. - if pkg = packages[id]; pkg != nil && pkg.Complete() { - return - } - f, err := lookup(path) - if err != nil { - return nil, err - } - rc = f - } else { - filename, id = FindPkg(path, srcDir) - if filename == "" { - if path == "unsafe" { - return types.Unsafe, nil - } - return nil, fmt.Errorf("can't find import: %q", id) - } - - // no need to re-import if the package was imported completely before - if pkg = packages[id]; pkg != nil && pkg.Complete() { - return - } - - // open file - f, err := os.Open(filename) - if err != nil { - return nil, err - } - defer func() { - if err != nil { - // add file name to error - err = fmt.Errorf("%s: %v", filename, err) - } - }() - rc = f - } - defer rc.Close() - - var hdr string - var size int64 - buf := bufio.NewReader(rc) - if hdr, size, err = FindExportData(buf); err != nil { - return - } - - switch hdr { - case "$$B\n": - var data []byte - data, err = io.ReadAll(buf) - if err != nil { - break - } - - // TODO(gri): allow clients of go/importer to provide a FileSet. - // Or, define a new standard go/types/gcexportdata package. - fset := token.NewFileSet() - - // Select appropriate importer. 
- if len(data) > 0 { - switch data[0] { - case 'v', 'c', 'd': // binary, till go1.10 - return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - - case 'i': // indexed, till go1.19 - _, pkg, err := IImportData(fset, packages, data[1:], id) - return pkg, err - - case 'u': // unified, from go1.20 - _, pkg, err := UImportData(fset, packages, data[1:size], id) - return pkg, err - - default: - l := len(data) - if l > 10 { - l = 10 - } - return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id) - } - } - - default: - err = fmt.Errorf("unknown export data header: %q", hdr) - } - - return -} - -type byPath []*types.Package - -func (a byPath) Len() int { return len(a) } -func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go deleted file mode 100644 index deeb67f31..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ /dev/null @@ -1,1332 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Indexed binary package export. -// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; -// see that file for specification of the format. - -package gcimporter - -import ( - "bytes" - "encoding/binary" - "fmt" - "go/constant" - "go/token" - "go/types" - "io" - "math/big" - "reflect" - "sort" - "strconv" - "strings" - - "golang.org/x/tools/go/types/objectpath" - "golang.org/x/tools/internal/aliases" - "golang.org/x/tools/internal/tokeninternal" -) - -// IExportShallow encodes "shallow" export data for the specified package. -// -// No promises are made about the encoding other than that it can be decoded by -// the same version of IIExportShallow. If you plan to save export data in the -// file system, be sure to include a cryptographic digest of the executable in -// the key to avoid version skew. -// -// If the provided reportf func is non-nil, it will be used for reporting bugs -// encountered during export. -// TODO(rfindley): remove reportf when we are confident enough in the new -// objectpath encoding. -func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) ([]byte, error) { - // In principle this operation can only fail if out.Write fails, - // but that's impossible for bytes.Buffer---and as a matter of - // fact iexportCommon doesn't even check for I/O errors. - // TODO(adonovan): handle I/O errors properly. - // TODO(adonovan): use byte slices throughout, avoiding copying. - const bundle, shallow = false, true - var out bytes.Buffer - err := iexportCommon(&out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}) - return out.Bytes(), err -} - -// IImportShallow decodes "shallow" types.Package data encoded by -// IExportShallow in the same executable. This function cannot import data from -// cmd/compile or gcexportdata.Write. -// -// The importer calls getPackages to obtain package symbols for all -// packages mentioned in the export data, including the one being -// decoded. -// -// If the provided reportf func is non-nil, it will be used for reporting bugs -// encountered during import. -// TODO(rfindley): remove reportf when we are confident enough in the new -// objectpath encoding. 
-func IImportShallow(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, path string, reportf ReportFunc) (*types.Package, error) { - const bundle = false - const shallow = true - pkgs, err := iimportCommon(fset, getPackages, data, bundle, path, shallow, reportf) - if err != nil { - return nil, err - } - return pkgs[0], nil -} - -// ReportFunc is the type of a function used to report formatted bugs. -type ReportFunc = func(string, ...interface{}) - -// Current bundled export format version. Increase with each format change. -// 0: initial implementation -const bundleVersion = 0 - -// IExportData writes indexed export data for pkg to out. -// -// If no file set is provided, position info will be missing. -// The package path of the top-level package will not be recorded, -// so that calls to IImportData can override with a provided package path. -func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error { - const bundle, shallow = false, false - return iexportCommon(out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}) -} - -// IExportBundle writes an indexed export bundle for pkgs to out. -func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { - const bundle, shallow = true, false - return iexportCommon(out, fset, bundle, shallow, iexportVersion, pkgs) -} - -func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, version int, pkgs []*types.Package) (err error) { - if !debug { - defer func() { - if e := recover(); e != nil { - if ierr, ok := e.(internalError); ok { - err = ierr - return - } - // Not an internal error; panic again. - panic(e) - } - }() - } - - p := iexporter{ - fset: fset, - version: version, - shallow: shallow, - allPkgs: map[*types.Package]bool{}, - stringIndex: map[string]uint64{}, - declIndex: map[types.Object]uint64{}, - tparamNames: map[types.Object]string{}, - typIndex: map[types.Type]uint64{}, - } - if !bundle { - p.localpkg = pkgs[0] - } - - for i, pt := range predeclared() { - p.typIndex[pt] = uint64(i) - } - if len(p.typIndex) > predeclReserved { - panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved)) - } - - // Initialize work queue with exported declarations. - for _, pkg := range pkgs { - scope := pkg.Scope() - for _, name := range scope.Names() { - if token.IsExported(name) { - p.pushDecl(scope.Lookup(name)) - } - } - - if bundle { - // Ensure pkg and its imports are included in the index. - p.allPkgs[pkg] = true - for _, imp := range pkg.Imports() { - p.allPkgs[imp] = true - } - } - } - - // Loop until no more work. - for !p.declTodo.empty() { - p.doDecl(p.declTodo.popHead()) - } - - // Produce index of offset of each file record in files. - var files intWriter - var fileOffset []uint64 // fileOffset[i] is offset in files of file encoded as i - if p.shallow { - fileOffset = make([]uint64, len(p.fileInfos)) - for i, info := range p.fileInfos { - fileOffset[i] = uint64(files.Len()) - p.encodeFile(&files, info.file, info.needed) - } - } - - // Append indices to data0 section. - dataLen := uint64(p.data0.Len()) - w := p.newWriter() - w.writeIndex(p.declIndex) - - if bundle { - w.uint64(uint64(len(pkgs))) - for _, pkg := range pkgs { - w.pkg(pkg) - imps := pkg.Imports() - w.uint64(uint64(len(imps))) - for _, imp := range imps { - w.pkg(imp) - } - } - } - w.flush() - - // Assemble header. 
- var hdr intWriter - if bundle { - hdr.uint64(bundleVersion) - } - hdr.uint64(uint64(p.version)) - hdr.uint64(uint64(p.strings.Len())) - if p.shallow { - hdr.uint64(uint64(files.Len())) - hdr.uint64(uint64(len(fileOffset))) - for _, offset := range fileOffset { - hdr.uint64(offset) - } - } - hdr.uint64(dataLen) - - // Flush output. - io.Copy(out, &hdr) - io.Copy(out, &p.strings) - if p.shallow { - io.Copy(out, &files) - } - io.Copy(out, &p.data0) - - return nil -} - -// encodeFile writes to w a representation of the file sufficient to -// faithfully restore position information about all needed offsets. -// Mutates the needed array. -func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) { - _ = needed[0] // precondition: needed is non-empty - - w.uint64(p.stringOff(file.Name())) - - size := uint64(file.Size()) - w.uint64(size) - - // Sort the set of needed offsets. Duplicates are harmless. - sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] }) - - lines := tokeninternal.GetLines(file) // byte offset of each line start - w.uint64(uint64(len(lines))) - - // Rather than record the entire array of line start offsets, - // we save only a sparse list of (index, offset) pairs for - // the start of each line that contains a needed position. - var sparse [][2]int // (index, offset) pairs -outer: - for i, lineStart := range lines { - lineEnd := size - if i < len(lines)-1 { - lineEnd = uint64(lines[i+1]) - } - // Does this line contains a needed offset? - if needed[0] < lineEnd { - sparse = append(sparse, [2]int{i, lineStart}) - for needed[0] < lineEnd { - needed = needed[1:] - if len(needed) == 0 { - break outer - } - } - } - } - - // Delta-encode the columns. - w.uint64(uint64(len(sparse))) - var prev [2]int - for _, pair := range sparse { - w.uint64(uint64(pair[0] - prev[0])) - w.uint64(uint64(pair[1] - prev[1])) - prev = pair - } -} - -// writeIndex writes out an object index. mainIndex indicates whether -// we're writing out the main index, which is also read by -// non-compiler tools and includes a complete package description -// (i.e., name and height). -func (w *exportWriter) writeIndex(index map[types.Object]uint64) { - type pkgObj struct { - obj types.Object - name string // qualified name; differs from obj.Name for type params - } - // Build a map from packages to objects from that package. - pkgObjs := map[*types.Package][]pkgObj{} - - // For the main index, make sure to include every package that - // we reference, even if we're not exporting (or reexporting) - // any symbols from it. - if w.p.localpkg != nil { - pkgObjs[w.p.localpkg] = nil - } - for pkg := range w.p.allPkgs { - pkgObjs[pkg] = nil - } - - for obj := range index { - name := w.p.exportName(obj) - pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], pkgObj{obj, name}) - } - - var pkgs []*types.Package - for pkg, objs := range pkgObjs { - pkgs = append(pkgs, pkg) - - sort.Slice(objs, func(i, j int) bool { - return objs[i].name < objs[j].name - }) - } - - sort.Slice(pkgs, func(i, j int) bool { - return w.exportPath(pkgs[i]) < w.exportPath(pkgs[j]) - }) - - w.uint64(uint64(len(pkgs))) - for _, pkg := range pkgs { - w.string(w.exportPath(pkg)) - w.string(pkg.Name()) - w.uint64(uint64(0)) // package height is not needed for go/types - - objs := pkgObjs[pkg] - w.uint64(uint64(len(objs))) - for _, obj := range objs { - w.string(obj.name) - w.uint64(index[obj.obj]) - } - } -} - -// exportName returns the 'exported' name of an object. 
It differs from -// obj.Name() only for type parameters (see tparamExportName for details). -func (p *iexporter) exportName(obj types.Object) (res string) { - if name := p.tparamNames[obj]; name != "" { - return name - } - return obj.Name() -} - -type iexporter struct { - fset *token.FileSet - out *bytes.Buffer - version int - - shallow bool // don't put types from other packages in the index - objEncoder *objectpath.Encoder // encodes objects from other packages in shallow mode; lazily allocated - localpkg *types.Package // (nil in bundle mode) - - // allPkgs tracks all packages that have been referenced by - // the export data, so we can ensure to include them in the - // main index. - allPkgs map[*types.Package]bool - - declTodo objQueue - - strings intWriter - stringIndex map[string]uint64 - - // In shallow mode, object positions are encoded as (file, offset). - // Each file is recorded as a line-number table. - // Only the lines of needed positions are saved faithfully. - fileInfo map[*token.File]uint64 // value is index in fileInfos - fileInfos []*filePositions - - data0 intWriter - declIndex map[types.Object]uint64 - tparamNames map[types.Object]string // typeparam->exported name - typIndex map[types.Type]uint64 - - indent int // for tracing support -} - -type filePositions struct { - file *token.File - needed []uint64 // unordered list of needed file offsets -} - -func (p *iexporter) trace(format string, args ...interface{}) { - if !trace { - // Call sites should also be guarded, but having this check here allows - // easily enabling/disabling debug trace statements. - return - } - fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...) -} - -// objectpathEncoder returns the lazily allocated objectpath.Encoder to use -// when encoding objects in other packages during shallow export. -// -// Using a shared Encoder amortizes some of cost of objectpath search. -func (p *iexporter) objectpathEncoder() *objectpath.Encoder { - if p.objEncoder == nil { - p.objEncoder = new(objectpath.Encoder) - } - return p.objEncoder -} - -// stringOff returns the offset of s within the string section. -// If not already present, it's added to the end. -func (p *iexporter) stringOff(s string) uint64 { - off, ok := p.stringIndex[s] - if !ok { - off = uint64(p.strings.Len()) - p.stringIndex[s] = off - - p.strings.uint64(uint64(len(s))) - p.strings.WriteString(s) - } - return off -} - -// fileIndexAndOffset returns the index of the token.File and the byte offset of pos within it. -func (p *iexporter) fileIndexAndOffset(file *token.File, pos token.Pos) (uint64, uint64) { - index, ok := p.fileInfo[file] - if !ok { - index = uint64(len(p.fileInfo)) - p.fileInfos = append(p.fileInfos, &filePositions{file: file}) - if p.fileInfo == nil { - p.fileInfo = make(map[*token.File]uint64) - } - p.fileInfo[file] = index - } - // Record each needed offset. - info := p.fileInfos[index] - offset := uint64(file.Offset(pos)) - info.needed = append(info.needed, offset) - - return index, offset -} - -// pushDecl adds n to the declaration work queue, if not already present. -func (p *iexporter) pushDecl(obj types.Object) { - // Package unsafe is known to the compiler and predeclared. - // Caller should not ask us to do export it. - if obj.Pkg() == types.Unsafe { - panic("cannot export package unsafe") - } - - // Shallow export data: don't index decls from other packages. 
- if p.shallow && obj.Pkg() != p.localpkg { - return - } - - if _, ok := p.declIndex[obj]; ok { - return - } - - p.declIndex[obj] = ^uint64(0) // mark obj present in work queue - p.declTodo.pushTail(obj) -} - -// exportWriter handles writing out individual data section chunks. -type exportWriter struct { - p *iexporter - - data intWriter - prevFile string - prevLine int64 - prevColumn int64 -} - -func (w *exportWriter) exportPath(pkg *types.Package) string { - if pkg == w.p.localpkg { - return "" - } - return pkg.Path() -} - -func (p *iexporter) doDecl(obj types.Object) { - if trace { - p.trace("exporting decl %v (%T)", obj, obj) - p.indent++ - defer func() { - p.indent-- - p.trace("=> %s", obj) - }() - } - w := p.newWriter() - - switch obj := obj.(type) { - case *types.Var: - w.tag(varTag) - w.pos(obj.Pos()) - w.typ(obj.Type(), obj.Pkg()) - - case *types.Func: - sig, _ := obj.Type().(*types.Signature) - if sig.Recv() != nil { - // We shouldn't see methods in the package scope, - // but the type checker may repair "func () F() {}" - // to "func (Invalid) F()" and then treat it like "func F()", - // so allow that. See golang/go#57729. - if sig.Recv().Type() != types.Typ[types.Invalid] { - panic(internalErrorf("unexpected method: %v", sig)) - } - } - - // Function. - if sig.TypeParams().Len() == 0 { - w.tag(funcTag) - } else { - w.tag(genericFuncTag) - } - w.pos(obj.Pos()) - // The tparam list of the function type is the declaration of the type - // params. So, write out the type params right now. Then those type params - // will be referenced via their type offset (via typOff) in all other - // places in the signature and function where they are used. - // - // While importing the type parameters, tparamList computes and records - // their export name, so that it can be later used when writing the index. - if tparams := sig.TypeParams(); tparams.Len() > 0 { - w.tparamList(obj.Name(), tparams, obj.Pkg()) - } - w.signature(sig) - - case *types.Const: - w.tag(constTag) - w.pos(obj.Pos()) - w.value(obj.Type(), obj.Val()) - - case *types.TypeName: - t := obj.Type() - - if tparam, ok := aliases.Unalias(t).(*types.TypeParam); ok { - w.tag(typeParamTag) - w.pos(obj.Pos()) - constraint := tparam.Constraint() - if p.version >= iexportVersionGo1_18 { - implicit := false - if iface, _ := aliases.Unalias(constraint).(*types.Interface); iface != nil { - implicit = iface.IsImplicit() - } - w.bool(implicit) - } - w.typ(constraint, obj.Pkg()) - break - } - - if obj.IsAlias() { - w.tag(aliasTag) - w.pos(obj.Pos()) - if alias, ok := t.(*aliases.Alias); ok { - // Preserve materialized aliases, - // even of non-exported types. - t = aliases.Rhs(alias) - } - w.typ(t, obj.Pkg()) - break - } - - // Defined type. - named, ok := t.(*types.Named) - if !ok { - panic(internalErrorf("%s is not a defined type", t)) - } - - if named.TypeParams().Len() == 0 { - w.tag(typeTag) - } else { - w.tag(genericTypeTag) - } - w.pos(obj.Pos()) - - if named.TypeParams().Len() > 0 { - // While importing the type parameters, tparamList computes and records - // their export name, so that it can be later used when writing the index. 
- w.tparamList(obj.Name(), named.TypeParams(), obj.Pkg()) - } - - underlying := named.Underlying() - w.typ(underlying, obj.Pkg()) - - if types.IsInterface(t) { - break - } - - n := named.NumMethods() - w.uint64(uint64(n)) - for i := 0; i < n; i++ { - m := named.Method(i) - w.pos(m.Pos()) - w.string(m.Name()) - sig, _ := m.Type().(*types.Signature) - - // Receiver type parameters are type arguments of the receiver type, so - // their name must be qualified before exporting recv. - if rparams := sig.RecvTypeParams(); rparams.Len() > 0 { - prefix := obj.Name() + "." + m.Name() - for i := 0; i < rparams.Len(); i++ { - rparam := rparams.At(i) - name := tparamExportName(prefix, rparam) - w.p.tparamNames[rparam.Obj()] = name - } - } - w.param(sig.Recv()) - w.signature(sig) - } - - default: - panic(internalErrorf("unexpected object: %v", obj)) - } - - p.declIndex[obj] = w.flush() -} - -func (w *exportWriter) tag(tag byte) { - w.data.WriteByte(tag) -} - -func (w *exportWriter) pos(pos token.Pos) { - if w.p.shallow { - w.posV2(pos) - } else if w.p.version >= iexportVersionPosCol { - w.posV1(pos) - } else { - w.posV0(pos) - } -} - -// posV2 encoding (used only in shallow mode) records positions as -// (file, offset), where file is the index in the token.File table -// (which records the file name and newline offsets) and offset is a -// byte offset. It effectively ignores //line directives. -func (w *exportWriter) posV2(pos token.Pos) { - if pos == token.NoPos { - w.uint64(0) - return - } - file := w.p.fset.File(pos) // fset must be non-nil - index, offset := w.p.fileIndexAndOffset(file, pos) - w.uint64(1 + index) - w.uint64(offset) -} - -func (w *exportWriter) posV1(pos token.Pos) { - if w.p.fset == nil { - w.int64(0) - return - } - - p := w.p.fset.Position(pos) - file := p.Filename - line := int64(p.Line) - column := int64(p.Column) - - deltaColumn := (column - w.prevColumn) << 1 - deltaLine := (line - w.prevLine) << 1 - - if file != w.prevFile { - deltaLine |= 1 - } - if deltaLine != 0 { - deltaColumn |= 1 - } - - w.int64(deltaColumn) - if deltaColumn&1 != 0 { - w.int64(deltaLine) - if deltaLine&1 != 0 { - w.string(file) - } - } - - w.prevFile = file - w.prevLine = line - w.prevColumn = column -} - -func (w *exportWriter) posV0(pos token.Pos) { - if w.p.fset == nil { - w.int64(0) - return - } - - p := w.p.fset.Position(pos) - file := p.Filename - line := int64(p.Line) - - // When file is the same as the last position (common case), - // we can save a few bytes by delta encoding just the line - // number. - // - // Note: Because data objects may be read out of order (or not - // at all), we can only apply delta encoding within a single - // object. This is handled implicitly by tracking prevFile and - // prevLine as fields of exportWriter. - - if file == w.prevFile { - delta := line - w.prevLine - w.int64(delta) - if delta == deltaNewFile { - w.int64(-1) - } - } else { - w.int64(deltaNewFile) - w.int64(line) // line >= 0 - w.string(file) - w.prevFile = file - } - w.prevLine = line -} - -func (w *exportWriter) pkg(pkg *types.Package) { - // Ensure any referenced packages are declared in the main index. - w.p.allPkgs[pkg] = true - - w.string(w.exportPath(pkg)) -} - -func (w *exportWriter) qualifiedType(obj *types.TypeName) { - name := w.p.exportName(obj) - - // Ensure any referenced declarations are written out too. - w.p.pushDecl(obj) - w.string(name) - w.pkg(obj.Pkg()) -} - -// TODO(rfindley): what does 'pkg' even mean here? 
It would be better to pass -// it in explicitly into signatures and structs that may use it for -// constructing fields. -func (w *exportWriter) typ(t types.Type, pkg *types.Package) { - w.data.uint64(w.p.typOff(t, pkg)) -} - -func (p *iexporter) newWriter() *exportWriter { - return &exportWriter{p: p} -} - -func (w *exportWriter) flush() uint64 { - off := uint64(w.p.data0.Len()) - io.Copy(&w.p.data0, &w.data) - return off -} - -func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 { - off, ok := p.typIndex[t] - if !ok { - w := p.newWriter() - w.doTyp(t, pkg) - off = predeclReserved + w.flush() - p.typIndex[t] = off - } - return off -} - -func (w *exportWriter) startType(k itag) { - w.data.uint64(uint64(k)) -} - -func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { - if trace { - w.p.trace("exporting type %s (%T)", t, t) - w.p.indent++ - defer func() { - w.p.indent-- - w.p.trace("=> %s", t) - }() - } - switch t := t.(type) { - case *aliases.Alias: - // TODO(adonovan): support parameterized aliases, following *types.Named. - w.startType(aliasType) - w.qualifiedType(t.Obj()) - - case *types.Named: - if targs := t.TypeArgs(); targs.Len() > 0 { - w.startType(instanceType) - // TODO(rfindley): investigate if this position is correct, and if it - // matters. - w.pos(t.Obj().Pos()) - w.typeList(targs, pkg) - w.typ(t.Origin(), pkg) - return - } - w.startType(definedType) - w.qualifiedType(t.Obj()) - - case *types.TypeParam: - w.startType(typeParamType) - w.qualifiedType(t.Obj()) - - case *types.Pointer: - w.startType(pointerType) - w.typ(t.Elem(), pkg) - - case *types.Slice: - w.startType(sliceType) - w.typ(t.Elem(), pkg) - - case *types.Array: - w.startType(arrayType) - w.uint64(uint64(t.Len())) - w.typ(t.Elem(), pkg) - - case *types.Chan: - w.startType(chanType) - // 1 RecvOnly; 2 SendOnly; 3 SendRecv - var dir uint64 - switch t.Dir() { - case types.RecvOnly: - dir = 1 - case types.SendOnly: - dir = 2 - case types.SendRecv: - dir = 3 - } - w.uint64(dir) - w.typ(t.Elem(), pkg) - - case *types.Map: - w.startType(mapType) - w.typ(t.Key(), pkg) - w.typ(t.Elem(), pkg) - - case *types.Signature: - w.startType(signatureType) - w.pkg(pkg) - w.signature(t) - - case *types.Struct: - w.startType(structType) - n := t.NumFields() - // Even for struct{} we must emit some qualifying package, because that's - // what the compiler does, and thus that's what the importer expects. - fieldPkg := pkg - if n > 0 { - fieldPkg = t.Field(0).Pkg() - } - if fieldPkg == nil { - // TODO(rfindley): improve this very hacky logic. - // - // The importer expects a package to be set for all struct types, even - // those with no fields. A better encoding might be to set NumFields - // before pkg. setPkg panics with a nil package, which may be possible - // to reach with invalid packages (and perhaps valid packages, too?), so - // (arbitrarily) set the localpkg if available. - // - // Alternatively, we may be able to simply guarantee that pkg != nil, by - // reconsidering the encoding of constant values. 
- if w.p.shallow { - fieldPkg = w.p.localpkg - } else { - panic(internalErrorf("no package to set for empty struct")) - } - } - w.pkg(fieldPkg) - w.uint64(uint64(n)) - - for i := 0; i < n; i++ { - f := t.Field(i) - if w.p.shallow { - w.objectPath(f) - } - w.pos(f.Pos()) - w.string(f.Name()) // unexported fields implicitly qualified by prior setPkg - w.typ(f.Type(), fieldPkg) - w.bool(f.Anonymous()) - w.string(t.Tag(i)) // note (or tag) - } - - case *types.Interface: - w.startType(interfaceType) - w.pkg(pkg) - - n := t.NumEmbeddeds() - w.uint64(uint64(n)) - for i := 0; i < n; i++ { - ft := t.EmbeddedType(i) - tPkg := pkg - if named, _ := aliases.Unalias(ft).(*types.Named); named != nil { - w.pos(named.Obj().Pos()) - } else { - w.pos(token.NoPos) - } - w.typ(ft, tPkg) - } - - // See comment for struct fields. In shallow mode we change the encoding - // for interface methods that are promoted from other packages. - - n = t.NumExplicitMethods() - w.uint64(uint64(n)) - for i := 0; i < n; i++ { - m := t.ExplicitMethod(i) - if w.p.shallow { - w.objectPath(m) - } - w.pos(m.Pos()) - w.string(m.Name()) - sig, _ := m.Type().(*types.Signature) - w.signature(sig) - } - - case *types.Union: - w.startType(unionType) - nt := t.Len() - w.uint64(uint64(nt)) - for i := 0; i < nt; i++ { - term := t.Term(i) - w.bool(term.Tilde()) - w.typ(term.Type(), pkg) - } - - default: - panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t))) - } -} - -// objectPath writes the package and objectPath to use to look up obj in a -// different package, when encoding in "shallow" mode. -// -// When doing a shallow import, the importer creates only the local package, -// and requests package symbols for dependencies from the client. -// However, certain types defined in the local package may hold objects defined -// (perhaps deeply) within another package. -// -// For example, consider the following: -// -// package a -// func F() chan * map[string] struct { X int } -// -// package b -// import "a" -// var B = a.F() -// -// In this example, the type of b.B holds fields defined in package a. -// In order to have the correct canonical objects for the field defined in the -// type of B, they are encoded as objectPaths and later looked up in the -// importer. The same problem applies to interface methods. -func (w *exportWriter) objectPath(obj types.Object) { - if obj.Pkg() == nil || obj.Pkg() == w.p.localpkg { - // obj.Pkg() may be nil for the builtin error.Error. - // In this case, or if obj is declared in the local package, no need to - // encode. - w.string("") - return - } - objectPath, err := w.p.objectpathEncoder().For(obj) - if err != nil { - // Fall back to the empty string, which will cause the importer to create a - // new object, which matches earlier behavior. Creating a new object is - // sufficient for many purposes (such as type checking), but causes certain - // references algorithms to fail (golang/go#60819). However, we didn't - // notice this problem during months of gopls@v0.12.0 testing. - // - // TODO(golang/go#61674): this workaround is insufficient, as in the case - // where the field forwarded from an instantiated type that may not appear - // in the export data of the original package: - // - // // package a - // type A[P any] struct{ F P } - // - // // package b - // type B a.A[int] - // - // We need to update references algorithms not to depend on this - // de-duplication, at which point we may want to simply remove the - // workaround here. 
- w.string("") - return - } - w.string(string(objectPath)) - w.pkg(obj.Pkg()) -} - -func (w *exportWriter) signature(sig *types.Signature) { - w.paramList(sig.Params()) - w.paramList(sig.Results()) - if sig.Params().Len() > 0 { - w.bool(sig.Variadic()) - } -} - -func (w *exportWriter) typeList(ts *types.TypeList, pkg *types.Package) { - w.uint64(uint64(ts.Len())) - for i := 0; i < ts.Len(); i++ { - w.typ(ts.At(i), pkg) - } -} - -func (w *exportWriter) tparamList(prefix string, list *types.TypeParamList, pkg *types.Package) { - ll := uint64(list.Len()) - w.uint64(ll) - for i := 0; i < list.Len(); i++ { - tparam := list.At(i) - // Set the type parameter exportName before exporting its type. - exportName := tparamExportName(prefix, tparam) - w.p.tparamNames[tparam.Obj()] = exportName - w.typ(list.At(i), pkg) - } -} - -const blankMarker = "$" - -// tparamExportName returns the 'exported' name of a type parameter, which -// differs from its actual object name: it is prefixed with a qualifier, and -// blank type parameter names are disambiguated by their index in the type -// parameter list. -func tparamExportName(prefix string, tparam *types.TypeParam) string { - assert(prefix != "") - name := tparam.Obj().Name() - if name == "_" { - name = blankMarker + strconv.Itoa(tparam.Index()) - } - return prefix + "." + name -} - -// tparamName returns the real name of a type parameter, after stripping its -// qualifying prefix and reverting blank-name encoding. See tparamExportName -// for details. -func tparamName(exportName string) string { - // Remove the "path" from the type param name that makes it unique. - ix := strings.LastIndex(exportName, ".") - if ix < 0 { - errorf("malformed type parameter export name %s: missing prefix", exportName) - } - name := exportName[ix+1:] - if strings.HasPrefix(name, blankMarker) { - return "_" - } - return name -} - -func (w *exportWriter) paramList(tup *types.Tuple) { - n := tup.Len() - w.uint64(uint64(n)) - for i := 0; i < n; i++ { - w.param(tup.At(i)) - } -} - -func (w *exportWriter) param(obj types.Object) { - w.pos(obj.Pos()) - w.localIdent(obj) - w.typ(obj.Type(), obj.Pkg()) -} - -func (w *exportWriter) value(typ types.Type, v constant.Value) { - w.typ(typ, nil) - if w.p.version >= iexportVersionGo1_18 { - w.int64(int64(v.Kind())) - } - - if v.Kind() == constant.Unknown { - // golang/go#60605: treat unknown constant values as if they have invalid type - // - // This loses some fidelity over the package type-checked from source, but that - // is acceptable. 
- // - // TODO(rfindley): we should switch on the recorded constant kind rather - // than the constant type - return - } - - switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { - case types.IsBoolean: - w.bool(constant.BoolVal(v)) - case types.IsInteger: - var i big.Int - if i64, exact := constant.Int64Val(v); exact { - i.SetInt64(i64) - } else if ui64, exact := constant.Uint64Val(v); exact { - i.SetUint64(ui64) - } else { - i.SetString(v.ExactString(), 10) - } - w.mpint(&i, typ) - case types.IsFloat: - f := constantToFloat(v) - w.mpfloat(f, typ) - case types.IsComplex: - w.mpfloat(constantToFloat(constant.Real(v)), typ) - w.mpfloat(constantToFloat(constant.Imag(v)), typ) - case types.IsString: - w.string(constant.StringVal(v)) - default: - if b.Kind() == types.Invalid { - // package contains type errors - break - } - panic(internalErrorf("unexpected type %v (%v)", typ, typ.Underlying())) - } -} - -// constantToFloat converts a constant.Value with kind constant.Float to a -// big.Float. -func constantToFloat(x constant.Value) *big.Float { - x = constant.ToFloat(x) - // Use the same floating-point precision (512) as cmd/compile - // (see Mpprec in cmd/compile/internal/gc/mpfloat.go). - const mpprec = 512 - var f big.Float - f.SetPrec(mpprec) - if v, exact := constant.Float64Val(x); exact { - // float64 - f.SetFloat64(v) - } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { - // TODO(gri): add big.Rat accessor to constant.Value. - n := valueToRat(num) - d := valueToRat(denom) - f.SetRat(n.Quo(n, d)) - } else { - // Value too large to represent as a fraction => inaccessible. - // TODO(gri): add big.Float accessor to constant.Value. - _, ok := f.SetString(x.ExactString()) - assert(ok) - } - return &f -} - -func valueToRat(x constant.Value) *big.Rat { - // Convert little-endian to big-endian. - // I can't believe this is necessary. - bytes := constant.Bytes(x) - for i := 0; i < len(bytes)/2; i++ { - bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] - } - return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) -} - -// mpint exports a multi-precision integer. -// -// For unsigned types, small values are written out as a single -// byte. Larger values are written out as a length-prefixed big-endian -// byte string, where the length prefix is encoded as its complement. -// For example, bytes 0, 1, and 2 directly represent the integer -// values 0, 1, and 2; while bytes 255, 254, and 253 indicate a 1-, -// 2-, and 3-byte big-endian string follow. -// -// Encoding for signed types use the same general approach as for -// unsigned types, except small values use zig-zag encoding and the -// bottom bit of length prefix byte for large values is reserved as a -// sign bit. -// -// The exact boundary between small and large encodings varies -// according to the maximum number of bytes needed to encode a value -// of type typ. As a special case, 8-bit types are always encoded as a -// single byte. -// -// TODO(mdempsky): Is this level of complexity really worthwhile? 
-func (w *exportWriter) mpint(x *big.Int, typ types.Type) { - basic, ok := typ.Underlying().(*types.Basic) - if !ok { - panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying())) - } - - signed, maxBytes := intSize(basic) - - negative := x.Sign() < 0 - if !signed && negative { - panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x)) - } - - b := x.Bytes() - if len(b) > 0 && b[0] == 0 { - panic(internalErrorf("leading zeros")) - } - if uint(len(b)) > maxBytes { - panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x)) - } - - maxSmall := 256 - maxBytes - if signed { - maxSmall = 256 - 2*maxBytes - } - if maxBytes == 1 { - maxSmall = 256 - } - - // Check if x can use small value encoding. - if len(b) <= 1 { - var ux uint - if len(b) == 1 { - ux = uint(b[0]) - } - if signed { - ux <<= 1 - if negative { - ux-- - } - } - if ux < maxSmall { - w.data.WriteByte(byte(ux)) - return - } - } - - n := 256 - uint(len(b)) - if signed { - n = 256 - 2*uint(len(b)) - if negative { - n |= 1 - } - } - if n < maxSmall || n >= 256 { - panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n)) - } - - w.data.WriteByte(byte(n)) - w.data.Write(b) -} - -// mpfloat exports a multi-precision floating point number. -// -// The number's value is decomposed into mantissa × 2**exponent, where -// mantissa is an integer. The value is written out as mantissa (as a -// multi-precision integer) and then the exponent, except exponent is -// omitted if mantissa is zero. -func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) { - if f.IsInf() { - panic("infinite constant") - } - - // Break into f = mant × 2**exp, with 0.5 <= mant < 1. - var mant big.Float - exp := int64(f.MantExp(&mant)) - - // Scale so that mant is an integer. - prec := mant.MinPrec() - mant.SetMantExp(&mant, int(prec)) - exp -= int64(prec) - - manti, acc := mant.Int(nil) - if acc != big.Exact { - panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc)) - } - w.mpint(manti, typ) - if manti.Sign() != 0 { - w.int64(exp) - } -} - -func (w *exportWriter) bool(b bool) bool { - var x uint64 - if b { - x = 1 - } - w.uint64(x) - return b -} - -func (w *exportWriter) int64(x int64) { w.data.int64(x) } -func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) } -func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) } - -func (w *exportWriter) localIdent(obj types.Object) { - // Anonymous parameters. - if obj == nil { - w.string("") - return - } - - name := obj.Name() - if name == "_" { - w.string("_") - return - } - - w.string(name) -} - -type intWriter struct { - bytes.Buffer -} - -func (w *intWriter) int64(x int64) { - var buf [binary.MaxVarintLen64]byte - n := binary.PutVarint(buf[:], x) - w.Write(buf[:n]) -} - -func (w *intWriter) uint64(x uint64) { - var buf [binary.MaxVarintLen64]byte - n := binary.PutUvarint(buf[:], x) - w.Write(buf[:n]) -} - -func assert(cond bool) { - if !cond { - panic("internal error: assertion failed") - } -} - -// The below is copied from go/src/cmd/compile/internal/gc/syntax.go. - -// objQueue is a FIFO queue of types.Object. The zero value of objQueue is -// a ready-to-use empty queue. -type objQueue struct { - ring []types.Object - head, tail int -} - -// empty returns true if q contains no Nodes. -func (q *objQueue) empty() bool { - return q.head == q.tail -} - -// pushTail appends n to the tail of the queue. 
-func (q *objQueue) pushTail(obj types.Object) { - if len(q.ring) == 0 { - q.ring = make([]types.Object, 16) - } else if q.head+len(q.ring) == q.tail { - // Grow the ring. - nring := make([]types.Object, len(q.ring)*2) - // Copy the old elements. - part := q.ring[q.head%len(q.ring):] - if q.tail-q.head <= len(part) { - part = part[:q.tail-q.head] - copy(nring, part) - } else { - pos := copy(nring, part) - copy(nring[pos:], q.ring[:q.tail%len(q.ring)]) - } - q.ring, q.head, q.tail = nring, 0, q.tail-q.head - } - - q.ring[q.tail%len(q.ring)] = obj - q.tail++ -} - -// popHead pops a node from the head of the queue. It panics if q is empty. -func (q *objQueue) popHead() types.Object { - if q.empty() { - panic("dequeue empty") - } - obj := q.ring[q.head%len(q.ring)] - q.head++ - return obj -} - -// internalError represents an error generated inside this package. -type internalError string - -func (e internalError) Error() string { return "gcimporter: " + string(e) } - -// TODO(adonovan): make this call panic, so that it's symmetric with errorf. -// Otherwise it's easy to forget to do anything with the error. -// -// TODO(adonovan): also, consider switching the names "errorf" and -// "internalErrorf" as the former is used for bugs, whose cause is -// internal inconsistency, whereas the latter is used for ordinary -// situations like bad input, whose cause is external. -func internalErrorf(format string, args ...interface{}) error { - return internalError(fmt.Sprintf(format, args...)) -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go deleted file mode 100644 index 136aa0365..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ /dev/null @@ -1,1100 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Indexed package import. -// See cmd/compile/internal/gc/iexport.go for the export data format. - -// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. - -package gcimporter - -import ( - "bytes" - "encoding/binary" - "fmt" - "go/constant" - "go/token" - "go/types" - "io" - "math/big" - "sort" - "strings" - - "golang.org/x/tools/go/types/objectpath" - "golang.org/x/tools/internal/aliases" - "golang.org/x/tools/internal/typesinternal" -) - -type intReader struct { - *bytes.Reader - path string -} - -func (r *intReader) int64() int64 { - i, err := binary.ReadVarint(r.Reader) - if err != nil { - errorf("import %q: read varint error: %v", r.path, err) - } - return i -} - -func (r *intReader) uint64() uint64 { - i, err := binary.ReadUvarint(r.Reader) - if err != nil { - errorf("import %q: read varint error: %v", r.path, err) - } - return i -} - -// Keep this in sync with constants in iexport.go. 
-const ( - iexportVersionGo1_11 = 0 - iexportVersionPosCol = 1 - iexportVersionGo1_18 = 2 - iexportVersionGenerics = 2 - - iexportVersionCurrent = 2 -) - -type ident struct { - pkg *types.Package - name string -} - -const predeclReserved = 32 - -type itag uint64 - -const ( - // Types - definedType itag = iota - pointerType - sliceType - arrayType - chanType - mapType - signatureType - structType - interfaceType - typeParamType - instanceType - unionType - aliasType -) - -// Object tags -const ( - varTag = 'V' - funcTag = 'F' - genericFuncTag = 'G' - constTag = 'C' - aliasTag = 'A' - genericAliasTag = 'B' - typeParamTag = 'P' - typeTag = 'T' - genericTypeTag = 'U' -) - -// IImportData imports a package from the serialized package data -// and returns 0 and a reference to the package. -// If the export data version is not recognized or the format is otherwise -// compromised, an error is returned. -func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) { - pkgs, err := iimportCommon(fset, GetPackagesFromMap(imports), data, false, path, false, nil) - if err != nil { - return 0, nil, err - } - return 0, pkgs[0], nil -} - -// IImportBundle imports a set of packages from the serialized package bundle. -func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) { - return iimportCommon(fset, GetPackagesFromMap(imports), data, true, "", false, nil) -} - -// A GetPackagesFunc function obtains the non-nil symbols for a set of -// packages, creating and recursively importing them as needed. An -// implementation should store each package symbol is in the Pkg -// field of the items array. -// -// Any error causes importing to fail. This can be used to quickly read -// the import manifest of an export data file without fully decoding it. -type GetPackagesFunc = func(items []GetPackagesItem) error - -// A GetPackagesItem is a request from the importer for the package -// symbol of the specified name and path. -type GetPackagesItem struct { - Name, Path string - Pkg *types.Package // to be filled in by GetPackagesFunc call - - // private importer state - pathOffset uint64 - nameIndex map[string]uint64 -} - -// GetPackagesFromMap returns a GetPackagesFunc that retrieves -// packages from the given map of package path to package. -// -// The returned function may mutate m: each requested package that is not -// found is created with types.NewPackage and inserted into m. 
-func GetPackagesFromMap(m map[string]*types.Package) GetPackagesFunc { - return func(items []GetPackagesItem) error { - for i, item := range items { - pkg, ok := m[item.Path] - if !ok { - pkg = types.NewPackage(item.Path, item.Name) - m[item.Path] = pkg - } - items[i].Pkg = pkg - } - return nil - } -} - -func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, bundle bool, path string, shallow bool, reportf ReportFunc) (pkgs []*types.Package, err error) { - const currentVersion = iexportVersionCurrent - version := int64(-1) - if !debug { - defer func() { - if e := recover(); e != nil { - if bundle { - err = fmt.Errorf("%v", e) - } else if version > currentVersion { - err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) - } else { - err = fmt.Errorf("internal error while importing %q (%v); please report an issue", path, e) - } - } - }() - } - - r := &intReader{bytes.NewReader(data), path} - - if bundle { - if v := r.uint64(); v != bundleVersion { - errorf("unknown bundle format version %d", v) - } - } - - version = int64(r.uint64()) - switch version { - case iexportVersionGo1_18, iexportVersionPosCol, iexportVersionGo1_11: - default: - if version > iexportVersionGo1_18 { - errorf("unstable iexport format version %d, just rebuild compiler and std library", version) - } else { - errorf("unknown iexport format version %d", version) - } - } - - sLen := int64(r.uint64()) - var fLen int64 - var fileOffset []uint64 - if shallow { - // Shallow mode uses a different position encoding. - fLen = int64(r.uint64()) - fileOffset = make([]uint64, r.uint64()) - for i := range fileOffset { - fileOffset[i] = r.uint64() - } - } - dLen := int64(r.uint64()) - - whence, _ := r.Seek(0, io.SeekCurrent) - stringData := data[whence : whence+sLen] - fileData := data[whence+sLen : whence+sLen+fLen] - declData := data[whence+sLen+fLen : whence+sLen+fLen+dLen] - r.Seek(sLen+fLen+dLen, io.SeekCurrent) - - p := iimporter{ - version: int(version), - ipath: path, - aliases: aliases.Enabled(), - shallow: shallow, - reportf: reportf, - - stringData: stringData, - stringCache: make(map[uint64]string), - fileOffset: fileOffset, - fileData: fileData, - fileCache: make([]*token.File, len(fileOffset)), - pkgCache: make(map[uint64]*types.Package), - - declData: declData, - pkgIndex: make(map[*types.Package]map[string]uint64), - typCache: make(map[uint64]types.Type), - // Separate map for typeparams, keyed by their package and unique - // name. - tparamIndex: make(map[ident]types.Type), - - fake: fakeFileSet{ - fset: fset, - files: make(map[string]*fileInfo), - }, - } - defer p.fake.setLines() // set lines for files in fset - - for i, pt := range predeclared() { - p.typCache[uint64(i)] = pt - } - - // Gather the relevant packages from the manifest. - items := make([]GetPackagesItem, r.uint64()) - uniquePkgPaths := make(map[string]bool) - for i := range items { - pkgPathOff := r.uint64() - pkgPath := p.stringAt(pkgPathOff) - pkgName := p.stringAt(r.uint64()) - _ = r.uint64() // package height; unused by go/types - - if pkgPath == "" { - pkgPath = path - } - items[i].Name = pkgName - items[i].Path = pkgPath - items[i].pathOffset = pkgPathOff - - // Read index for package. - nameIndex := make(map[string]uint64) - nSyms := r.uint64() - // In shallow mode, only the current package (i=0) has an index. 
- assert(!(shallow && i > 0 && nSyms != 0)) - for ; nSyms > 0; nSyms-- { - name := p.stringAt(r.uint64()) - nameIndex[name] = r.uint64() - } - - items[i].nameIndex = nameIndex - - uniquePkgPaths[pkgPath] = true - } - // Debugging #63822; hypothesis: there are duplicate PkgPaths. - if len(uniquePkgPaths) != len(items) { - reportf("found duplicate PkgPaths while reading export data manifest: %v", items) - } - - // Request packages all at once from the client, - // enabling a parallel implementation. - if err := getPackages(items); err != nil { - return nil, err // don't wrap this error - } - - // Check the results and complete the index. - pkgList := make([]*types.Package, len(items)) - for i, item := range items { - pkg := item.Pkg - if pkg == nil { - errorf("internal error: getPackages returned nil package for %q", item.Path) - } else if pkg.Path() != item.Path { - errorf("internal error: getPackages returned wrong path %q, want %q", pkg.Path(), item.Path) - } else if pkg.Name() != item.Name { - errorf("internal error: getPackages returned wrong name %s for package %q, want %s", pkg.Name(), item.Path, item.Name) - } - p.pkgCache[item.pathOffset] = pkg - p.pkgIndex[pkg] = item.nameIndex - pkgList[i] = pkg - } - - if bundle { - pkgs = make([]*types.Package, r.uint64()) - for i := range pkgs { - pkg := p.pkgAt(r.uint64()) - imps := make([]*types.Package, r.uint64()) - for j := range imps { - imps[j] = p.pkgAt(r.uint64()) - } - pkg.SetImports(imps) - pkgs[i] = pkg - } - } else { - if len(pkgList) == 0 { - errorf("no packages found for %s", path) - panic("unreachable") - } - pkgs = pkgList[:1] - - // record all referenced packages as imports - list := append(([]*types.Package)(nil), pkgList[1:]...) - sort.Sort(byPath(list)) - pkgs[0].SetImports(list) - } - - for _, pkg := range pkgs { - if pkg.Complete() { - continue - } - - names := make([]string, 0, len(p.pkgIndex[pkg])) - for name := range p.pkgIndex[pkg] { - names = append(names, name) - } - sort.Strings(names) - for _, name := range names { - p.doDecl(pkg, name) - } - - // package was imported completely and without errors - pkg.MarkComplete() - } - - // SetConstraint can't be called if the constraint type is not yet complete. - // When type params are created in the typeParamTag case of (*importReader).obj(), - // the associated constraint type may not be complete due to recursion. - // Therefore, we defer calling SetConstraint there, and call it here instead - // after all types are complete. - for _, d := range p.later { - d.t.SetConstraint(d.constraint) - } - - for _, typ := range p.interfaceList { - typ.Complete() - } - - // Workaround for golang/go#61561. See the doc for instanceList for details. 
- for _, typ := range p.instanceList { - if iface, _ := typ.Underlying().(*types.Interface); iface != nil { - iface.Complete() - } - } - - return pkgs, nil -} - -type setConstraintArgs struct { - t *types.TypeParam - constraint types.Type -} - -type iimporter struct { - version int - ipath string - - aliases bool - shallow bool - reportf ReportFunc // if non-nil, used to report bugs - - stringData []byte - stringCache map[uint64]string - fileOffset []uint64 // fileOffset[i] is offset in fileData for info about file encoded as i - fileData []byte - fileCache []*token.File // memoized decoding of file encoded as i - pkgCache map[uint64]*types.Package - - declData []byte - pkgIndex map[*types.Package]map[string]uint64 - typCache map[uint64]types.Type - tparamIndex map[ident]types.Type - - fake fakeFileSet - interfaceList []*types.Interface - - // Workaround for the go/types bug golang/go#61561: instances produced during - // instantiation may contain incomplete interfaces. Here we only complete the - // underlying type of the instance, which is the most common case but doesn't - // handle parameterized interface literals defined deeper in the type. - instanceList []types.Type // instances for later completion (see golang/go#61561) - - // Arguments for calls to SetConstraint that are deferred due to recursive types - later []setConstraintArgs - - indent int // for tracing support -} - -func (p *iimporter) trace(format string, args ...interface{}) { - if !trace { - // Call sites should also be guarded, but having this check here allows - // easily enabling/disabling debug trace statements. - return - } - fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...) -} - -func (p *iimporter) doDecl(pkg *types.Package, name string) { - if debug { - p.trace("import decl %s", name) - p.indent++ - defer func() { - p.indent-- - p.trace("=> %s", name) - }() - } - // See if we've already imported this declaration. - if obj := pkg.Scope().Lookup(name); obj != nil { - return - } - - off, ok := p.pkgIndex[pkg][name] - if !ok { - // In deep mode, the index should be complete. In shallow - // mode, we should have already recursively loaded necessary - // dependencies so the above Lookup succeeds. - errorf("%v.%v not in index", pkg, name) - } - - r := &importReader{p: p, currPkg: pkg} - r.declReader.Reset(p.declData[off:]) - - r.obj(name) -} - -func (p *iimporter) stringAt(off uint64) string { - if s, ok := p.stringCache[off]; ok { - return s - } - - slen, n := binary.Uvarint(p.stringData[off:]) - if n <= 0 { - errorf("varint failed") - } - spos := off + uint64(n) - s := string(p.stringData[spos : spos+slen]) - p.stringCache[off] = s - return s -} - -func (p *iimporter) fileAt(index uint64) *token.File { - file := p.fileCache[index] - if file == nil { - off := p.fileOffset[index] - file = p.decodeFile(intReader{bytes.NewReader(p.fileData[off:]), p.ipath}) - p.fileCache[index] = file - } - return file -} - -func (p *iimporter) decodeFile(rd intReader) *token.File { - filename := p.stringAt(rd.uint64()) - size := int(rd.uint64()) - file := p.fake.fset.AddFile(filename, -1, size) - - // SetLines requires a nondecreasing sequence. - // Because it is common for clients to derive the interval - // [start, start+len(name)] from a start position, and we - // want to ensure that the end offset is on the same line, - // we fill in the gaps of the sparse encoding with values - // that strictly increase by the largest possible amount. 
- // This allows us to avoid having to record the actual end - // offset of each needed line. - - lines := make([]int, int(rd.uint64())) - var index, offset int - for i, n := 0, int(rd.uint64()); i < n; i++ { - index += int(rd.uint64()) - offset += int(rd.uint64()) - lines[index] = offset - - // Ensure monotonicity between points. - for j := index - 1; j > 0 && lines[j] == 0; j-- { - lines[j] = lines[j+1] - 1 - } - } - - // Ensure monotonicity after last point. - for j := len(lines) - 1; j > 0 && lines[j] == 0; j-- { - size-- - lines[j] = size - } - - if !file.SetLines(lines) { - errorf("SetLines failed: %d", lines) // can't happen - } - return file -} - -func (p *iimporter) pkgAt(off uint64) *types.Package { - if pkg, ok := p.pkgCache[off]; ok { - return pkg - } - path := p.stringAt(off) - errorf("missing package %q in %q", path, p.ipath) - return nil -} - -func (p *iimporter) typAt(off uint64, base *types.Named) types.Type { - if t, ok := p.typCache[off]; ok && canReuse(base, t) { - return t - } - - if off < predeclReserved { - errorf("predeclared type missing from cache: %v", off) - } - - r := &importReader{p: p} - r.declReader.Reset(p.declData[off-predeclReserved:]) - t := r.doType(base) - - if canReuse(base, t) { - p.typCache[off] = t - } - return t -} - -// canReuse reports whether the type rhs on the RHS of the declaration for def -// may be re-used. -// -// Specifically, if def is non-nil and rhs is an interface type with methods, it -// may not be re-used because we have a convention of setting the receiver type -// for interface methods to def. -func canReuse(def *types.Named, rhs types.Type) bool { - if def == nil { - return true - } - iface, _ := aliases.Unalias(rhs).(*types.Interface) - if iface == nil { - return true - } - // Don't use iface.Empty() here as iface may not be complete. - return iface.NumEmbeddeds() == 0 && iface.NumExplicitMethods() == 0 -} - -type importReader struct { - p *iimporter - declReader bytes.Reader - currPkg *types.Package - prevFile string - prevLine int64 - prevColumn int64 -} - -func (r *importReader) obj(name string) { - tag := r.byte() - pos := r.pos() - - switch tag { - case aliasTag: - typ := r.typ() - // TODO(adonovan): support generic aliases: - // if tag == genericAliasTag { - // tparams := r.tparamList() - // alias.SetTypeParams(tparams) - // } - r.declare(aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ)) - - case constTag: - typ, val := r.value() - - r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) - - case funcTag, genericFuncTag: - var tparams []*types.TypeParam - if tag == genericFuncTag { - tparams = r.tparamList() - } - sig := r.signature(nil, nil, tparams) - r.declare(types.NewFunc(pos, r.currPkg, name, sig)) - - case typeTag, genericTypeTag: - // Types can be recursive. We need to setup a stub - // declaration before recursing. - obj := types.NewTypeName(pos, r.currPkg, name, nil) - named := types.NewNamed(obj, nil, nil) - // Declare obj before calling r.tparamList, so the new type name is recognized - // if used in the constraint of one of its own typeparams (see #48280). 
- r.declare(obj) - if tag == genericTypeTag { - tparams := r.tparamList() - named.SetTypeParams(tparams) - } - - underlying := r.p.typAt(r.uint64(), named).Underlying() - named.SetUnderlying(underlying) - - if !isInterface(underlying) { - for n := r.uint64(); n > 0; n-- { - mpos := r.pos() - mname := r.ident() - recv := r.param() - - // If the receiver has any targs, set those as the - // rparams of the method (since those are the - // typeparams being used in the method sig/body). - _, recvNamed := typesinternal.ReceiverNamed(recv) - targs := recvNamed.TypeArgs() - var rparams []*types.TypeParam - if targs.Len() > 0 { - rparams = make([]*types.TypeParam, targs.Len()) - for i := range rparams { - rparams[i] = aliases.Unalias(targs.At(i)).(*types.TypeParam) - } - } - msig := r.signature(recv, rparams, nil) - - named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig)) - } - } - - case typeParamTag: - // We need to "declare" a typeparam in order to have a name that - // can be referenced recursively (if needed) in the type param's - // bound. - if r.p.version < iexportVersionGenerics { - errorf("unexpected type param type") - } - name0 := tparamName(name) - tn := types.NewTypeName(pos, r.currPkg, name0, nil) - t := types.NewTypeParam(tn, nil) - - // To handle recursive references to the typeparam within its - // bound, save the partial type in tparamIndex before reading the bounds. - id := ident{r.currPkg, name} - r.p.tparamIndex[id] = t - var implicit bool - if r.p.version >= iexportVersionGo1_18 { - implicit = r.bool() - } - constraint := r.typ() - if implicit { - iface, _ := aliases.Unalias(constraint).(*types.Interface) - if iface == nil { - errorf("non-interface constraint marked implicit") - } - iface.MarkImplicit() - } - // The constraint type may not be complete, if we - // are in the middle of a type recursion involving type - // constraints. So, we defer SetConstraint until we have - // completely set up all types in ImportData. - r.p.later = append(r.p.later, setConstraintArgs{t: t, constraint: constraint}) - - case varTag: - typ := r.typ() - - r.declare(types.NewVar(pos, r.currPkg, name, typ)) - - default: - errorf("unexpected tag: %v", tag) - } -} - -func (r *importReader) declare(obj types.Object) { - obj.Pkg().Scope().Insert(obj) -} - -func (r *importReader) value() (typ types.Type, val constant.Value) { - typ = r.typ() - if r.p.version >= iexportVersionGo1_18 { - // TODO: add support for using the kind. 
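The implicit flag read above corresponds to constraints written without an explicit interface{...} wrapper; they are modeled as interfaces with MarkImplicit set. A small source-level sketch of a constraint that takes this path, using an invented Sum function:

package example

// The constraint ~int | ~int64 is shorthand for interface{ ~int | ~int64 };
// it is exported as an implicit interface and MarkImplicit is called on it
// when it is read back in.
func Sum[T ~int | ~int64](xs []T) T {
	var total T
	for _, x := range xs {
		total += x
	}
	return total
}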
- _ = constant.Kind(r.int64()) - } - - switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { - case types.IsBoolean: - val = constant.MakeBool(r.bool()) - - case types.IsString: - val = constant.MakeString(r.string()) - - case types.IsInteger: - var x big.Int - r.mpint(&x, b) - val = constant.Make(&x) - - case types.IsFloat: - val = r.mpfloat(b) - - case types.IsComplex: - re := r.mpfloat(b) - im := r.mpfloat(b) - val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) - - default: - if b.Kind() == types.Invalid { - val = constant.MakeUnknown() - return - } - errorf("unexpected type %v", typ) // panics - panic("unreachable") - } - - return -} - -func intSize(b *types.Basic) (signed bool, maxBytes uint) { - if (b.Info() & types.IsUntyped) != 0 { - return true, 64 - } - - switch b.Kind() { - case types.Float32, types.Complex64: - return true, 3 - case types.Float64, types.Complex128: - return true, 7 - } - - signed = (b.Info() & types.IsUnsigned) == 0 - switch b.Kind() { - case types.Int8, types.Uint8: - maxBytes = 1 - case types.Int16, types.Uint16: - maxBytes = 2 - case types.Int32, types.Uint32: - maxBytes = 4 - default: - maxBytes = 8 - } - - return -} - -func (r *importReader) mpint(x *big.Int, typ *types.Basic) { - signed, maxBytes := intSize(typ) - - maxSmall := 256 - maxBytes - if signed { - maxSmall = 256 - 2*maxBytes - } - if maxBytes == 1 { - maxSmall = 256 - } - - n, _ := r.declReader.ReadByte() - if uint(n) < maxSmall { - v := int64(n) - if signed { - v >>= 1 - if n&1 != 0 { - v = ^v - } - } - x.SetInt64(v) - return - } - - v := -n - if signed { - v = -(n &^ 1) >> 1 - } - if v < 1 || uint(v) > maxBytes { - errorf("weird decoding: %v, %v => %v", n, signed, v) - } - b := make([]byte, v) - io.ReadFull(&r.declReader, b) - x.SetBytes(b) - if signed && n&1 != 0 { - x.Neg(x) - } -} - -func (r *importReader) mpfloat(typ *types.Basic) constant.Value { - var mant big.Int - r.mpint(&mant, typ) - var f big.Float - f.SetInt(&mant) - if f.Sign() != 0 { - f.SetMantExp(&f, int(r.int64())) - } - return constant.Make(&f) -} - -func (r *importReader) ident() string { - return r.string() -} - -func (r *importReader) qualifiedIdent() (*types.Package, string) { - name := r.string() - pkg := r.pkg() - return pkg, name -} - -func (r *importReader) pos() token.Pos { - if r.p.shallow { - // precise offsets are encoded only in shallow mode - return r.posv2() - } - if r.p.version >= iexportVersionPosCol { - r.posv1() - } else { - r.posv0() - } - - if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 { - return token.NoPos - } - return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn)) -} - -func (r *importReader) posv0() { - delta := r.int64() - if delta != deltaNewFile { - r.prevLine += delta - } else if l := r.int64(); l == -1 { - r.prevLine += deltaNewFile - } else { - r.prevFile = r.string() - r.prevLine = l - } -} - -func (r *importReader) posv1() { - delta := r.int64() - r.prevColumn += delta >> 1 - if delta&1 != 0 { - delta = r.int64() - r.prevLine += delta >> 1 - if delta&1 != 0 { - r.prevFile = r.string() - } - } -} - -func (r *importReader) posv2() token.Pos { - file := r.uint64() - if file == 0 { - return token.NoPos - } - tf := r.p.fileAt(file - 1) - return tf.Pos(int(r.uint64())) -} - -func (r *importReader) typ() types.Type { - return r.p.typAt(r.uint64(), nil) -} - -func isInterface(t types.Type) bool { - _, ok := aliases.Unalias(t).(*types.Interface) - return ok -} - -func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) } 
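The types.IsComplex branch of value() above rebuilds a constant as real plus imaginary·i using go/constant. A standalone sketch of that construction, with arbitrary sample values:

package main

import (
	"fmt"
	"go/constant"
	"go/token"
)

func main() {
	re := constant.MakeFloat64(1.5)
	im := constant.MakeFloat64(-2)

	// re + im*i, the same shape as the importer's complex case above.
	c := constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
	fmt.Println(c, constant.Real(c), constant.Imag(c))
}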
-func (r *importReader) string() string { return r.p.stringAt(r.uint64()) } - -func (r *importReader) doType(base *types.Named) (res types.Type) { - k := r.kind() - if debug { - r.p.trace("importing type %d (base: %s)", k, base) - r.p.indent++ - defer func() { - r.p.indent-- - r.p.trace("=> %s", res) - }() - } - switch k { - default: - errorf("unexpected kind tag in %q: %v", r.p.ipath, k) - return nil - - case aliasType, definedType: - pkg, name := r.qualifiedIdent() - r.p.doDecl(pkg, name) - return pkg.Scope().Lookup(name).(*types.TypeName).Type() - case pointerType: - return types.NewPointer(r.typ()) - case sliceType: - return types.NewSlice(r.typ()) - case arrayType: - n := r.uint64() - return types.NewArray(r.typ(), int64(n)) - case chanType: - dir := chanDir(int(r.uint64())) - return types.NewChan(dir, r.typ()) - case mapType: - return types.NewMap(r.typ(), r.typ()) - case signatureType: - r.currPkg = r.pkg() - return r.signature(nil, nil, nil) - - case structType: - r.currPkg = r.pkg() - - fields := make([]*types.Var, r.uint64()) - tags := make([]string, len(fields)) - for i := range fields { - var field *types.Var - if r.p.shallow { - field, _ = r.objectPathObject().(*types.Var) - } - - fpos := r.pos() - fname := r.ident() - ftyp := r.typ() - emb := r.bool() - tag := r.string() - - // Either this is not a shallow import, the field is local, or the - // encoded objectPath failed to produce an object (a bug). - // - // Even in this last, buggy case, fall back on creating a new field. As - // discussed in iexport.go, this is not correct, but mostly works and is - // preferable to failing (for now at least). - if field == nil { - field = types.NewField(fpos, r.currPkg, fname, ftyp, emb) - } - - fields[i] = field - tags[i] = tag - } - return types.NewStruct(fields, tags) - - case interfaceType: - r.currPkg = r.pkg() - - embeddeds := make([]types.Type, r.uint64()) - for i := range embeddeds { - _ = r.pos() - embeddeds[i] = r.typ() - } - - methods := make([]*types.Func, r.uint64()) - for i := range methods { - var method *types.Func - if r.p.shallow { - method, _ = r.objectPathObject().(*types.Func) - } - - mpos := r.pos() - mname := r.ident() - - // TODO(mdempsky): Matches bimport.go, but I - // don't agree with this. - var recv *types.Var - if base != nil { - recv = types.NewVar(token.NoPos, r.currPkg, "", base) - } - msig := r.signature(recv, nil, nil) - - if method == nil { - method = types.NewFunc(mpos, r.currPkg, mname, msig) - } - methods[i] = method - } - - typ := newInterface(methods, embeddeds) - r.p.interfaceList = append(r.p.interfaceList, typ) - return typ - - case typeParamType: - if r.p.version < iexportVersionGenerics { - errorf("unexpected type param type") - } - pkg, name := r.qualifiedIdent() - id := ident{pkg, name} - if t, ok := r.p.tparamIndex[id]; ok { - // We're already in the process of importing this typeparam. - return t - } - // Otherwise, import the definition of the typeparam now. - r.p.doDecl(pkg, name) - return r.p.tparamIndex[id] - - case instanceType: - if r.p.version < iexportVersionGenerics { - errorf("unexpected instantiation type") - } - // pos does not matter for instances: they are positioned on the original - // type. - _ = r.pos() - len := r.uint64() - targs := make([]types.Type, len) - for i := range targs { - targs[i] = r.typ() - } - baseType := r.typ() - // The imported instantiated type doesn't include any methods, so - // we must always use the methods of the base (orig) type. 
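In shallow mode, the struct and interface cases above resolve fields and methods through golang.org/x/tools/go/types/objectpath instead of constructing fresh objects. A minimal round-trip with that package, type-checking an invented toy package p with a struct S:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/objectpath"
)

const src = `package p
type S struct{ F int }`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	field := pkg.Scope().Lookup("S").Type().Underlying().(*types.Struct).Field(0)

	// Encode the field as a package-relative path, then resolve it again.
	path, err := objectpath.For(field)
	if err != nil {
		panic(err)
	}
	obj, err := objectpath.Object(pkg, path)
	fmt.Println(path, obj, err)
}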
- // TODO provide a non-nil *Environment - t, _ := types.Instantiate(nil, baseType, targs, false) - - // Workaround for golang/go#61561. See the doc for instanceList for details. - r.p.instanceList = append(r.p.instanceList, t) - return t - - case unionType: - if r.p.version < iexportVersionGenerics { - errorf("unexpected instantiation type") - } - terms := make([]*types.Term, r.uint64()) - for i := range terms { - terms[i] = types.NewTerm(r.bool(), r.typ()) - } - return types.NewUnion(terms) - } -} - -func (r *importReader) kind() itag { - return itag(r.uint64()) -} - -// objectPathObject is the inverse of exportWriter.objectPath. -// -// In shallow mode, certain fields and methods may need to be looked up in an -// imported package. See the doc for exportWriter.objectPath for a full -// explanation. -func (r *importReader) objectPathObject() types.Object { - objPath := objectpath.Path(r.string()) - if objPath == "" { - return nil - } - pkg := r.pkg() - obj, err := objectpath.Object(pkg, objPath) - if err != nil { - if r.p.reportf != nil { - r.p.reportf("failed to find object for objectPath %q: %v", objPath, err) - } - } - return obj -} - -func (r *importReader) signature(recv *types.Var, rparams []*types.TypeParam, tparams []*types.TypeParam) *types.Signature { - params := r.paramList() - results := r.paramList() - variadic := params.Len() > 0 && r.bool() - return types.NewSignatureType(recv, rparams, tparams, params, results, variadic) -} - -func (r *importReader) tparamList() []*types.TypeParam { - n := r.uint64() - if n == 0 { - return nil - } - xs := make([]*types.TypeParam, n) - for i := range xs { - // Note: the standard library importer is tolerant of nil types here, - // though would panic in SetTypeParams. - xs[i] = aliases.Unalias(r.typ()).(*types.TypeParam) - } - return xs -} - -func (r *importReader) paramList() *types.Tuple { - xs := make([]*types.Var, r.uint64()) - for i := range xs { - xs[i] = r.param() - } - return types.NewTuple(xs...) -} - -func (r *importReader) param() *types.Var { - pos := r.pos() - name := r.ident() - typ := r.typ() - return types.NewParam(pos, r.currPkg, name, typ) -} - -func (r *importReader) bool() bool { - return r.uint64() != 0 -} - -func (r *importReader) int64() int64 { - n, err := binary.ReadVarint(&r.declReader) - if err != nil { - errorf("readVarint: %v", err) - } - return n -} - -func (r *importReader) uint64() uint64 { - n, err := binary.ReadUvarint(&r.declReader) - if err != nil { - errorf("readUvarint: %v", err) - } - return n -} - -func (r *importReader) byte() byte { - x, err := r.declReader.ReadByte() - if err != nil { - errorf("declReader.ReadByte: %v", err) - } - return x -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go deleted file mode 100644 index 8b163e3d0..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.11 -// +build !go1.11 - -package gcimporter - -import "go/types" - -func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { - named := make([]*types.Named, len(embeddeds)) - for i, e := range embeddeds { - var ok bool - named[i], ok = e.(*types.Named) - if !ok { - panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11") - } - } - return types.NewInterface(methods, named) -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go deleted file mode 100644 index 49984f40f..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.11 -// +build go1.11 - -package gcimporter - -import "go/types" - -func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { - return types.NewInterfaceType(methods, embeddeds) -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go deleted file mode 100644 index 0cd3b91b6..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gcimporter - -import "go/types" - -const iexportVersion = iexportVersionGenerics - -// additionalPredeclared returns additional predeclared types in go.1.18. -func additionalPredeclared() []types.Type { - return []types.Type{ - // comparable - types.Universe.Lookup("comparable").Type(), - - // any - types.Universe.Lookup("any").Type(), - } -} - -// See cmd/compile/internal/types.SplitVargenSuffix. -func splitVargenSuffix(name string) (base, suffix string) { - i := len(name) - for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { - i-- - } - const dot = "·" - if i >= len(dot) && name[i-len(dot):i] == dot { - i -= len(dot) - return name[:i], name[i:] - } - return name, "" -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go deleted file mode 100644 index 38b624cad..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !goexperiment.unified -// +build !goexperiment.unified - -package gcimporter - -const unifiedIR = false diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go deleted file mode 100644 index b5118d0b3..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build goexperiment.unified -// +build goexperiment.unified - -package gcimporter - -const unifiedIR = true diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go deleted file mode 100644 index 2c0770688..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ /dev/null @@ -1,728 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Derived from go/internal/gcimporter/ureader.go - -package gcimporter - -import ( - "fmt" - "go/token" - "go/types" - "sort" - "strings" - - "golang.org/x/tools/internal/aliases" - "golang.org/x/tools/internal/pkgbits" -) - -// A pkgReader holds the shared state for reading a unified IR package -// description. -type pkgReader struct { - pkgbits.PkgDecoder - - fake fakeFileSet - - ctxt *types.Context - imports map[string]*types.Package // previously imported packages, indexed by path - aliases bool // create types.Alias nodes - - // lazily initialized arrays corresponding to the unified IR - // PosBase, Pkg, and Type sections, respectively. - posBases []string // position bases (i.e., file names) - pkgs []*types.Package - typs []types.Type - - // laterFns holds functions that need to be invoked at the end of - // import reading. - laterFns []func() - // laterFors is used in case of 'type A B' to ensure that B is processed before A. - laterFors map[types.Type]int - - // ifaces holds a list of constructed Interfaces, which need to have - // Complete called after importing is done. - ifaces []*types.Interface -} - -// later adds a function to be invoked at the end of import reading. -func (pr *pkgReader) later(fn func()) { - pr.laterFns = append(pr.laterFns, fn) -} - -// See cmd/compile/internal/noder.derivedInfo. -type derivedInfo struct { - idx pkgbits.Index - needed bool -} - -// See cmd/compile/internal/noder.typeInfo. -type typeInfo struct { - idx pkgbits.Index - derived bool -} - -func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { - if !debug { - defer func() { - if x := recover(); x != nil { - err = fmt.Errorf("internal error in importing %q (%v); please report an issue", path, x) - } - }() - } - - s := string(data) - s = s[:strings.LastIndex(s, "\n$$\n")] - input := pkgbits.NewPkgDecoder(path, s) - pkg = readUnifiedPackage(fset, nil, imports, input) - return -} - -// laterFor adds a function to be invoked at the end of import reading, and records the type that function is finishing. -func (pr *pkgReader) laterFor(t types.Type, fn func()) { - if pr.laterFors == nil { - pr.laterFors = make(map[types.Type]int) - } - pr.laterFors[t] = len(pr.laterFns) - pr.laterFns = append(pr.laterFns, fn) -} - -// readUnifiedPackage reads a package description from the given -// unified IR export data decoder. 
-func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[string]*types.Package, input pkgbits.PkgDecoder) *types.Package { - pr := pkgReader{ - PkgDecoder: input, - - fake: fakeFileSet{ - fset: fset, - files: make(map[string]*fileInfo), - }, - - ctxt: ctxt, - imports: imports, - aliases: aliases.Enabled(), - - posBases: make([]string, input.NumElems(pkgbits.RelocPosBase)), - pkgs: make([]*types.Package, input.NumElems(pkgbits.RelocPkg)), - typs: make([]types.Type, input.NumElems(pkgbits.RelocType)), - } - defer pr.fake.setLines() - - r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) - pkg := r.pkg() - r.Bool() // has init - - for i, n := 0, r.Len(); i < n; i++ { - // As if r.obj(), but avoiding the Scope.Lookup call, - // to avoid eager loading of imports. - r.Sync(pkgbits.SyncObject) - assert(!r.Bool()) - r.p.objIdx(r.Reloc(pkgbits.RelocObj)) - assert(r.Len() == 0) - } - - r.Sync(pkgbits.SyncEOF) - - for _, fn := range pr.laterFns { - fn() - } - - for _, iface := range pr.ifaces { - iface.Complete() - } - - // Imports() of pkg are all of the transitive packages that were loaded. - var imps []*types.Package - for _, imp := range pr.pkgs { - if imp != nil && imp != pkg { - imps = append(imps, imp) - } - } - sort.Sort(byPath(imps)) - pkg.SetImports(imps) - - pkg.MarkComplete() - return pkg -} - -// A reader holds the state for reading a single unified IR element -// within a package. -type reader struct { - pkgbits.Decoder - - p *pkgReader - - dict *readerDict -} - -// A readerDict holds the state for type parameters that parameterize -// the current unified IR element. -type readerDict struct { - // bounds is a slice of typeInfos corresponding to the underlying - // bounds of the element's type parameters. - bounds []typeInfo - - // tparams is a slice of the constructed TypeParams for the element. - tparams []*types.TypeParam - - // devived is a slice of types derived from tparams, which may be - // instantiated while reading the current element. - derived []derivedInfo - derivedTypes []types.Type // lazily instantiated from derived -} - -func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader { - return &reader{ - Decoder: pr.NewDecoder(k, idx, marker), - p: pr, - } -} - -func (pr *pkgReader) tempReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader { - return &reader{ - Decoder: pr.TempDecoder(k, idx, marker), - p: pr, - } -} - -func (pr *pkgReader) retireReader(r *reader) { - pr.RetireDecoder(&r.Decoder) -} - -// @@@ Positions - -func (r *reader) pos() token.Pos { - r.Sync(pkgbits.SyncPos) - if !r.Bool() { - return token.NoPos - } - - // TODO(mdempsky): Delta encoding. - posBase := r.posBase() - line := r.Uint() - col := r.Uint() - return r.p.fake.pos(posBase, int(line), int(col)) -} - -func (r *reader) posBase() string { - return r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase)) -} - -func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) string { - if b := pr.posBases[idx]; b != "" { - return b - } - - var filename string - { - r := pr.tempReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase) - - // Within types2, position bases have a lot more details (e.g., - // keeping track of where //line directives appeared exactly). - // - // For go/types, we just track the file name. 
- - filename = r.String() - - if r.Bool() { // file base - // Was: "b = token.NewTrimmedFileBase(filename, true)" - } else { // line base - pos := r.pos() - line := r.Uint() - col := r.Uint() - - // Was: "b = token.NewLineBase(pos, filename, true, line, col)" - _, _, _ = pos, line, col - } - pr.retireReader(r) - } - b := filename - pr.posBases[idx] = b - return b -} - -// @@@ Packages - -func (r *reader) pkg() *types.Package { - r.Sync(pkgbits.SyncPkg) - return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg)) -} - -func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package { - // TODO(mdempsky): Consider using some non-nil pointer to indicate - // the universe scope, so we don't need to keep re-reading it. - if pkg := pr.pkgs[idx]; pkg != nil { - return pkg - } - - pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg() - pr.pkgs[idx] = pkg - return pkg -} - -func (r *reader) doPkg() *types.Package { - path := r.String() - switch path { - case "": - path = r.p.PkgPath() - case "builtin": - return nil // universe - case "unsafe": - return types.Unsafe - } - - if pkg := r.p.imports[path]; pkg != nil { - return pkg - } - - name := r.String() - - pkg := types.NewPackage(path, name) - r.p.imports[path] = pkg - - return pkg -} - -// @@@ Types - -func (r *reader) typ() types.Type { - return r.p.typIdx(r.typInfo(), r.dict) -} - -func (r *reader) typInfo() typeInfo { - r.Sync(pkgbits.SyncType) - if r.Bool() { - return typeInfo{idx: pkgbits.Index(r.Len()), derived: true} - } - return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false} -} - -func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types.Type { - idx := info.idx - var where *types.Type - if info.derived { - where = &dict.derivedTypes[idx] - idx = dict.derived[idx].idx - } else { - where = &pr.typs[idx] - } - - if typ := *where; typ != nil { - return typ - } - - var typ types.Type - { - r := pr.tempReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx) - r.dict = dict - - typ = r.doTyp() - assert(typ != nil) - pr.retireReader(r) - } - // See comment in pkgReader.typIdx explaining how this happens. 
- if prev := *where; prev != nil { - return prev - } - - *where = typ - return typ -} - -func (r *reader) doTyp() (res types.Type) { - switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag { - default: - errorf("unhandled type tag: %v", tag) - panic("unreachable") - - case pkgbits.TypeBasic: - return types.Typ[r.Len()] - - case pkgbits.TypeNamed: - obj, targs := r.obj() - name := obj.(*types.TypeName) - if len(targs) != 0 { - t, _ := types.Instantiate(r.p.ctxt, name.Type(), targs, false) - return t - } - return name.Type() - - case pkgbits.TypeTypeParam: - return r.dict.tparams[r.Len()] - - case pkgbits.TypeArray: - len := int64(r.Uint64()) - return types.NewArray(r.typ(), len) - case pkgbits.TypeChan: - dir := types.ChanDir(r.Len()) - return types.NewChan(dir, r.typ()) - case pkgbits.TypeMap: - return types.NewMap(r.typ(), r.typ()) - case pkgbits.TypePointer: - return types.NewPointer(r.typ()) - case pkgbits.TypeSignature: - return r.signature(nil, nil, nil) - case pkgbits.TypeSlice: - return types.NewSlice(r.typ()) - case pkgbits.TypeStruct: - return r.structType() - case pkgbits.TypeInterface: - return r.interfaceType() - case pkgbits.TypeUnion: - return r.unionType() - } -} - -func (r *reader) structType() *types.Struct { - fields := make([]*types.Var, r.Len()) - var tags []string - for i := range fields { - pos := r.pos() - pkg, name := r.selector() - ftyp := r.typ() - tag := r.String() - embedded := r.Bool() - - fields[i] = types.NewField(pos, pkg, name, ftyp, embedded) - if tag != "" { - for len(tags) < i { - tags = append(tags, "") - } - tags = append(tags, tag) - } - } - return types.NewStruct(fields, tags) -} - -func (r *reader) unionType() *types.Union { - terms := make([]*types.Term, r.Len()) - for i := range terms { - terms[i] = types.NewTerm(r.Bool(), r.typ()) - } - return types.NewUnion(terms) -} - -func (r *reader) interfaceType() *types.Interface { - methods := make([]*types.Func, r.Len()) - embeddeds := make([]types.Type, r.Len()) - implicit := len(methods) == 0 && len(embeddeds) == 1 && r.Bool() - - for i := range methods { - pos := r.pos() - pkg, name := r.selector() - mtyp := r.signature(nil, nil, nil) - methods[i] = types.NewFunc(pos, pkg, name, mtyp) - } - - for i := range embeddeds { - embeddeds[i] = r.typ() - } - - iface := types.NewInterfaceType(methods, embeddeds) - if implicit { - iface.MarkImplicit() - } - - // We need to call iface.Complete(), but if there are any embedded - // defined types, then we may not have set their underlying - // interface type yet. So we need to defer calling Complete until - // after we've called SetUnderlying everywhere. - // - // TODO(mdempsky): After CL 424876 lands, it should be safe to call - // iface.Complete() immediately. - r.p.ifaces = append(r.p.ifaces, iface) - - return iface -} - -func (r *reader) signature(recv *types.Var, rtparams, tparams []*types.TypeParam) *types.Signature { - r.Sync(pkgbits.SyncSignature) - - params := r.params() - results := r.params() - variadic := r.Bool() - - return types.NewSignatureType(recv, rtparams, tparams, params, results, variadic) -} - -func (r *reader) params() *types.Tuple { - r.Sync(pkgbits.SyncParams) - - params := make([]*types.Var, r.Len()) - for i := range params { - params[i] = r.param() - } - - return types.NewTuple(params...) 
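The pkgbits.TypeNamed case above calls types.Instantiate against the reader's shared types.Context so that identical instantiations are deduplicated. A self-contained sketch of the same call, first type-checking an invented generic Pair type:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p
type Pair[K comparable, V any] struct {
	Key K
	Val V
}`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}

	// Instantiate Pair[string, int]; sharing a *types.Context lets identical
	// instantiations be deduplicated, as in doTyp above.
	ctxt := types.NewContext()
	pair := pkg.Scope().Lookup("Pair").Type()
	inst, err := types.Instantiate(ctxt, pair, []types.Type{types.Typ[types.String], types.Typ[types.Int]}, true)
	if err != nil {
		panic(err)
	}
	fmt.Println(inst) // e.g. p.Pair[string,int]
}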
-} - -func (r *reader) param() *types.Var { - r.Sync(pkgbits.SyncParam) - - pos := r.pos() - pkg, name := r.localIdent() - typ := r.typ() - - return types.NewParam(pos, pkg, name, typ) -} - -// @@@ Objects - -func (r *reader) obj() (types.Object, []types.Type) { - r.Sync(pkgbits.SyncObject) - - assert(!r.Bool()) - - pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj)) - obj := pkgScope(pkg).Lookup(name) - - targs := make([]types.Type, r.Len()) - for i := range targs { - targs[i] = r.typ() - } - - return obj, targs -} - -func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { - - var objPkg *types.Package - var objName string - var tag pkgbits.CodeObj - { - rname := pr.tempReader(pkgbits.RelocName, idx, pkgbits.SyncObject1) - - objPkg, objName = rname.qualifiedIdent() - assert(objName != "") - - tag = pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj)) - pr.retireReader(rname) - } - - if tag == pkgbits.ObjStub { - assert(objPkg == nil || objPkg == types.Unsafe) - return objPkg, objName - } - - // Ignore local types promoted to global scope (#55110). - if _, suffix := splitVargenSuffix(objName); suffix != "" { - return objPkg, objName - } - - if objPkg.Scope().Lookup(objName) == nil { - dict := pr.objDictIdx(idx) - - r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1) - r.dict = dict - - declare := func(obj types.Object) { - objPkg.Scope().Insert(obj) - } - - switch tag { - default: - panic("weird") - - case pkgbits.ObjAlias: - pos := r.pos() - typ := r.typ() - declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ)) - - case pkgbits.ObjConst: - pos := r.pos() - typ := r.typ() - val := r.Value() - declare(types.NewConst(pos, objPkg, objName, typ, val)) - - case pkgbits.ObjFunc: - pos := r.pos() - tparams := r.typeParamNames() - sig := r.signature(nil, nil, tparams) - declare(types.NewFunc(pos, objPkg, objName, sig)) - - case pkgbits.ObjType: - pos := r.pos() - - obj := types.NewTypeName(pos, objPkg, objName, nil) - named := types.NewNamed(obj, nil, nil) - declare(obj) - - named.SetTypeParams(r.typeParamNames()) - - setUnderlying := func(underlying types.Type) { - // If the underlying type is an interface, we need to - // duplicate its methods so we can replace the receiver - // parameter's type (#49906). - if iface, ok := aliases.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 { - methods := make([]*types.Func, iface.NumExplicitMethods()) - for i := range methods { - fn := iface.ExplicitMethod(i) - sig := fn.Type().(*types.Signature) - - recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named) - methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic())) - } - - embeds := make([]types.Type, iface.NumEmbeddeds()) - for i := range embeds { - embeds[i] = iface.EmbeddedType(i) - } - - newIface := types.NewInterfaceType(methods, embeds) - r.p.ifaces = append(r.p.ifaces, newIface) - underlying = newIface - } - - named.SetUnderlying(underlying) - } - - // Since go.dev/cl/455279, we can assume rhs.Underlying() will - // always be non-nil. However, to temporarily support users of - // older snapshot releases, we continue to fallback to the old - // behavior for now. - // - // TODO(mdempsky): Remove fallback code and simplify after - // allowing time for snapshot users to upgrade. 
- rhs := r.typ() - if underlying := rhs.Underlying(); underlying != nil { - setUnderlying(underlying) - } else { - pk := r.p - pk.laterFor(named, func() { - // First be sure that the rhs is initialized, if it needs to be initialized. - delete(pk.laterFors, named) // prevent cycles - if i, ok := pk.laterFors[rhs]; ok { - f := pk.laterFns[i] - pk.laterFns[i] = func() {} // function is running now, so replace it with a no-op - f() // initialize RHS - } - setUnderlying(rhs.Underlying()) - }) - } - - for i, n := 0, r.Len(); i < n; i++ { - named.AddMethod(r.method()) - } - - case pkgbits.ObjVar: - pos := r.pos() - typ := r.typ() - declare(types.NewVar(pos, objPkg, objName, typ)) - } - } - - return objPkg, objName -} - -func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict { - - var dict readerDict - - { - r := pr.tempReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1) - if implicits := r.Len(); implicits != 0 { - errorf("unexpected object with %v implicit type parameter(s)", implicits) - } - - dict.bounds = make([]typeInfo, r.Len()) - for i := range dict.bounds { - dict.bounds[i] = r.typInfo() - } - - dict.derived = make([]derivedInfo, r.Len()) - dict.derivedTypes = make([]types.Type, len(dict.derived)) - for i := range dict.derived { - dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()} - } - - pr.retireReader(r) - } - // function references follow, but reader doesn't need those - - return &dict -} - -func (r *reader) typeParamNames() []*types.TypeParam { - r.Sync(pkgbits.SyncTypeParamNames) - - // Note: This code assumes it only processes objects without - // implement type parameters. This is currently fine, because - // reader is only used to read in exported declarations, which are - // always package scoped. - - if len(r.dict.bounds) == 0 { - return nil - } - - // Careful: Type parameter lists may have cycles. To allow for this, - // we construct the type parameter list in two passes: first we - // create all the TypeNames and TypeParams, then we construct and - // set the bound type. - - r.dict.tparams = make([]*types.TypeParam, len(r.dict.bounds)) - for i := range r.dict.bounds { - pos := r.pos() - pkg, name := r.localIdent() - - tname := types.NewTypeName(pos, pkg, name, nil) - r.dict.tparams[i] = types.NewTypeParam(tname, nil) - } - - typs := make([]types.Type, len(r.dict.bounds)) - for i, bound := range r.dict.bounds { - typs[i] = r.p.typIdx(bound, r.dict) - } - - // TODO(mdempsky): This is subtle, elaborate further. - // - // We have to save tparams outside of the closure, because - // typeParamNames() can be called multiple times with the same - // dictionary instance. - // - // Also, this needs to happen later to make sure SetUnderlying has - // been called. - // - // TODO(mdempsky): Is it safe to have a single "later" slice or do - // we need to have multiple passes? See comments on CL 386002 and - // go.dev/issue/52104. - tparams := r.dict.tparams - r.p.later(func() { - for i, typ := range typs { - tparams[i].SetConstraint(typ) - } - }) - - return r.dict.tparams -} - -func (r *reader) method() *types.Func { - r.Sync(pkgbits.SyncMethod) - pos := r.pos() - pkg, name := r.selector() - - rparams := r.typeParamNames() - sig := r.signature(r.param(), rparams, nil) - - _ = r.pos() // TODO(mdempsky): Remove; this is a hacker for linker.go. 
- return types.NewFunc(pos, pkg, name, sig) -} - -func (r *reader) qualifiedIdent() (*types.Package, string) { return r.ident(pkgbits.SyncSym) } -func (r *reader) localIdent() (*types.Package, string) { return r.ident(pkgbits.SyncLocalIdent) } -func (r *reader) selector() (*types.Package, string) { return r.ident(pkgbits.SyncSelector) } - -func (r *reader) ident(marker pkgbits.SyncMarker) (*types.Package, string) { - r.Sync(marker) - return r.pkg(), r.String() -} - -// pkgScope returns pkg.Scope(). -// If pkg is nil, it returns types.Universe instead. -// -// TODO(mdempsky): Remove after x/tools can depend on Go 1.19. -func pkgScope(pkg *types.Package) *types.Scope { - if pkg != nil { - return pkg.Scope() - } - return types.Universe -} diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go deleted file mode 100644 index 44719de17..000000000 --- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package packagesinternal exposes internal-only fields from go/packages. -package packagesinternal - -var GetForTest = func(p interface{}) string { return "" } -var GetDepsErrors = func(p interface{}) []*PackageError { return nil } - -type PackageError struct { - ImportStack []string // shortest path from package named on command line to this one - Pos string // position of error (if present, file:line:col) - Err string // the error itself -} - -var TypecheckCgo int -var DepsErrors int // must be set as a LoadMode to call GetDepsErrors -var ForTest int // must be set as a LoadMode to call GetForTest - -var SetModFlag = func(config interface{}, value string) {} -var SetModFile = func(config interface{}, value string) {} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/codes.go b/vendor/golang.org/x/tools/internal/pkgbits/codes.go deleted file mode 100644 index f0cabde96..000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/codes.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkgbits - -// A Code is an enum value that can be encoded into bitstreams. -// -// Code types are preferable for enum types, because they allow -// Decoder to detect desyncs. -type Code interface { - // Marker returns the SyncMarker for the Code's dynamic type. - Marker() SyncMarker - - // Value returns the Code's ordinal value. - Value() int -} - -// A CodeVal distinguishes among go/constant.Value encodings. -type CodeVal int - -func (c CodeVal) Marker() SyncMarker { return SyncVal } -func (c CodeVal) Value() int { return int(c) } - -// Note: These values are public and cannot be changed without -// updating the go/types importers. - -const ( - ValBool CodeVal = iota - ValString - ValInt64 - ValBigInt - ValBigRat - ValBigFloat -) - -// A CodeType distinguishes among go/types.Type encodings. -type CodeType int - -func (c CodeType) Marker() SyncMarker { return SyncType } -func (c CodeType) Value() int { return int(c) } - -// Note: These values are public and cannot be changed without -// updating the go/types importers. 
- -const ( - TypeBasic CodeType = iota - TypeNamed - TypePointer - TypeSlice - TypeArray - TypeChan - TypeMap - TypeSignature - TypeStruct - TypeInterface - TypeUnion - TypeTypeParam -) - -// A CodeObj distinguishes among go/types.Object encodings. -type CodeObj int - -func (c CodeObj) Marker() SyncMarker { return SyncCodeObj } -func (c CodeObj) Value() int { return int(c) } - -// Note: These values are public and cannot be changed without -// updating the go/types importers. - -const ( - ObjAlias CodeObj = iota - ObjConst - ObjType - ObjFunc - ObjVar - ObjStub -) diff --git a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go deleted file mode 100644 index 2acd85851..000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go +++ /dev/null @@ -1,521 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkgbits - -import ( - "encoding/binary" - "errors" - "fmt" - "go/constant" - "go/token" - "io" - "math/big" - "os" - "runtime" - "strings" -) - -// A PkgDecoder provides methods for decoding a package's Unified IR -// export data. -type PkgDecoder struct { - // version is the file format version. - version uint32 - - // aliases determines whether types.Aliases should be created - aliases bool - - // sync indicates whether the file uses sync markers. - sync bool - - // pkgPath is the package path for the package to be decoded. - // - // TODO(mdempsky): Remove; unneeded since CL 391014. - pkgPath string - - // elemData is the full data payload of the encoded package. - // Elements are densely and contiguously packed together. - // - // The last 8 bytes of elemData are the package fingerprint. - elemData string - - // elemEnds stores the byte-offset end positions of element - // bitstreams within elemData. - // - // For example, element I's bitstream data starts at elemEnds[I-1] - // (or 0, if I==0) and ends at elemEnds[I]. - // - // Note: elemEnds is indexed by absolute indices, not - // section-relative indices. - elemEnds []uint32 - - // elemEndsEnds stores the index-offset end positions of relocation - // sections within elemEnds. - // - // For example, section K's end positions start at elemEndsEnds[K-1] - // (or 0, if K==0) and end at elemEndsEnds[K]. - elemEndsEnds [numRelocs]uint32 - - scratchRelocEnt []RelocEnt -} - -// PkgPath returns the package path for the package -// -// TODO(mdempsky): Remove; unneeded since CL 391014. -func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath } - -// SyncMarkers reports whether pr uses sync markers. -func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync } - -// NewPkgDecoder returns a PkgDecoder initialized to read the Unified -// IR export data from input. pkgPath is the package path for the -// compilation unit that produced the export data. -// -// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014. -func NewPkgDecoder(pkgPath, input string) PkgDecoder { - pr := PkgDecoder{ - pkgPath: pkgPath, - //aliases: aliases.Enabled(), - } - - // TODO(mdempsky): Implement direct indexing of input string to - // avoid copying the position information. 
- - r := strings.NewReader(input) - - assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil) - - switch pr.version { - default: - panic(fmt.Errorf("unsupported version: %v", pr.version)) - case 0: - // no flags - case 1: - var flags uint32 - assert(binary.Read(r, binary.LittleEndian, &flags) == nil) - pr.sync = flags&flagSyncMarkers != 0 - } - - assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil) - - pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1]) - assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil) - - pos, err := r.Seek(0, io.SeekCurrent) - assert(err == nil) - - pr.elemData = input[pos:] - assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1])) - - return pr -} - -// NumElems returns the number of elements in section k. -func (pr *PkgDecoder) NumElems(k RelocKind) int { - count := int(pr.elemEndsEnds[k]) - if k > 0 { - count -= int(pr.elemEndsEnds[k-1]) - } - return count -} - -// TotalElems returns the total number of elements across all sections. -func (pr *PkgDecoder) TotalElems() int { - return len(pr.elemEnds) -} - -// Fingerprint returns the package fingerprint. -func (pr *PkgDecoder) Fingerprint() [8]byte { - var fp [8]byte - copy(fp[:], pr.elemData[len(pr.elemData)-8:]) - return fp -} - -// AbsIdx returns the absolute index for the given (section, index) -// pair. -func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int { - absIdx := int(idx) - if k > 0 { - absIdx += int(pr.elemEndsEnds[k-1]) - } - if absIdx >= int(pr.elemEndsEnds[k]) { - errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) - } - return absIdx -} - -// DataIdx returns the raw element bitstream for the given (section, -// index) pair. -func (pr *PkgDecoder) DataIdx(k RelocKind, idx Index) string { - absIdx := pr.AbsIdx(k, idx) - - var start uint32 - if absIdx > 0 { - start = pr.elemEnds[absIdx-1] - } - end := pr.elemEnds[absIdx] - - return pr.elemData[start:end] -} - -// StringIdx returns the string value for the given string index. -func (pr *PkgDecoder) StringIdx(idx Index) string { - return pr.DataIdx(RelocString, idx) -} - -// NewDecoder returns a Decoder for the given (section, index) pair, -// and decodes the given SyncMarker from the element bitstream. -func (pr *PkgDecoder) NewDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder { - r := pr.NewDecoderRaw(k, idx) - r.Sync(marker) - return r -} - -// TempDecoder returns a Decoder for the given (section, index) pair, -// and decodes the given SyncMarker from the element bitstream. -// If possible the Decoder should be RetireDecoder'd when it is no longer -// needed, this will avoid heap allocations. -func (pr *PkgDecoder) TempDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder { - r := pr.TempDecoderRaw(k, idx) - r.Sync(marker) - return r -} - -func (pr *PkgDecoder) RetireDecoder(d *Decoder) { - pr.scratchRelocEnt = d.Relocs - d.Relocs = nil -} - -// NewDecoderRaw returns a Decoder for the given (section, index) pair. -// -// Most callers should use NewDecoder instead. -func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder { - r := Decoder{ - common: pr, - k: k, - Idx: idx, - } - - // TODO(mdempsky) r.data.Reset(...) after #44505 is resolved. 
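AbsIdx and DataIdx above map a (section, index) pair to a byte range of elemData through two layers of end-offset tables. A tiny standalone model of that arithmetic, with made-up offsets for two sections holding 2 and 3 elements:

package main

import "fmt"

func main() {
	// Hypothetical sample data mirroring elemEndsEnds/elemEnds above.
	elemEndsEnds := []uint32{2, 5}         // per-section end positions within elemEnds
	elemEnds := []uint32{4, 9, 12, 20, 27} // per-element byte end offsets within elemData
	k, idx := 1, 2                         // third element of section 1

	// AbsIdx: offset the section-relative index by the previous section's end.
	absIdx := idx
	if k > 0 {
		absIdx += int(elemEndsEnds[k-1])
	}

	// DataIdx: the element's bytes run from the previous end to its own end.
	var start uint32
	if absIdx > 0 {
		start = elemEnds[absIdx-1]
	}
	end := elemEnds[absIdx]
	fmt.Println(absIdx, start, end) // 4 20 27
}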
-	r.Data = *strings.NewReader(pr.DataIdx(k, idx))
-
-	r.Sync(SyncRelocs)
-	r.Relocs = make([]RelocEnt, r.Len())
-	for i := range r.Relocs {
-		r.Sync(SyncReloc)
-		r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
-	}
-
-	return r
-}
-
-func (pr *PkgDecoder) TempDecoderRaw(k RelocKind, idx Index) Decoder {
-	r := Decoder{
-		common: pr,
-		k:      k,
-		Idx:    idx,
-	}
-
-	r.Data.Reset(pr.DataIdx(k, idx))
-	r.Sync(SyncRelocs)
-	l := r.Len()
-	if cap(pr.scratchRelocEnt) >= l {
-		r.Relocs = pr.scratchRelocEnt[:l]
-		pr.scratchRelocEnt = nil
-	} else {
-		r.Relocs = make([]RelocEnt, l)
-	}
-	for i := range r.Relocs {
-		r.Sync(SyncReloc)
-		r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
-	}
-
-	return r
-}
-
-// A Decoder provides methods for decoding an individual element's
-// bitstream data.
-type Decoder struct {
-	common *PkgDecoder
-
-	Relocs []RelocEnt
-	Data   strings.Reader
-
-	k   RelocKind
-	Idx Index
-}
-
-func (r *Decoder) checkErr(err error) {
-	if err != nil {
-		errorf("unexpected decoding error: %w", err)
-	}
-}
-
-func (r *Decoder) rawUvarint() uint64 {
-	x, err := readUvarint(&r.Data)
-	r.checkErr(err)
-	return x
-}
-
-// readUvarint is a type-specialized copy of encoding/binary.ReadUvarint.
-// This avoids the interface conversion and thus has better escape properties,
-// which flows up the stack.
-func readUvarint(r *strings.Reader) (uint64, error) {
-	var x uint64
-	var s uint
-	for i := 0; i < binary.MaxVarintLen64; i++ {
-		b, err := r.ReadByte()
-		if err != nil {
-			if i > 0 && err == io.EOF {
-				err = io.ErrUnexpectedEOF
-			}
-			return x, err
-		}
-		if b < 0x80 {
-			if i == binary.MaxVarintLen64-1 && b > 1 {
-				return x, overflow
-			}
-			return x | uint64(b)<<s, nil
-		}
-		x |= uint64(b&0x7f) << s
-		s += 7
-	}
-	return x, overflow
-}
-
-var overflow = errors.New("pkgbits: readUvarint: varint overflows a 64-bit integer")
-
-func (r *Decoder) rawVarint() int64 {
-	ux := r.rawUvarint()
-
-	// Zig-zag decode.
-	x := int64(ux >> 1)
-	if ux&1 != 0 {
-		x = ^x
-	}
-	return x
-}
-
-func (r *Decoder) rawReloc(k RelocKind, idx int) Index {
-	e := r.Relocs[idx]
-	assert(e.Kind == k)
-	return e.Idx
-}
-
-// Sync decodes a sync marker from the element bitstream and asserts
-// that it matches the expected marker.
-//
-// If r.common.sync is false, then Sync is a no-op.
-func (r *Decoder) Sync(mWant SyncMarker) {
-	if !r.common.sync {
-		return
-	}
-
-	pos, _ := r.Data.Seek(0, io.SeekCurrent)
-	mHave := SyncMarker(r.rawUvarint())
-	writerPCs := make([]int, r.rawUvarint())
-	for i := range writerPCs {
-		writerPCs[i] = int(r.rawUvarint())
-	}
-
-	if mHave == mWant {
-		return
-	}
-
-	// There's some tension here between printing:
-	//
-	// (1) full file paths that tools can recognize (e.g., so emacs
-	// hyperlinks the "file:line" text for easy navigation), or
-	//
-	// (2) short file paths that are easier for humans to read (e.g., by
-	// omitting redundant or irrelevant details, so it's easier to
-	// focus on the useful bits that remain).
-	//
-	// The current formatting favors the former, as it seems more
-	// helpful in practice. But perhaps the formatting could be improved
-	// to better address both concerns. For example, use relative file
-	// paths if they would be shorter, or rewrite file paths to contain
-	// "$GOROOT" (like objabi.AbsFile does) if tools can be taught how
-	// to reliably expand that again.
- - fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.Idx, pos) - - fmt.Printf("\nfound %v, written at:\n", mHave) - if len(writerPCs) == 0 { - fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath) - } - for _, pc := range writerPCs { - fmt.Printf("\t%s\n", r.common.StringIdx(r.rawReloc(RelocString, pc))) - } - - fmt.Printf("\nexpected %v, reading at:\n", mWant) - var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size? - n := runtime.Callers(2, readerPCs[:]) - for _, pc := range fmtFrames(readerPCs[:n]...) { - fmt.Printf("\t%s\n", pc) - } - - // We already printed a stack trace for the reader, so now we can - // simply exit. Printing a second one with panic or base.Fatalf - // would just be noise. - os.Exit(1) -} - -// Bool decodes and returns a bool value from the element bitstream. -func (r *Decoder) Bool() bool { - r.Sync(SyncBool) - x, err := r.Data.ReadByte() - r.checkErr(err) - assert(x < 2) - return x != 0 -} - -// Int64 decodes and returns an int64 value from the element bitstream. -func (r *Decoder) Int64() int64 { - r.Sync(SyncInt64) - return r.rawVarint() -} - -// Uint64 decodes and returns a uint64 value from the element bitstream. -func (r *Decoder) Uint64() uint64 { - r.Sync(SyncUint64) - return r.rawUvarint() -} - -// Len decodes and returns a non-negative int value from the element bitstream. -func (r *Decoder) Len() int { x := r.Uint64(); v := int(x); assert(uint64(v) == x); return v } - -// Int decodes and returns an int value from the element bitstream. -func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v } - -// Uint decodes and returns a uint value from the element bitstream. -func (r *Decoder) Uint() uint { x := r.Uint64(); v := uint(x); assert(uint64(v) == x); return v } - -// Code decodes a Code value from the element bitstream and returns -// its ordinal value. It's the caller's responsibility to convert the -// result to an appropriate Code type. -// -// TODO(mdempsky): Ideally this method would have signature "Code[T -// Code] T" instead, but we don't allow generic methods and the -// compiler can't depend on generics yet anyway. -func (r *Decoder) Code(mark SyncMarker) int { - r.Sync(mark) - return r.Len() -} - -// Reloc decodes a relocation of expected section k from the element -// bitstream and returns an index to the referenced element. -func (r *Decoder) Reloc(k RelocKind) Index { - r.Sync(SyncUseReloc) - return r.rawReloc(k, r.Len()) -} - -// String decodes and returns a string value from the element -// bitstream. -func (r *Decoder) String() string { - r.Sync(SyncString) - return r.common.StringIdx(r.Reloc(RelocString)) -} - -// Strings decodes and returns a variable-length slice of strings from -// the element bitstream. -func (r *Decoder) Strings() []string { - res := make([]string, r.Len()) - for i := range res { - res[i] = r.String() - } - return res -} - -// Value decodes and returns a constant.Value from the element -// bitstream. 
-func (r *Decoder) Value() constant.Value { - r.Sync(SyncValue) - isComplex := r.Bool() - val := r.scalar() - if isComplex { - val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar())) - } - return val -} - -func (r *Decoder) scalar() constant.Value { - switch tag := CodeVal(r.Code(SyncVal)); tag { - default: - panic(fmt.Errorf("unexpected scalar tag: %v", tag)) - - case ValBool: - return constant.MakeBool(r.Bool()) - case ValString: - return constant.MakeString(r.String()) - case ValInt64: - return constant.MakeInt64(r.Int64()) - case ValBigInt: - return constant.Make(r.bigInt()) - case ValBigRat: - num := r.bigInt() - denom := r.bigInt() - return constant.Make(new(big.Rat).SetFrac(num, denom)) - case ValBigFloat: - return constant.Make(r.bigFloat()) - } -} - -func (r *Decoder) bigInt() *big.Int { - v := new(big.Int).SetBytes([]byte(r.String())) - if r.Bool() { - v.Neg(v) - } - return v -} - -func (r *Decoder) bigFloat() *big.Float { - v := new(big.Float).SetPrec(512) - assert(v.UnmarshalText([]byte(r.String())) == nil) - return v -} - -// @@@ Helpers - -// TODO(mdempsky): These should probably be removed. I think they're a -// smell that the export data format is not yet quite right. - -// PeekPkgPath returns the package path for the specified package -// index. -func (pr *PkgDecoder) PeekPkgPath(idx Index) string { - var path string - { - r := pr.TempDecoder(RelocPkg, idx, SyncPkgDef) - path = r.String() - pr.RetireDecoder(&r) - } - if path == "" { - path = pr.pkgPath - } - return path -} - -// PeekObj returns the package path, object name, and CodeObj for the -// specified object index. -func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) { - var ridx Index - var name string - var rcode int - { - r := pr.TempDecoder(RelocName, idx, SyncObject1) - r.Sync(SyncSym) - r.Sync(SyncPkg) - ridx = r.Reloc(RelocPkg) - name = r.String() - rcode = r.Code(SyncCodeObj) - pr.RetireDecoder(&r) - } - - path := pr.PeekPkgPath(ridx) - assert(name != "") - - tag := CodeObj(rcode) - - return path, name, tag -} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/doc.go b/vendor/golang.org/x/tools/internal/pkgbits/doc.go deleted file mode 100644 index c8a2796b5..000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/doc.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package pkgbits implements low-level coding abstractions for -// Unified IR's export data format. -// -// At a low-level, a package is a collection of bitstream elements. -// Each element has a "kind" and a dense, non-negative index. -// Elements can be randomly accessed given their kind and index. -// -// Individual elements are sequences of variable-length values (e.g., -// integers, booleans, strings, go/constant values, cross-references -// to other elements). Package pkgbits provides APIs for encoding and -// decoding these low-level values, but the details of mapping -// higher-level Go constructs into elements is left to higher-level -// abstractions. -// -// Elements may cross-reference each other with "relocations." For -// example, an element representing a pointer type has a relocation -// referring to the element type. -// -// Go constructs may be composed as a constellation of multiple -// elements. 
For example, a declared function may have one element to -// describe the object (e.g., its name, type, position), and a -// separate element to describe its function body. This allows readers -// some flexibility in efficiently seeking or re-reading data (e.g., -// inlining requires re-reading the function body for each inlined -// call, without needing to re-read the object-level details). -// -// This is a copy of internal/pkgbits in the Go implementation. -package pkgbits diff --git a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go deleted file mode 100644 index 6482617a4..000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go +++ /dev/null @@ -1,383 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkgbits - -import ( - "bytes" - "crypto/md5" - "encoding/binary" - "go/constant" - "io" - "math/big" - "runtime" -) - -// currentVersion is the current version number. -// -// - v0: initial prototype -// -// - v1: adds the flags uint32 word -const currentVersion uint32 = 1 - -// A PkgEncoder provides methods for encoding a package's Unified IR -// export data. -type PkgEncoder struct { - // elems holds the bitstream for previously encoded elements. - elems [numRelocs][]string - - // stringsIdx maps previously encoded strings to their index within - // the RelocString section, to allow deduplication. That is, - // elems[RelocString][stringsIdx[s]] == s (if present). - stringsIdx map[string]Index - - // syncFrames is the number of frames to write at each sync - // marker. A negative value means sync markers are omitted. - syncFrames int -} - -// SyncMarkers reports whether pw uses sync markers. -func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 } - -// NewPkgEncoder returns an initialized PkgEncoder. -// -// syncFrames is the number of caller frames that should be serialized -// at Sync points. Serializing additional frames results in larger -// export data files, but can help diagnosing desync errors in -// higher-level Unified IR reader/writer code. If syncFrames is -// negative, then sync markers are omitted entirely. -func NewPkgEncoder(syncFrames int) PkgEncoder { - return PkgEncoder{ - stringsIdx: make(map[string]Index), - syncFrames: syncFrames, - } -} - -// DumpTo writes the package's encoded data to out0 and returns the -// package fingerprint. -func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) { - h := md5.New() - out := io.MultiWriter(out0, h) - - writeUint32 := func(x uint32) { - assert(binary.Write(out, binary.LittleEndian, x) == nil) - } - - writeUint32(currentVersion) - - var flags uint32 - if pw.SyncMarkers() { - flags |= flagSyncMarkers - } - writeUint32(flags) - - // Write elemEndsEnds. - var sum uint32 - for _, elems := range &pw.elems { - sum += uint32(len(elems)) - writeUint32(sum) - } - - // Write elemEnds. - sum = 0 - for _, elems := range &pw.elems { - for _, elem := range elems { - sum += uint32(len(elem)) - writeUint32(sum) - } - } - - // Write elemData. - for _, elems := range &pw.elems { - for _, elem := range elems { - _, err := io.WriteString(out, elem) - assert(err == nil) - } - } - - // Write fingerprint. - copy(fingerprint[:], h.Sum(nil)) - _, err := out0.Write(fingerprint[:]) - assert(err == nil) - - return -} - -// StringIdx adds a string value to the strings section, if not -// already present, and returns its index. 
-func (pw *PkgEncoder) StringIdx(s string) Index { - if idx, ok := pw.stringsIdx[s]; ok { - assert(pw.elems[RelocString][idx] == s) - return idx - } - - idx := Index(len(pw.elems[RelocString])) - pw.elems[RelocString] = append(pw.elems[RelocString], s) - pw.stringsIdx[s] = idx - return idx -} - -// NewEncoder returns an Encoder for a new element within the given -// section, and encodes the given SyncMarker as the start of the -// element bitstream. -func (pw *PkgEncoder) NewEncoder(k RelocKind, marker SyncMarker) Encoder { - e := pw.NewEncoderRaw(k) - e.Sync(marker) - return e -} - -// NewEncoderRaw returns an Encoder for a new element within the given -// section. -// -// Most callers should use NewEncoder instead. -func (pw *PkgEncoder) NewEncoderRaw(k RelocKind) Encoder { - idx := Index(len(pw.elems[k])) - pw.elems[k] = append(pw.elems[k], "") // placeholder - - return Encoder{ - p: pw, - k: k, - Idx: idx, - } -} - -// An Encoder provides methods for encoding an individual element's -// bitstream data. -type Encoder struct { - p *PkgEncoder - - Relocs []RelocEnt - RelocMap map[RelocEnt]uint32 - Data bytes.Buffer // accumulated element bitstream data - - encodingRelocHeader bool - - k RelocKind - Idx Index // index within relocation section -} - -// Flush finalizes the element's bitstream and returns its Index. -func (w *Encoder) Flush() Index { - var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved - - // Backup the data so we write the relocations at the front. - var tmp bytes.Buffer - io.Copy(&tmp, &w.Data) - - // TODO(mdempsky): Consider writing these out separately so they're - // easier to strip, along with function bodies, so that we can prune - // down to just the data that's relevant to go/types. - if w.encodingRelocHeader { - panic("encodingRelocHeader already true; recursive flush?") - } - w.encodingRelocHeader = true - w.Sync(SyncRelocs) - w.Len(len(w.Relocs)) - for _, rEnt := range w.Relocs { - w.Sync(SyncReloc) - w.Len(int(rEnt.Kind)) - w.Len(int(rEnt.Idx)) - } - - io.Copy(&sb, &w.Data) - io.Copy(&sb, &tmp) - w.p.elems[w.k][w.Idx] = sb.String() - - return w.Idx -} - -func (w *Encoder) checkErr(err error) { - if err != nil { - errorf("unexpected encoding error: %v", err) - } -} - -func (w *Encoder) rawUvarint(x uint64) { - var buf [binary.MaxVarintLen64]byte - n := binary.PutUvarint(buf[:], x) - _, err := w.Data.Write(buf[:n]) - w.checkErr(err) -} - -func (w *Encoder) rawVarint(x int64) { - // Zig-zag encode. - ux := uint64(x) << 1 - if x < 0 { - ux = ^ux - } - - w.rawUvarint(ux) -} - -func (w *Encoder) rawReloc(r RelocKind, idx Index) int { - e := RelocEnt{r, idx} - if w.RelocMap != nil { - if i, ok := w.RelocMap[e]; ok { - return int(i) - } - } else { - w.RelocMap = make(map[RelocEnt]uint32) - } - - i := len(w.Relocs) - w.RelocMap[e] = uint32(i) - w.Relocs = append(w.Relocs, e) - return i -} - -func (w *Encoder) Sync(m SyncMarker) { - if !w.p.SyncMarkers() { - return - } - - // Writing out stack frame string references requires working - // relocations, but writing out the relocations themselves involves - // sync markers. To prevent infinite recursion, we simply trim the - // stack frame for sync markers within the relocation header. - var frames []string - if !w.encodingRelocHeader && w.p.syncFrames > 0 { - pcs := make([]uintptr, w.p.syncFrames) - n := runtime.Callers(2, pcs) - frames = fmtFrames(pcs[:n]...) - } - - // TODO(mdempsky): Save space by writing out stack frames as a - // linked list so we can share common stack frames. 
- w.rawUvarint(uint64(m)) - w.rawUvarint(uint64(len(frames))) - for _, frame := range frames { - w.rawUvarint(uint64(w.rawReloc(RelocString, w.p.StringIdx(frame)))) - } -} - -// Bool encodes and writes a bool value into the element bitstream, -// and then returns the bool value. -// -// For simple, 2-alternative encodings, the idiomatic way to call Bool -// is something like: -// -// if w.Bool(x != 0) { -// // alternative #1 -// } else { -// // alternative #2 -// } -// -// For multi-alternative encodings, use Code instead. -func (w *Encoder) Bool(b bool) bool { - w.Sync(SyncBool) - var x byte - if b { - x = 1 - } - err := w.Data.WriteByte(x) - w.checkErr(err) - return b -} - -// Int64 encodes and writes an int64 value into the element bitstream. -func (w *Encoder) Int64(x int64) { - w.Sync(SyncInt64) - w.rawVarint(x) -} - -// Uint64 encodes and writes a uint64 value into the element bitstream. -func (w *Encoder) Uint64(x uint64) { - w.Sync(SyncUint64) - w.rawUvarint(x) -} - -// Len encodes and writes a non-negative int value into the element bitstream. -func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) } - -// Int encodes and writes an int value into the element bitstream. -func (w *Encoder) Int(x int) { w.Int64(int64(x)) } - -// Uint encodes and writes a uint value into the element bitstream. -func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) } - -// Reloc encodes and writes a relocation for the given (section, -// index) pair into the element bitstream. -// -// Note: Only the index is formally written into the element -// bitstream, so bitstream decoders must know from context which -// section an encoded relocation refers to. -func (w *Encoder) Reloc(r RelocKind, idx Index) { - w.Sync(SyncUseReloc) - w.Len(w.rawReloc(r, idx)) -} - -// Code encodes and writes a Code value into the element bitstream. -func (w *Encoder) Code(c Code) { - w.Sync(c.Marker()) - w.Len(c.Value()) -} - -// String encodes and writes a string value into the element -// bitstream. -// -// Internally, strings are deduplicated by adding them to the strings -// section (if not already present), and then writing a relocation -// into the element bitstream. -func (w *Encoder) String(s string) { - w.Sync(SyncString) - w.Reloc(RelocString, w.p.StringIdx(s)) -} - -// Strings encodes and writes a variable-length slice of strings into -// the element bitstream. -func (w *Encoder) Strings(ss []string) { - w.Len(len(ss)) - for _, s := range ss { - w.String(s) - } -} - -// Value encodes and writes a constant.Value into the element -// bitstream. -func (w *Encoder) Value(val constant.Value) { - w.Sync(SyncValue) - if w.Bool(val.Kind() == constant.Complex) { - w.scalar(constant.Real(val)) - w.scalar(constant.Imag(val)) - } else { - w.scalar(val) - } -} - -func (w *Encoder) scalar(val constant.Value) { - switch v := constant.Val(val).(type) { - default: - errorf("unhandled %v (%v)", val, val.Kind()) - case bool: - w.Code(ValBool) - w.Bool(v) - case string: - w.Code(ValString) - w.String(v) - case int64: - w.Code(ValInt64) - w.Int64(v) - case *big.Int: - w.Code(ValBigInt) - w.bigInt(v) - case *big.Rat: - w.Code(ValBigRat) - w.bigInt(v.Num()) - w.bigInt(v.Denom()) - case *big.Float: - w.Code(ValBigFloat) - w.bigFloat(v) - } -} - -func (w *Encoder) bigInt(v *big.Int) { - b := v.Bytes() - w.String(string(b)) // TODO: More efficient encoding. - w.Bool(v.Sign() < 0) -} - -func (w *Encoder) bigFloat(v *big.Float) { - b := v.Append(nil, 'p', -1) - w.String(string(b)) // TODO: More efficient encoding. 
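The Value and scalar methods above switch on the representation that go/constant exposes via constant.Val, which is documented to be one of bool, string, int64, *big.Int, *big.Rat, or *big.Float. A minimal sketch of that dispatch using only the public go/constant API (describe is an illustrative helper, not part of pkgbits):

```go
package main

import (
	"fmt"
	"go/constant"
	"math/big"
)

// describe reports which underlying representation a constant.Value uses,
// mirroring the switch that scalar performs before encoding.
func describe(v constant.Value) string {
	switch x := constant.Val(v).(type) {
	case bool:
		return fmt.Sprintf("bool %v", x)
	case string:
		return fmt.Sprintf("string %q", x)
	case int64:
		return fmt.Sprintf("int64 %d", x)
	case *big.Int:
		return fmt.Sprintf("*big.Int %v", x)
	case *big.Rat:
		return fmt.Sprintf("*big.Rat %v", x)
	case *big.Float:
		return fmt.Sprintf("*big.Float %s", x.Text('g', -1))
	default:
		return "unknown (e.g. constant.Unknown)"
	}
}

func main() {
	fmt.Println(describe(constant.MakeBool(true)))
	fmt.Println(describe(constant.MakeInt64(42)))
	fmt.Println(describe(constant.MakeString("hi")))
	fmt.Println(describe(constant.MakeFloat64(0.5)))
}
```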
-} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/flags.go b/vendor/golang.org/x/tools/internal/pkgbits/flags.go deleted file mode 100644 index 654222745..000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/flags.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkgbits - -const ( - flagSyncMarkers = 1 << iota // file format contains sync markers -) diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go deleted file mode 100644 index 5294f6a63..000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 -// +build !go1.7 - -// TODO(mdempsky): Remove after #44505 is resolved - -package pkgbits - -import "runtime" - -func walkFrames(pcs []uintptr, visit frameVisitor) { - for _, pc := range pcs { - fn := runtime.FuncForPC(pc) - file, line := fn.FileLine(pc) - - visit(file, line, fn.Name(), pc-fn.Entry()) - } -} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go deleted file mode 100644 index 2324ae7ad..000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.7 -// +build go1.7 - -package pkgbits - -import "runtime" - -// walkFrames calls visit for each call frame represented by pcs. -// -// pcs should be a slice of PCs, as returned by runtime.Callers. -func walkFrames(pcs []uintptr, visit frameVisitor) { - if len(pcs) == 0 { - return - } - - frames := runtime.CallersFrames(pcs) - for { - frame, more := frames.Next() - visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry) - if !more { - return - } - } -} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/reloc.go b/vendor/golang.org/x/tools/internal/pkgbits/reloc.go deleted file mode 100644 index fcdfb97ca..000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/reloc.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkgbits - -// A RelocKind indicates a particular section within a unified IR export. -type RelocKind int32 - -// An Index represents a bitstream element index within a particular -// section. -type Index int32 - -// A relocEnt (relocation entry) is an entry in an element's local -// reference table. -// -// TODO(mdempsky): Rename this too. -type RelocEnt struct { - Kind RelocKind - Idx Index -} - -// Reserved indices within the meta relocation section. 
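The walkFrames helper in frames_go17.go above is the standard runtime.Callers plus runtime.CallersFrames iteration. A standalone sketch of that pattern, formatting frames the same way fmtFrames does (captureFrames is an illustrative name, not the vendored API):

```go
package main

import (
	"fmt"
	"runtime"
)

// captureFrames records up to max call frames above the caller and formats
// them as "file:line: function +0xoffset", like fmtFrames.
func captureFrames(skip, max int) []string {
	pcs := make([]uintptr, max)
	n := runtime.Callers(skip+2, pcs) // +2 skips runtime.Callers and captureFrames
	frames := runtime.CallersFrames(pcs[:n])

	var out []string
	for {
		frame, more := frames.Next()
		out = append(out, fmt.Sprintf("%s:%d: %s +0x%x",
			frame.File, frame.Line, frame.Function, frame.PC-frame.Entry))
		if !more {
			break
		}
	}
	return out
}

func main() {
	for _, f := range captureFrames(0, 8) {
		fmt.Println(f)
	}
}
```

CallersFrames is preferred over calling FuncForPC for each PC (the pre-go1.7 path above) because it expands inlined frames correctly.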
-const ( - PublicRootIdx Index = 0 - PrivateRootIdx Index = 1 -) - -const ( - RelocString RelocKind = iota - RelocMeta - RelocPosBase - RelocPkg - RelocName - RelocType - RelocObj - RelocObjExt - RelocObjDict - RelocBody - - numRelocs = iota -) diff --git a/vendor/golang.org/x/tools/internal/pkgbits/support.go b/vendor/golang.org/x/tools/internal/pkgbits/support.go deleted file mode 100644 index ad26d3b28..000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/support.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkgbits - -import "fmt" - -func assert(b bool) { - if !b { - panic("assertion failed") - } -} - -func errorf(format string, args ...interface{}) { - panic(fmt.Errorf(format, args...)) -} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/sync.go b/vendor/golang.org/x/tools/internal/pkgbits/sync.go deleted file mode 100644 index 5bd51ef71..000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/sync.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkgbits - -import ( - "fmt" - "strings" -) - -// fmtFrames formats a backtrace for reporting reader/writer desyncs. -func fmtFrames(pcs ...uintptr) []string { - res := make([]string, 0, len(pcs)) - walkFrames(pcs, func(file string, line int, name string, offset uintptr) { - // Trim package from function name. It's just redundant noise. - name = strings.TrimPrefix(name, "cmd/compile/internal/noder.") - - res = append(res, fmt.Sprintf("%s:%v: %s +0x%v", file, line, name, offset)) - }) - return res -} - -type frameVisitor func(file string, line int, name string, offset uintptr) - -// SyncMarker is an enum type that represents markers that may be -// written to export data to ensure the reader and writer stay -// synchronized. -type SyncMarker int - -//go:generate stringer -type=SyncMarker -trimprefix=Sync - -const ( - _ SyncMarker = iota - - // Public markers (known to go/types importers). - - // Low-level coding markers. - SyncEOF - SyncBool - SyncInt64 - SyncUint64 - SyncString - SyncValue - SyncVal - SyncRelocs - SyncReloc - SyncUseReloc - - // Higher-level object and type markers. - SyncPublic - SyncPos - SyncPosBase - SyncObject - SyncObject1 - SyncPkg - SyncPkgDef - SyncMethod - SyncType - SyncTypeIdx - SyncTypeParamNames - SyncSignature - SyncParams - SyncParam - SyncCodeObj - SyncSym - SyncLocalIdent - SyncSelector - - // Private markers (only known to cmd/compile). 
- SyncPrivate - - SyncFuncExt - SyncVarExt - SyncTypeExt - SyncPragma - - SyncExprList - SyncExprs - SyncExpr - SyncExprType - SyncAssign - SyncOp - SyncFuncLit - SyncCompLit - - SyncDecl - SyncFuncBody - SyncOpenScope - SyncCloseScope - SyncCloseAnotherScope - SyncDeclNames - SyncDeclName - - SyncStmts - SyncBlockStmt - SyncIfStmt - SyncForStmt - SyncSwitchStmt - SyncRangeStmt - SyncCaseClause - SyncCommClause - SyncSelectStmt - SyncDecls - SyncLabeledStmt - SyncUseObjLocal - SyncAddLocal - SyncLinkname - SyncStmt1 - SyncStmtsEnd - SyncLabel - SyncOptLabel -) diff --git a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go deleted file mode 100644 index 4a5b0ca5f..000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go +++ /dev/null @@ -1,89 +0,0 @@ -// Code generated by "stringer -type=SyncMarker -trimprefix=Sync"; DO NOT EDIT. - -package pkgbits - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[SyncEOF-1] - _ = x[SyncBool-2] - _ = x[SyncInt64-3] - _ = x[SyncUint64-4] - _ = x[SyncString-5] - _ = x[SyncValue-6] - _ = x[SyncVal-7] - _ = x[SyncRelocs-8] - _ = x[SyncReloc-9] - _ = x[SyncUseReloc-10] - _ = x[SyncPublic-11] - _ = x[SyncPos-12] - _ = x[SyncPosBase-13] - _ = x[SyncObject-14] - _ = x[SyncObject1-15] - _ = x[SyncPkg-16] - _ = x[SyncPkgDef-17] - _ = x[SyncMethod-18] - _ = x[SyncType-19] - _ = x[SyncTypeIdx-20] - _ = x[SyncTypeParamNames-21] - _ = x[SyncSignature-22] - _ = x[SyncParams-23] - _ = x[SyncParam-24] - _ = x[SyncCodeObj-25] - _ = x[SyncSym-26] - _ = x[SyncLocalIdent-27] - _ = x[SyncSelector-28] - _ = x[SyncPrivate-29] - _ = x[SyncFuncExt-30] - _ = x[SyncVarExt-31] - _ = x[SyncTypeExt-32] - _ = x[SyncPragma-33] - _ = x[SyncExprList-34] - _ = x[SyncExprs-35] - _ = x[SyncExpr-36] - _ = x[SyncExprType-37] - _ = x[SyncAssign-38] - _ = x[SyncOp-39] - _ = x[SyncFuncLit-40] - _ = x[SyncCompLit-41] - _ = x[SyncDecl-42] - _ = x[SyncFuncBody-43] - _ = x[SyncOpenScope-44] - _ = x[SyncCloseScope-45] - _ = x[SyncCloseAnotherScope-46] - _ = x[SyncDeclNames-47] - _ = x[SyncDeclName-48] - _ = x[SyncStmts-49] - _ = x[SyncBlockStmt-50] - _ = x[SyncIfStmt-51] - _ = x[SyncForStmt-52] - _ = x[SyncSwitchStmt-53] - _ = x[SyncRangeStmt-54] - _ = x[SyncCaseClause-55] - _ = x[SyncCommClause-56] - _ = x[SyncSelectStmt-57] - _ = x[SyncDecls-58] - _ = x[SyncLabeledStmt-59] - _ = x[SyncUseObjLocal-60] - _ = x[SyncAddLocal-61] - _ = x[SyncLinkname-62] - _ = x[SyncStmt1-63] - _ = x[SyncStmtsEnd-64] - _ = x[SyncLabel-65] - _ = x[SyncOptLabel-66] -} - -const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel" - -var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 
424, 432, 437, 445, 450, 458} - -func (i SyncMarker) String() string { - i -= 1 - if i < 0 || i >= SyncMarker(len(_SyncMarker_index)-1) { - return "SyncMarker(" + strconv.FormatInt(int64(i+1), 10) + ")" - } - return _SyncMarker_name[_SyncMarker_index[i]:_SyncMarker_index[i+1]] -} diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go deleted file mode 100644 index ff9437a36..000000000 --- a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// package tokeninternal provides access to some internal features of the token -// package. -package tokeninternal - -import ( - "fmt" - "go/token" - "sort" - "sync" - "unsafe" -) - -// GetLines returns the table of line-start offsets from a token.File. -func GetLines(file *token.File) []int { - // token.File has a Lines method on Go 1.21 and later. - if file, ok := (interface{})(file).(interface{ Lines() []int }); ok { - return file.Lines() - } - - // This declaration must match that of token.File. - // This creates a risk of dependency skew. - // For now we check that the size of the two - // declarations is the same, on the (fragile) assumption - // that future changes would add fields. - type tokenFile119 struct { - _ string - _ int - _ int - mu sync.Mutex // we're not complete monsters - lines []int - _ []struct{} - } - - if unsafe.Sizeof(*file) != unsafe.Sizeof(tokenFile119{}) { - panic("unexpected token.File size") - } - var ptr *tokenFile119 - type uP = unsafe.Pointer - *(*uP)(uP(&ptr)) = uP(file) - ptr.mu.Lock() - defer ptr.mu.Unlock() - return ptr.lines -} - -// AddExistingFiles adds the specified files to the FileSet if they -// are not already present. It panics if any pair of files in the -// resulting FileSet would overlap. -func AddExistingFiles(fset *token.FileSet, files []*token.File) { - // Punch through the FileSet encapsulation. - type tokenFileSet struct { - // This type remained essentially consistent from go1.16 to go1.21. - mutex sync.RWMutex - base int - files []*token.File - _ *token.File // changed to atomic.Pointer[token.File] in go1.19 - } - - // If the size of token.FileSet changes, this will fail to compile. - const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{})) - var _ [-delta * delta]int - - type uP = unsafe.Pointer - var ptr *tokenFileSet - *(*uP)(uP(&ptr)) = uP(fset) - ptr.mutex.Lock() - defer ptr.mutex.Unlock() - - // Merge and sort. - newFiles := append(ptr.files, files...) - sort.Slice(newFiles, func(i, j int) bool { - return newFiles[i].Base() < newFiles[j].Base() - }) - - // Reject overlapping files. - // Discard adjacent identical files. - out := newFiles[:0] - for i, file := range newFiles { - if i > 0 { - prev := newFiles[i-1] - if file == prev { - continue - } - if prev.Base()+prev.Size()+1 > file.Base() { - panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)", - prev.Name(), prev.Base(), prev.Base()+prev.Size(), - file.Name(), file.Base(), file.Base()+file.Size())) - } - } - out = append(out, file) - } - newFiles = out - - ptr.files = newFiles - - // Advance FileSet.Base(). 
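GetLines above first probes for the public (*token.File).Lines method and only then falls back to the unsafe mirror struct. A sketch of just that probe, assuming a toolchain whose token.File provides Lines; the unsafe fallback is deliberately elided:

```go
package main

import (
	"fmt"
	"go/token"
)

// lineStarts returns the line-start offsets of f via the public Lines
// method when it exists; older toolchains would need an unsafe fallback.
func lineStarts(f *token.File) []int {
	if lf, ok := any(f).(interface{ Lines() []int }); ok {
		return lf.Lines()
	}
	return nil // fallback elided in this sketch
}

func main() {
	fset := token.NewFileSet()
	f := fset.AddFile("example.go", -1, 20)
	f.SetLines([]int{0, 5, 12}) // three lines start at these byte offsets
	fmt.Println(lineStarts(f))  // [0 5 12]
}
```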
- if len(newFiles) > 0 { - last := newFiles[len(newFiles)-1] - newBase := last.Base() + last.Size() + 1 - if ptr.base < newBase { - ptr.base = newBase - } - } -} - -// FileSetFor returns a new FileSet containing a sequence of new Files with -// the same base, size, and line as the input files, for use in APIs that -// require a FileSet. -// -// Precondition: the input files must be non-overlapping, and sorted in order -// of their Base. -func FileSetFor(files ...*token.File) *token.FileSet { - fset := token.NewFileSet() - for _, f := range files { - f2 := fset.AddFile(f.Name(), f.Base(), f.Size()) - lines := GetLines(f) - f2.SetLines(lines) - } - return fset -} - -// CloneFileSet creates a new FileSet holding all files in fset. It does not -// create copies of the token.Files in fset: they are added to the resulting -// FileSet unmodified. -func CloneFileSet(fset *token.FileSet) *token.FileSet { - var files []*token.File - fset.Iterate(func(f *token.File) bool { - files = append(files, f) - return true - }) - newFileSet := token.NewFileSet() - AddExistingFiles(newFileSet, files) - return newFileSet -} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go deleted file mode 100644 index 834e05381..000000000 --- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go +++ /dev/null @@ -1,1560 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package typesinternal - -//go:generate stringer -type=ErrorCode - -type ErrorCode int - -// This file defines the error codes that can be produced during type-checking. -// Collectively, these codes provide an identifier that may be used to -// implement special handling for certain types of errors. -// -// Error codes should be fine-grained enough that the exact nature of the error -// can be easily determined, but coarse enough that they are not an -// implementation detail of the type checking algorithm. As a rule-of-thumb, -// errors should be considered equivalent if there is a theoretical refactoring -// of the type checker in which they are emitted in exactly one place. For -// example, the type checker emits different error messages for "too many -// arguments" and "too few arguments", but one can imagine an alternative type -// checker where this check instead just emits a single "wrong number of -// arguments", so these errors should have the same code. -// -// Error code names should be as brief as possible while retaining accuracy and -// distinctiveness. In most cases names should start with an adjective -// describing the nature of the error (e.g. "invalid", "unused", "misplaced"), -// and end with a noun identifying the relevant language object. For example, -// "DuplicateDecl" or "InvalidSliceExpr". For brevity, naming follows the -// convention that "bad" implies a problem with syntax, and "invalid" implies a -// problem with types. - -const ( - // InvalidSyntaxTree occurs if an invalid syntax tree is provided - // to the type checker. It should never happen. - InvalidSyntaxTree ErrorCode = -1 -) - -const ( - _ ErrorCode = iota - - // Test is reserved for errors that only apply while in self-test mode. - Test - - /* package names */ - - // BlankPkgName occurs when a package name is the blank identifier "_". - // - // Per the spec: - // "The PackageName must not be the blank identifier." 
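The FileSetFor and CloneFileSet helpers above rebuild a FileSet from existing files using only public token API calls (Name, Base, Size, plus the line table). A sketch of the same idea, copying every file of one FileSet into a fresh one via Iterate, AddFile, and SetLines; it assumes the line table is all that needs to be preserved, as in FileSetFor:

```go
package main

import (
	"fmt"
	"go/token"
)

// rebuild copies every file of src into a new FileSet, keeping each file's
// name, base, size, and line table, in the spirit of FileSetFor.
func rebuild(src *token.FileSet) *token.FileSet {
	dst := token.NewFileSet()
	src.Iterate(func(f *token.File) bool {
		nf := dst.AddFile(f.Name(), f.Base(), f.Size()) // same base and size
		nf.SetLines(f.Lines())                          // Lines is public since go1.21
		return true                                     // keep iterating
	})
	return dst
}

func main() {
	src := token.NewFileSet()
	f := src.AddFile("a.go", -1, 10)
	f.SetLines([]int{0, 4})

	dst := rebuild(src)
	// A position computed against dst agrees with src: offset 5 is line 2.
	fmt.Println(dst.Position(token.Pos(f.Base() + 5))) // a.go:2:2
}
```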
- BlankPkgName - - // MismatchedPkgName occurs when a file's package name doesn't match the - // package name already established by other files. - MismatchedPkgName - - // InvalidPkgUse occurs when a package identifier is used outside of a - // selector expression. - // - // Example: - // import "fmt" - // - // var _ = fmt - InvalidPkgUse - - /* imports */ - - // BadImportPath occurs when an import path is not valid. - BadImportPath - - // BrokenImport occurs when importing a package fails. - // - // Example: - // import "amissingpackage" - BrokenImport - - // ImportCRenamed occurs when the special import "C" is renamed. "C" is a - // pseudo-package, and must not be renamed. - // - // Example: - // import _ "C" - ImportCRenamed - - // UnusedImport occurs when an import is unused. - // - // Example: - // import "fmt" - // - // func main() {} - UnusedImport - - /* initialization */ - - // InvalidInitCycle occurs when an invalid cycle is detected within the - // initialization graph. - // - // Example: - // var x int = f() - // - // func f() int { return x } - InvalidInitCycle - - /* decls */ - - // DuplicateDecl occurs when an identifier is declared multiple times. - // - // Example: - // var x = 1 - // var x = 2 - DuplicateDecl - - // InvalidDeclCycle occurs when a declaration cycle is not valid. - // - // Example: - // import "unsafe" - // - // type T struct { - // a [n]int - // } - // - // var n = unsafe.Sizeof(T{}) - InvalidDeclCycle - - // InvalidTypeCycle occurs when a cycle in type definitions results in a - // type that is not well-defined. - // - // Example: - // import "unsafe" - // - // type T [unsafe.Sizeof(T{})]int - InvalidTypeCycle - - /* decls > const */ - - // InvalidConstInit occurs when a const declaration has a non-constant - // initializer. - // - // Example: - // var x int - // const _ = x - InvalidConstInit - - // InvalidConstVal occurs when a const value cannot be converted to its - // target type. - // - // TODO(findleyr): this error code and example are not very clear. Consider - // removing it. - // - // Example: - // const _ = 1 << "hello" - InvalidConstVal - - // InvalidConstType occurs when the underlying type in a const declaration - // is not a valid constant type. - // - // Example: - // const c *int = 4 - InvalidConstType - - /* decls > var (+ other variable assignment codes) */ - - // UntypedNilUse occurs when the predeclared (untyped) value nil is used to - // initialize a variable declared without an explicit type. - // - // Example: - // var x = nil - UntypedNilUse - - // WrongAssignCount occurs when the number of values on the right-hand side - // of an assignment or initialization expression does not match the number - // of variables on the left-hand side. - // - // Example: - // var x = 1, 2 - WrongAssignCount - - // UnassignableOperand occurs when the left-hand side of an assignment is - // not assignable. - // - // Example: - // func f() { - // const c = 1 - // c = 2 - // } - UnassignableOperand - - // NoNewVar occurs when a short variable declaration (':=') does not declare - // new variables. - // - // Example: - // func f() { - // x := 1 - // x := 2 - // } - NoNewVar - - // MultiValAssignOp occurs when an assignment operation (+=, *=, etc) does - // not have single-valued left-hand or right-hand side. 
- // - // Per the spec: - // "In assignment operations, both the left- and right-hand expression lists - // must contain exactly one single-valued expression" - // - // Example: - // func f() int { - // x, y := 1, 2 - // x, y += 1 - // return x + y - // } - MultiValAssignOp - - // InvalidIfaceAssign occurs when a value of type T is used as an - // interface, but T does not implement a method of the expected interface. - // - // Example: - // type I interface { - // f() - // } - // - // type T int - // - // var x I = T(1) - InvalidIfaceAssign - - // InvalidChanAssign occurs when a chan assignment is invalid. - // - // Per the spec, a value x is assignable to a channel type T if: - // "x is a bidirectional channel value, T is a channel type, x's type V and - // T have identical element types, and at least one of V or T is not a - // defined type." - // - // Example: - // type T1 chan int - // type T2 chan int - // - // var x T1 - // // Invalid assignment because both types are named - // var _ T2 = x - InvalidChanAssign - - // IncompatibleAssign occurs when the type of the right-hand side expression - // in an assignment cannot be assigned to the type of the variable being - // assigned. - // - // Example: - // var x []int - // var _ int = x - IncompatibleAssign - - // UnaddressableFieldAssign occurs when trying to assign to a struct field - // in a map value. - // - // Example: - // func f() { - // m := make(map[string]struct{i int}) - // m["foo"].i = 42 - // } - UnaddressableFieldAssign - - /* decls > type (+ other type expression codes) */ - - // NotAType occurs when the identifier used as the underlying type in a type - // declaration or the right-hand side of a type alias does not denote a type. - // - // Example: - // var S = 2 - // - // type T S - NotAType - - // InvalidArrayLen occurs when an array length is not a constant value. - // - // Example: - // var n = 3 - // var _ = [n]int{} - InvalidArrayLen - - // BlankIfaceMethod occurs when a method name is '_'. - // - // Per the spec: - // "The name of each explicitly specified method must be unique and not - // blank." - // - // Example: - // type T interface { - // _(int) - // } - BlankIfaceMethod - - // IncomparableMapKey occurs when a map key type does not support the == and - // != operators. - // - // Per the spec: - // "The comparison operators == and != must be fully defined for operands of - // the key type; thus the key type must not be a function, map, or slice." - // - // Example: - // var x map[T]int - // - // type T []int - IncomparableMapKey - - // InvalidIfaceEmbed occurs when a non-interface type is embedded in an - // interface. - // - // Example: - // type T struct {} - // - // func (T) m() - // - // type I interface { - // T - // } - InvalidIfaceEmbed - - // InvalidPtrEmbed occurs when an embedded field is of the pointer form *T, - // and T itself is itself a pointer, an unsafe.Pointer, or an interface. - // - // Per the spec: - // "An embedded field must be specified as a type name T or as a pointer to - // a non-interface type name *T, and T itself may not be a pointer type." - // - // Example: - // type T *int - // - // type S struct { - // *T - // } - InvalidPtrEmbed - - /* decls > func and method */ - - // BadRecv occurs when a method declaration does not have exactly one - // receiver parameter. - // - // Example: - // func () _() {} - BadRecv - - // InvalidRecv occurs when a receiver type expression is not of the form T - // or *T, or T is a pointer type. 
- // - // Example: - // type T struct {} - // - // func (**T) m() {} - InvalidRecv - - // DuplicateFieldAndMethod occurs when an identifier appears as both a field - // and method name. - // - // Example: - // type T struct { - // m int - // } - // - // func (T) m() {} - DuplicateFieldAndMethod - - // DuplicateMethod occurs when two methods on the same receiver type have - // the same name. - // - // Example: - // type T struct {} - // func (T) m() {} - // func (T) m(i int) int { return i } - DuplicateMethod - - /* decls > special */ - - // InvalidBlank occurs when a blank identifier is used as a value or type. - // - // Per the spec: - // "The blank identifier may appear as an operand only on the left-hand side - // of an assignment." - // - // Example: - // var x = _ - InvalidBlank - - // InvalidIota occurs when the predeclared identifier iota is used outside - // of a constant declaration. - // - // Example: - // var x = iota - InvalidIota - - // MissingInitBody occurs when an init function is missing its body. - // - // Example: - // func init() - MissingInitBody - - // InvalidInitSig occurs when an init function declares parameters or - // results. - // - // Example: - // func init() int { return 1 } - InvalidInitSig - - // InvalidInitDecl occurs when init is declared as anything other than a - // function. - // - // Example: - // var init = 1 - InvalidInitDecl - - // InvalidMainDecl occurs when main is declared as anything other than a - // function, in a main package. - InvalidMainDecl - - /* exprs */ - - // TooManyValues occurs when a function returns too many values for the - // expression context in which it is used. - // - // Example: - // func ReturnTwo() (int, int) { - // return 1, 2 - // } - // - // var x = ReturnTwo() - TooManyValues - - // NotAnExpr occurs when a type expression is used where a value expression - // is expected. - // - // Example: - // type T struct {} - // - // func f() { - // T - // } - NotAnExpr - - /* exprs > const */ - - // TruncatedFloat occurs when a float constant is truncated to an integer - // value. - // - // Example: - // var _ int = 98.6 - TruncatedFloat - - // NumericOverflow occurs when a numeric constant overflows its target type. - // - // Example: - // var x int8 = 1000 - NumericOverflow - - /* exprs > operation */ - - // UndefinedOp occurs when an operator is not defined for the type(s) used - // in an operation. - // - // Example: - // var c = "a" - "b" - UndefinedOp - - // MismatchedTypes occurs when operand types are incompatible in a binary - // operation. - // - // Example: - // var a = "hello" - // var b = 1 - // var c = a - b - MismatchedTypes - - // DivByZero occurs when a division operation is provable at compile - // time to be a division by zero. - // - // Example: - // const divisor = 0 - // var x int = 1/divisor - DivByZero - - // NonNumericIncDec occurs when an increment or decrement operator is - // applied to a non-numeric value. - // - // Example: - // func f() { - // var c = "c" - // c++ - // } - NonNumericIncDec - - /* exprs > ptr */ - - // UnaddressableOperand occurs when the & operator is applied to an - // unaddressable expression. - // - // Example: - // var x = &1 - UnaddressableOperand - - // InvalidIndirection occurs when a non-pointer value is indirected via the - // '*' operator. - // - // Example: - // var x int - // var y = *x - InvalidIndirection - - /* exprs > [] */ - - // NonIndexableOperand occurs when an index operation is applied to a value - // that cannot be indexed. 
- // - // Example: - // var x = 1 - // var y = x[1] - NonIndexableOperand - - // InvalidIndex occurs when an index argument is not of integer type, - // negative, or out-of-bounds. - // - // Example: - // var s = [...]int{1,2,3} - // var x = s[5] - // - // Example: - // var s = []int{1,2,3} - // var _ = s[-1] - // - // Example: - // var s = []int{1,2,3} - // var i string - // var _ = s[i] - InvalidIndex - - // SwappedSliceIndices occurs when constant indices in a slice expression - // are decreasing in value. - // - // Example: - // var _ = []int{1,2,3}[2:1] - SwappedSliceIndices - - /* operators > slice */ - - // NonSliceableOperand occurs when a slice operation is applied to a value - // whose type is not sliceable, or is unaddressable. - // - // Example: - // var x = [...]int{1, 2, 3}[:1] - // - // Example: - // var x = 1 - // var y = 1[:1] - NonSliceableOperand - - // InvalidSliceExpr occurs when a three-index slice expression (a[x:y:z]) is - // applied to a string. - // - // Example: - // var s = "hello" - // var x = s[1:2:3] - InvalidSliceExpr - - /* exprs > shift */ - - // InvalidShiftCount occurs when the right-hand side of a shift operation is - // either non-integer, negative, or too large. - // - // Example: - // var ( - // x string - // y int = 1 << x - // ) - InvalidShiftCount - - // InvalidShiftOperand occurs when the shifted operand is not an integer. - // - // Example: - // var s = "hello" - // var x = s << 2 - InvalidShiftOperand - - /* exprs > chan */ - - // InvalidReceive occurs when there is a channel receive from a value that - // is either not a channel, or is a send-only channel. - // - // Example: - // func f() { - // var x = 1 - // <-x - // } - InvalidReceive - - // InvalidSend occurs when there is a channel send to a value that is not a - // channel, or is a receive-only channel. - // - // Example: - // func f() { - // var x = 1 - // x <- "hello!" - // } - InvalidSend - - /* exprs > literal */ - - // DuplicateLitKey occurs when an index is duplicated in a slice, array, or - // map literal. - // - // Example: - // var _ = []int{0:1, 0:2} - // - // Example: - // var _ = map[string]int{"a": 1, "a": 2} - DuplicateLitKey - - // MissingLitKey occurs when a map literal is missing a key expression. - // - // Example: - // var _ = map[string]int{1} - MissingLitKey - - // InvalidLitIndex occurs when the key in a key-value element of a slice or - // array literal is not an integer constant. - // - // Example: - // var i = 0 - // var x = []string{i: "world"} - InvalidLitIndex - - // OversizeArrayLit occurs when an array literal exceeds its length. - // - // Example: - // var _ = [2]int{1,2,3} - OversizeArrayLit - - // MixedStructLit occurs when a struct literal contains a mix of positional - // and named elements. - // - // Example: - // var _ = struct{i, j int}{i: 1, 2} - MixedStructLit - - // InvalidStructLit occurs when a positional struct literal has an incorrect - // number of values. - // - // Example: - // var _ = struct{i, j int}{1,2,3} - InvalidStructLit - - // MissingLitField occurs when a struct literal refers to a field that does - // not exist on the struct type. - // - // Example: - // var _ = struct{i int}{j: 2} - MissingLitField - - // DuplicateLitField occurs when a struct literal contains duplicated - // fields. - // - // Example: - // var _ = struct{i int}{i: 1, i: 2} - DuplicateLitField - - // UnexportedLitField occurs when a positional struct literal implicitly - // assigns an unexported field of an imported type. 
- UnexportedLitField - - // InvalidLitField occurs when a field name is not a valid identifier. - // - // Example: - // var _ = struct{i int}{1: 1} - InvalidLitField - - // UntypedLit occurs when a composite literal omits a required type - // identifier. - // - // Example: - // type outer struct{ - // inner struct { i int } - // } - // - // var _ = outer{inner: {1}} - UntypedLit - - // InvalidLit occurs when a composite literal expression does not match its - // type. - // - // Example: - // type P *struct{ - // x int - // } - // var _ = P {} - InvalidLit - - /* exprs > selector */ - - // AmbiguousSelector occurs when a selector is ambiguous. - // - // Example: - // type E1 struct { i int } - // type E2 struct { i int } - // type T struct { E1; E2 } - // - // var x T - // var _ = x.i - AmbiguousSelector - - // UndeclaredImportedName occurs when a package-qualified identifier is - // undeclared by the imported package. - // - // Example: - // import "go/types" - // - // var _ = types.NotAnActualIdentifier - UndeclaredImportedName - - // UnexportedName occurs when a selector refers to an unexported identifier - // of an imported package. - // - // Example: - // import "reflect" - // - // type _ reflect.flag - UnexportedName - - // UndeclaredName occurs when an identifier is not declared in the current - // scope. - // - // Example: - // var x T - UndeclaredName - - // MissingFieldOrMethod occurs when a selector references a field or method - // that does not exist. - // - // Example: - // type T struct {} - // - // var x = T{}.f - MissingFieldOrMethod - - /* exprs > ... */ - - // BadDotDotDotSyntax occurs when a "..." occurs in a context where it is - // not valid. - // - // Example: - // var _ = map[int][...]int{0: {}} - BadDotDotDotSyntax - - // NonVariadicDotDotDot occurs when a "..." is used on the final argument to - // a non-variadic function. - // - // Example: - // func printArgs(s []string) { - // for _, a := range s { - // println(a) - // } - // } - // - // func f() { - // s := []string{"a", "b", "c"} - // printArgs(s...) - // } - NonVariadicDotDotDot - - // MisplacedDotDotDot occurs when a "..." is used somewhere other than the - // final argument to a function call. - // - // Example: - // func printArgs(args ...int) { - // for _, a := range args { - // println(a) - // } - // } - // - // func f() { - // a := []int{1,2,3} - // printArgs(0, a...) - // } - MisplacedDotDotDot - - // InvalidDotDotDotOperand occurs when a "..." operator is applied to a - // single-valued operand. - // - // Example: - // func printArgs(args ...int) { - // for _, a := range args { - // println(a) - // } - // } - // - // func f() { - // a := 1 - // printArgs(a...) - // } - // - // Example: - // func args() (int, int) { - // return 1, 2 - // } - // - // func printArgs(args ...int) { - // for _, a := range args { - // println(a) - // } - // } - // - // func g() { - // printArgs(args()...) - // } - InvalidDotDotDotOperand - - // InvalidDotDotDot occurs when a "..." is used in a non-variadic built-in - // function. - // - // Example: - // var s = []int{1, 2, 3} - // var l = len(s...) - InvalidDotDotDot - - /* exprs > built-in */ - - // UncalledBuiltin occurs when a built-in function is used as a - // function-valued expression, instead of being called. - // - // Per the spec: - // "The built-in functions do not have standard Go types, so they can only - // appear in call expressions; they cannot be used as function values." 
- // - // Example: - // var _ = copy - UncalledBuiltin - - // InvalidAppend occurs when append is called with a first argument that is - // not a slice. - // - // Example: - // var _ = append(1, 2) - InvalidAppend - - // InvalidCap occurs when an argument to the cap built-in function is not of - // supported type. - // - // See https://golang.org/ref/spec#Lengthand_capacity for information on - // which underlying types are supported as arguments to cap and len. - // - // Example: - // var s = 2 - // var x = cap(s) - InvalidCap - - // InvalidClose occurs when close(...) is called with an argument that is - // not of channel type, or that is a receive-only channel. - // - // Example: - // func f() { - // var x int - // close(x) - // } - InvalidClose - - // InvalidCopy occurs when the arguments are not of slice type or do not - // have compatible type. - // - // See https://golang.org/ref/spec#Appendingand_copying_slices for more - // information on the type requirements for the copy built-in. - // - // Example: - // func f() { - // var x []int - // y := []int64{1,2,3} - // copy(x, y) - // } - InvalidCopy - - // InvalidComplex occurs when the complex built-in function is called with - // arguments with incompatible types. - // - // Example: - // var _ = complex(float32(1), float64(2)) - InvalidComplex - - // InvalidDelete occurs when the delete built-in function is called with a - // first argument that is not a map. - // - // Example: - // func f() { - // m := "hello" - // delete(m, "e") - // } - InvalidDelete - - // InvalidImag occurs when the imag built-in function is called with an - // argument that does not have complex type. - // - // Example: - // var _ = imag(int(1)) - InvalidImag - - // InvalidLen occurs when an argument to the len built-in function is not of - // supported type. - // - // See https://golang.org/ref/spec#Lengthand_capacity for information on - // which underlying types are supported as arguments to cap and len. - // - // Example: - // var s = 2 - // var x = len(s) - InvalidLen - - // SwappedMakeArgs occurs when make is called with three arguments, and its - // length argument is larger than its capacity argument. - // - // Example: - // var x = make([]int, 3, 2) - SwappedMakeArgs - - // InvalidMake occurs when make is called with an unsupported type argument. - // - // See https://golang.org/ref/spec#Makingslices_maps_and_channels for - // information on the types that may be created using make. - // - // Example: - // var x = make(int) - InvalidMake - - // InvalidReal occurs when the real built-in function is called with an - // argument that does not have complex type. - // - // Example: - // var _ = real(int(1)) - InvalidReal - - /* exprs > assertion */ - - // InvalidAssert occurs when a type assertion is applied to a - // value that is not of interface type. - // - // Example: - // var x = 1 - // var _ = x.(float64) - InvalidAssert - - // ImpossibleAssert occurs for a type assertion x.(T) when the value x of - // interface cannot have dynamic type T, due to a missing or mismatching - // method on T. - // - // Example: - // type T int - // - // func (t *T) m() int { return int(*t) } - // - // type I interface { m() int } - // - // var x I - // var _ = x.(T) - ImpossibleAssert - - /* exprs > conversion */ - - // InvalidConversion occurs when the argument type cannot be converted to the - // target. - // - // See https://golang.org/ref/spec#Conversions for the rules of - // convertibility. 
- // - // Example: - // var x float64 - // var _ = string(x) - InvalidConversion - - // InvalidUntypedConversion occurs when an there is no valid implicit - // conversion from an untyped value satisfying the type constraints of the - // context in which it is used. - // - // Example: - // var _ = 1 + "" - InvalidUntypedConversion - - /* offsetof */ - - // BadOffsetofSyntax occurs when unsafe.Offsetof is called with an argument - // that is not a selector expression. - // - // Example: - // import "unsafe" - // - // var x int - // var _ = unsafe.Offsetof(x) - BadOffsetofSyntax - - // InvalidOffsetof occurs when unsafe.Offsetof is called with a method - // selector, rather than a field selector, or when the field is embedded via - // a pointer. - // - // Per the spec: - // - // "If f is an embedded field, it must be reachable without pointer - // indirections through fields of the struct. " - // - // Example: - // import "unsafe" - // - // type T struct { f int } - // type S struct { *T } - // var s S - // var _ = unsafe.Offsetof(s.f) - // - // Example: - // import "unsafe" - // - // type S struct{} - // - // func (S) m() {} - // - // var s S - // var _ = unsafe.Offsetof(s.m) - InvalidOffsetof - - /* control flow > scope */ - - // UnusedExpr occurs when a side-effect free expression is used as a - // statement. Such a statement has no effect. - // - // Example: - // func f(i int) { - // i*i - // } - UnusedExpr - - // UnusedVar occurs when a variable is declared but unused. - // - // Example: - // func f() { - // x := 1 - // } - UnusedVar - - // MissingReturn occurs when a function with results is missing a return - // statement. - // - // Example: - // func f() int {} - MissingReturn - - // WrongResultCount occurs when a return statement returns an incorrect - // number of values. - // - // Example: - // func ReturnOne() int { - // return 1, 2 - // } - WrongResultCount - - // OutOfScopeResult occurs when the name of a value implicitly returned by - // an empty return statement is shadowed in a nested scope. - // - // Example: - // func factor(n int) (i int) { - // for i := 2; i < n; i++ { - // if n%i == 0 { - // return - // } - // } - // return 0 - // } - OutOfScopeResult - - /* control flow > if */ - - // InvalidCond occurs when an if condition is not a boolean expression. - // - // Example: - // func checkReturn(i int) { - // if i { - // panic("non-zero return") - // } - // } - InvalidCond - - /* control flow > for */ - - // InvalidPostDecl occurs when there is a declaration in a for-loop post - // statement. - // - // Example: - // func f() { - // for i := 0; i < 10; j := 0 {} - // } - InvalidPostDecl - - // InvalidChanRange occurs when a send-only channel used in a range - // expression. - // - // Example: - // func sum(c chan<- int) { - // s := 0 - // for i := range c { - // s += i - // } - // } - InvalidChanRange - - // InvalidIterVar occurs when two iteration variables are used while ranging - // over a channel. - // - // Example: - // func f(c chan int) { - // for k, v := range c { - // println(k, v) - // } - // } - InvalidIterVar - - // InvalidRangeExpr occurs when the type of a range expression is not array, - // slice, string, map, or channel. - // - // Example: - // func f(i int) { - // for j := range i { - // println(j) - // } - // } - InvalidRangeExpr - - /* control flow > switch */ - - // MisplacedBreak occurs when a break statement is not within a for, switch, - // or select statement of the innermost function definition. 
- // - // Example: - // func f() { - // break - // } - MisplacedBreak - - // MisplacedContinue occurs when a continue statement is not within a for - // loop of the innermost function definition. - // - // Example: - // func sumeven(n int) int { - // proceed := func() { - // continue - // } - // sum := 0 - // for i := 1; i <= n; i++ { - // if i % 2 != 0 { - // proceed() - // } - // sum += i - // } - // return sum - // } - MisplacedContinue - - // MisplacedFallthrough occurs when a fallthrough statement is not within an - // expression switch. - // - // Example: - // func typename(i interface{}) string { - // switch i.(type) { - // case int64: - // fallthrough - // case int: - // return "int" - // } - // return "unsupported" - // } - MisplacedFallthrough - - // DuplicateCase occurs when a type or expression switch has duplicate - // cases. - // - // Example: - // func printInt(i int) { - // switch i { - // case 1: - // println("one") - // case 1: - // println("One") - // } - // } - DuplicateCase - - // DuplicateDefault occurs when a type or expression switch has multiple - // default clauses. - // - // Example: - // func printInt(i int) { - // switch i { - // case 1: - // println("one") - // default: - // println("One") - // default: - // println("1") - // } - // } - DuplicateDefault - - // BadTypeKeyword occurs when a .(type) expression is used anywhere other - // than a type switch. - // - // Example: - // type I interface { - // m() - // } - // var t I - // var _ = t.(type) - BadTypeKeyword - - // InvalidTypeSwitch occurs when .(type) is used on an expression that is - // not of interface type. - // - // Example: - // func f(i int) { - // switch x := i.(type) {} - // } - InvalidTypeSwitch - - // InvalidExprSwitch occurs when a switch expression is not comparable. - // - // Example: - // func _() { - // var a struct{ _ func() } - // switch a /* ERROR cannot switch on a */ { - // } - // } - InvalidExprSwitch - - /* control flow > select */ - - // InvalidSelectCase occurs when a select case is not a channel send or - // receive. - // - // Example: - // func checkChan(c <-chan int) bool { - // select { - // case c: - // return true - // default: - // return false - // } - // } - InvalidSelectCase - - /* control flow > labels and jumps */ - - // UndeclaredLabel occurs when an undeclared label is jumped to. - // - // Example: - // func f() { - // goto L - // } - UndeclaredLabel - - // DuplicateLabel occurs when a label is declared more than once. - // - // Example: - // func f() int { - // L: - // L: - // return 1 - // } - DuplicateLabel - - // MisplacedLabel occurs when a break or continue label is not on a for, - // switch, or select statement. - // - // Example: - // func f() { - // L: - // a := []int{1,2,3} - // for _, e := range a { - // if e > 10 { - // break L - // } - // println(a) - // } - // } - MisplacedLabel - - // UnusedLabel occurs when a label is declared but not used. - // - // Example: - // func f() { - // L: - // } - UnusedLabel - - // JumpOverDecl occurs when a label jumps over a variable declaration. - // - // Example: - // func f() int { - // goto L - // x := 2 - // L: - // x++ - // return x - // } - JumpOverDecl - - // JumpIntoBlock occurs when a forward jump goes to a label inside a nested - // block. - // - // Example: - // func f(x int) { - // goto L - // if x > 0 { - // L: - // print("inside block") - // } - // } - JumpIntoBlock - - /* control flow > calls */ - - // InvalidMethodExpr occurs when a pointer method is called but the argument - // is not addressable. 
- // - // Example: - // type T struct {} - // - // func (*T) m() int { return 1 } - // - // var _ = T.m(T{}) - InvalidMethodExpr - - // WrongArgCount occurs when too few or too many arguments are passed by a - // function call. - // - // Example: - // func f(i int) {} - // var x = f() - WrongArgCount - - // InvalidCall occurs when an expression is called that is not of function - // type. - // - // Example: - // var x = "x" - // var y = x() - InvalidCall - - /* control flow > suspended */ - - // UnusedResults occurs when a restricted expression-only built-in function - // is suspended via go or defer. Such a suspension discards the results of - // these side-effect free built-in functions, and therefore is ineffectual. - // - // Example: - // func f(a []int) int { - // defer len(a) - // return i - // } - UnusedResults - - // InvalidDefer occurs when a deferred expression is not a function call, - // for example if the expression is a type conversion. - // - // Example: - // func f(i int) int { - // defer int32(i) - // return i - // } - InvalidDefer - - // InvalidGo occurs when a go expression is not a function call, for example - // if the expression is a type conversion. - // - // Example: - // func f(i int) int { - // go int32(i) - // return i - // } - InvalidGo - - // All codes below were added in Go 1.17. - - /* decl */ - - // BadDecl occurs when a declaration has invalid syntax. - BadDecl - - // RepeatedDecl occurs when an identifier occurs more than once on the left - // hand side of a short variable declaration. - // - // Example: - // func _() { - // x, y, y := 1, 2, 3 - // } - RepeatedDecl - - /* unsafe */ - - // InvalidUnsafeAdd occurs when unsafe.Add is called with a - // length argument that is not of integer type. - // - // Example: - // import "unsafe" - // - // var p unsafe.Pointer - // var _ = unsafe.Add(p, float64(1)) - InvalidUnsafeAdd - - // InvalidUnsafeSlice occurs when unsafe.Slice is called with a - // pointer argument that is not of pointer type or a length argument - // that is not of integer type, negative, or out of bounds. - // - // Example: - // import "unsafe" - // - // var x int - // var _ = unsafe.Slice(x, 1) - // - // Example: - // import "unsafe" - // - // var x int - // var _ = unsafe.Slice(&x, float64(1)) - // - // Example: - // import "unsafe" - // - // var x int - // var _ = unsafe.Slice(&x, -1) - // - // Example: - // import "unsafe" - // - // var x int - // var _ = unsafe.Slice(&x, uint64(1) << 63) - InvalidUnsafeSlice - - // All codes below were added in Go 1.18. - - /* features */ - - // UnsupportedFeature occurs when a language feature is used that is not - // supported at this Go version. - UnsupportedFeature - - /* type params */ - - // NotAGenericType occurs when a non-generic type is used where a generic - // type is expected: in type or function instantiation. - // - // Example: - // type T int - // - // var _ T[int] - NotAGenericType - - // WrongTypeArgCount occurs when a type or function is instantiated with an - // incorrect number of type arguments, including when a generic type or - // function is used without instantiation. - // - // Errors involving failed type inference are assigned other error codes. - // - // Example: - // type T[p any] int - // - // var _ T[int, string] - // - // Example: - // func f[T any]() {} - // - // var x = f - WrongTypeArgCount - - // CannotInferTypeArgs occurs when type or function type argument inference - // fails to infer all type arguments. 
- // - // Example: - // func f[T any]() {} - // - // func _() { - // f() - // } - // - // Example: - // type N[P, Q any] struct{} - // - // var _ N[int] - CannotInferTypeArgs - - // InvalidTypeArg occurs when a type argument does not satisfy its - // corresponding type parameter constraints. - // - // Example: - // type T[P ~int] struct{} - // - // var _ T[string] - InvalidTypeArg // arguments? InferenceFailed - - // InvalidInstanceCycle occurs when an invalid cycle is detected - // within the instantiation graph. - // - // Example: - // func f[T any]() { f[*T]() } - InvalidInstanceCycle - - // InvalidUnion occurs when an embedded union or approximation element is - // not valid. - // - // Example: - // type _ interface { - // ~int | interface{ m() } - // } - InvalidUnion - - // MisplacedConstraintIface occurs when a constraint-type interface is used - // outside of constraint position. - // - // Example: - // type I interface { ~int } - // - // var _ I - MisplacedConstraintIface - - // InvalidMethodTypeParams occurs when methods have type parameters. - // - // It cannot be encountered with an AST parsed using go/parser. - InvalidMethodTypeParams - - // MisplacedTypeParam occurs when a type parameter is used in a place where - // it is not permitted. - // - // Example: - // type T[P any] P - // - // Example: - // type T[P any] struct{ *P } - MisplacedTypeParam - - // InvalidUnsafeSliceData occurs when unsafe.SliceData is called with - // an argument that is not of slice type. It also occurs if it is used - // in a package compiled for a language version before go1.20. - // - // Example: - // import "unsafe" - // - // var x int - // var _ = unsafe.SliceData(x) - InvalidUnsafeSliceData - - // InvalidUnsafeString occurs when unsafe.String is called with - // a length argument that is not of integer type, negative, or - // out of bounds. It also occurs if it is used in a package - // compiled for a language version before go1.20. - // - // Example: - // import "unsafe" - // - // var b [10]byte - // var _ = unsafe.String(&b[0], -1) - InvalidUnsafeString - - // InvalidUnsafeStringData occurs if it is used in a package - // compiled for a language version before go1.20. - _ // not used anymore - -) diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go deleted file mode 100644 index 15ecf7c5d..000000000 --- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go +++ /dev/null @@ -1,179 +0,0 @@ -// Code generated by "stringer -type=ErrorCode"; DO NOT EDIT. - -package typesinternal - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[InvalidSyntaxTree - -1] - _ = x[Test-1] - _ = x[BlankPkgName-2] - _ = x[MismatchedPkgName-3] - _ = x[InvalidPkgUse-4] - _ = x[BadImportPath-5] - _ = x[BrokenImport-6] - _ = x[ImportCRenamed-7] - _ = x[UnusedImport-8] - _ = x[InvalidInitCycle-9] - _ = x[DuplicateDecl-10] - _ = x[InvalidDeclCycle-11] - _ = x[InvalidTypeCycle-12] - _ = x[InvalidConstInit-13] - _ = x[InvalidConstVal-14] - _ = x[InvalidConstType-15] - _ = x[UntypedNilUse-16] - _ = x[WrongAssignCount-17] - _ = x[UnassignableOperand-18] - _ = x[NoNewVar-19] - _ = x[MultiValAssignOp-20] - _ = x[InvalidIfaceAssign-21] - _ = x[InvalidChanAssign-22] - _ = x[IncompatibleAssign-23] - _ = x[UnaddressableFieldAssign-24] - _ = x[NotAType-25] - _ = x[InvalidArrayLen-26] - _ = x[BlankIfaceMethod-27] - _ = x[IncomparableMapKey-28] - _ = x[InvalidIfaceEmbed-29] - _ = x[InvalidPtrEmbed-30] - _ = x[BadRecv-31] - _ = x[InvalidRecv-32] - _ = x[DuplicateFieldAndMethod-33] - _ = x[DuplicateMethod-34] - _ = x[InvalidBlank-35] - _ = x[InvalidIota-36] - _ = x[MissingInitBody-37] - _ = x[InvalidInitSig-38] - _ = x[InvalidInitDecl-39] - _ = x[InvalidMainDecl-40] - _ = x[TooManyValues-41] - _ = x[NotAnExpr-42] - _ = x[TruncatedFloat-43] - _ = x[NumericOverflow-44] - _ = x[UndefinedOp-45] - _ = x[MismatchedTypes-46] - _ = x[DivByZero-47] - _ = x[NonNumericIncDec-48] - _ = x[UnaddressableOperand-49] - _ = x[InvalidIndirection-50] - _ = x[NonIndexableOperand-51] - _ = x[InvalidIndex-52] - _ = x[SwappedSliceIndices-53] - _ = x[NonSliceableOperand-54] - _ = x[InvalidSliceExpr-55] - _ = x[InvalidShiftCount-56] - _ = x[InvalidShiftOperand-57] - _ = x[InvalidReceive-58] - _ = x[InvalidSend-59] - _ = x[DuplicateLitKey-60] - _ = x[MissingLitKey-61] - _ = x[InvalidLitIndex-62] - _ = x[OversizeArrayLit-63] - _ = x[MixedStructLit-64] - _ = x[InvalidStructLit-65] - _ = x[MissingLitField-66] - _ = x[DuplicateLitField-67] - _ = x[UnexportedLitField-68] - _ = x[InvalidLitField-69] - _ = x[UntypedLit-70] - _ = x[InvalidLit-71] - _ = x[AmbiguousSelector-72] - _ = x[UndeclaredImportedName-73] - _ = x[UnexportedName-74] - _ = x[UndeclaredName-75] - _ = x[MissingFieldOrMethod-76] - _ = x[BadDotDotDotSyntax-77] - _ = x[NonVariadicDotDotDot-78] - _ = x[MisplacedDotDotDot-79] - _ = x[InvalidDotDotDotOperand-80] - _ = x[InvalidDotDotDot-81] - _ = x[UncalledBuiltin-82] - _ = x[InvalidAppend-83] - _ = x[InvalidCap-84] - _ = x[InvalidClose-85] - _ = x[InvalidCopy-86] - _ = x[InvalidComplex-87] - _ = x[InvalidDelete-88] - _ = x[InvalidImag-89] - _ = x[InvalidLen-90] - _ = x[SwappedMakeArgs-91] - _ = x[InvalidMake-92] - _ = x[InvalidReal-93] - _ = x[InvalidAssert-94] - _ = x[ImpossibleAssert-95] - _ = x[InvalidConversion-96] - _ = x[InvalidUntypedConversion-97] - _ = x[BadOffsetofSyntax-98] - _ = x[InvalidOffsetof-99] - _ = x[UnusedExpr-100] - _ = x[UnusedVar-101] - _ = x[MissingReturn-102] - _ = x[WrongResultCount-103] - _ = x[OutOfScopeResult-104] - _ = x[InvalidCond-105] - _ = x[InvalidPostDecl-106] - _ = x[InvalidChanRange-107] - _ = x[InvalidIterVar-108] - _ = x[InvalidRangeExpr-109] - _ = x[MisplacedBreak-110] - _ = x[MisplacedContinue-111] - _ = x[MisplacedFallthrough-112] - _ = x[DuplicateCase-113] - _ = x[DuplicateDefault-114] - _ = x[BadTypeKeyword-115] - _ = x[InvalidTypeSwitch-116] - _ = x[InvalidExprSwitch-117] - _ = x[InvalidSelectCase-118] - _ = x[UndeclaredLabel-119] - _ = x[DuplicateLabel-120] - _ = x[MisplacedLabel-121] - _ = x[UnusedLabel-122] - _ = x[JumpOverDecl-123] - _ = x[JumpIntoBlock-124] - _ = x[InvalidMethodExpr-125] - _ = 
x[WrongArgCount-126] - _ = x[InvalidCall-127] - _ = x[UnusedResults-128] - _ = x[InvalidDefer-129] - _ = x[InvalidGo-130] - _ = x[BadDecl-131] - _ = x[RepeatedDecl-132] - _ = x[InvalidUnsafeAdd-133] - _ = x[InvalidUnsafeSlice-134] - _ = x[UnsupportedFeature-135] - _ = x[NotAGenericType-136] - _ = x[WrongTypeArgCount-137] - _ = x[CannotInferTypeArgs-138] - _ = x[InvalidTypeArg-139] - _ = x[InvalidInstanceCycle-140] - _ = x[InvalidUnion-141] - _ = x[MisplacedConstraintIface-142] - _ = x[InvalidMethodTypeParams-143] - _ = x[MisplacedTypeParam-144] - _ = x[InvalidUnsafeSliceData-145] - _ = x[InvalidUnsafeString-146] -} - -const ( - _ErrorCode_name_0 = "InvalidSyntaxTree" - _ErrorCode_name_1 = "TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilUseWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidExprSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGoBadDeclRepeatedDeclInvalidUnsafeAddInvalidUnsafeSliceUnsupportedFeatureNotAGenericTypeWrongTypeArgCountCannotInferTypeArgsInvalidTypeArgInvalidInstanceCycleInvalidUnionMisplacedConstraintIfaceInvalidMethodTypeParamsMisplacedTypeParamInvalidUnsafeSliceDataInvalidUnsafeString" -) - -var ( - _ErrorCode_index_1 = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 218, 234, 253, 261, 277, 295, 312, 330, 354, 362, 377, 393, 411, 428, 443, 450, 461, 484, 499, 511, 522, 537, 551, 566, 581, 594, 603, 617, 632, 643, 658, 667, 683, 703, 721, 740, 752, 771, 790, 806, 823, 842, 856, 867, 882, 895, 910, 926, 940, 956, 971, 988, 1006, 1021, 1031, 1041, 1058, 1080, 1094, 1108, 1128, 1146, 1166, 1184, 1207, 1223, 1238, 1251, 1261, 1273, 1284, 1298, 1311, 1322, 1332, 1347, 1358, 1369, 1382, 1398, 1415, 1439, 1456, 1471, 1481, 1490, 1503, 1519, 1535, 1546, 1561, 1577, 1591, 1607, 1621, 1638, 1658, 1671, 1687, 1701, 1718, 
1735, 1752, 1767, 1781, 1795, 1806, 1818, 1831, 1848, 1861, 1872, 1885, 1897, 1906, 1913, 1925, 1941, 1959, 1977, 1992, 2009, 2028, 2042, 2062, 2074, 2098, 2121, 2139, 2161, 2180} -) - -func (i ErrorCode) String() string { - switch { - case i == -1: - return _ErrorCode_name_0 - case 1 <= i && i <= 146: - i -= 1 - return _ErrorCode_name_1[_ErrorCode_index_1[i]:_ErrorCode_index_1[i+1]] - default: - return "ErrorCode(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/vendor/golang.org/x/tools/internal/typesinternal/recv.go deleted file mode 100644 index fea7c8b75..000000000 --- a/vendor/golang.org/x/tools/internal/typesinternal/recv.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package typesinternal - -import ( - "go/types" - - "golang.org/x/tools/internal/aliases" -) - -// ReceiverNamed returns the named type (if any) associated with the -// type of recv, which may be of the form N or *N, or aliases thereof. -// It also reports whether a Pointer was present. -func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) { - t := recv.Type() - if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok { - isPtr = true - t = ptr.Elem() - } - named, _ = aliases.Unalias(t).(*types.Named) - return -} - -// Unpointer returns T given *T or an alias thereof. -// For all other types it is the identity function. -// It does not look at underlying types. -// The result may be an alias. -// -// Use this function to strip off the optional pointer on a receiver -// in a field or method selection, without losing the named type -// (which is needed to compute the method set). -// -// See also [typeparams.MustDeref], which removes one level of -// indirection from the type, regardless of named types (analogous to -// a LOAD instruction). -func Unpointer(t types.Type) types.Type { - if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok { - return ptr.Elem() - } - return t -} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/toonew.go b/vendor/golang.org/x/tools/internal/typesinternal/toonew.go deleted file mode 100644 index cc86487ea..000000000 --- a/vendor/golang.org/x/tools/internal/typesinternal/toonew.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package typesinternal - -import ( - "go/types" - - "golang.org/x/tools/internal/stdlib" - "golang.org/x/tools/internal/versions" -) - -// TooNewStdSymbols computes the set of package-level symbols -// exported by pkg that are not available at the specified version. -// The result maps each symbol to its minimum version. -// -// The pkg is allowed to contain type errors. -func TooNewStdSymbols(pkg *types.Package, version string) map[types.Object]string { - disallowed := make(map[types.Object]string) - - // Pass 1: package-level symbols. - symbols := stdlib.PackageSymbols[pkg.Path()] - for _, sym := range symbols { - symver := sym.Version.String() - if versions.Before(version, symver) { - switch sym.Kind { - case stdlib.Func, stdlib.Var, stdlib.Const, stdlib.Type: - disallowed[pkg.Scope().Lookup(sym.Name)] = symver - } - } - } - - // Pass 2: fields and methods. 
- // - // We allow fields and methods if their associated type is - // disallowed, as otherwise we would report false positives - // for compatibility shims. Consider: - // - // //go:build go1.22 - // type T struct { F std.Real } // correct new API - // - // //go:build !go1.22 - // type T struct { F fake } // shim - // type fake struct { ... } - // func (fake) M () {} - // - // These alternative declarations of T use either the std.Real - // type, introduced in go1.22, or a fake type, for the field - // F. (The fakery could be arbitrarily deep, involving more - // nested fields and methods than are shown here.) Clients - // that use the compatibility shim T will compile with any - // version of go, whether older or newer than go1.22, but only - // the newer version will use the std.Real implementation. - // - // Now consider a reference to method M in new(T).F.M() in a - // module that requires a minimum of go1.21. The analysis may - // occur using a version of Go higher than 1.21, selecting the - // first version of T, so the method M is Real.M. This would - // spuriously cause the analyzer to report a reference to a - // too-new symbol even though this expression compiles just - // fine (with the fake implementation) using go1.21. - for _, sym := range symbols { - symVersion := sym.Version.String() - if !versions.Before(version, symVersion) { - continue // allowed - } - - var obj types.Object - switch sym.Kind { - case stdlib.Field: - typename, name := sym.SplitField() - if t := pkg.Scope().Lookup(typename); t != nil && disallowed[t] == "" { - obj, _, _ = types.LookupFieldOrMethod(t.Type(), false, pkg, name) - } - - case stdlib.Method: - ptr, recvname, name := sym.SplitMethod() - if t := pkg.Scope().Lookup(recvname); t != nil && disallowed[t] == "" { - obj, _, _ = types.LookupFieldOrMethod(t.Type(), ptr, pkg, name) - } - } - if obj != nil { - disallowed[obj] = symVersion - } - } - - return disallowed -} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go deleted file mode 100644 index 7c77c2fbc..000000000 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package typesinternal provides access to internal go/types APIs that are not -// yet exported. -package typesinternal - -import ( - "go/token" - "go/types" - "reflect" - "unsafe" -) - -func SetUsesCgo(conf *types.Config) bool { - v := reflect.ValueOf(conf).Elem() - - f := v.FieldByName("go115UsesCgo") - if !f.IsValid() { - f = v.FieldByName("UsesCgo") - if !f.IsValid() { - return false - } - } - - addr := unsafe.Pointer(f.UnsafeAddr()) - *(*bool)(addr) = true - - return true -} - -// ReadGo116ErrorData extracts additional information from types.Error values -// generated by Go version 1.16 and later: the error code, start position, and -// end position. If all positions are valid, start <= err.Pos <= end. -// -// If the data could not be read, the final result parameter will be false. -func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) { - var data [3]int - // By coincidence all of these fields are ints, which simplifies things. 
- v := reflect.ValueOf(err) - for i, name := range []string{"go116code", "go116start", "go116end"} { - f := v.FieldByName(name) - if !f.IsValid() { - return 0, 0, 0, false - } - data[i] = int(f.Int()) - } - return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true -} diff --git a/vendor/golang.org/x/tools/internal/versions/features.go b/vendor/golang.org/x/tools/internal/versions/features.go deleted file mode 100644 index b53f17861..000000000 --- a/vendor/golang.org/x/tools/internal/versions/features.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package versions - -// This file contains predicates for working with file versions to -// decide when a tool should consider a language feature enabled. - -// GoVersions that features in x/tools can be gated to. -const ( - Go1_18 = "go1.18" - Go1_19 = "go1.19" - Go1_20 = "go1.20" - Go1_21 = "go1.21" - Go1_22 = "go1.22" -) - -// Future is an invalid unknown Go version sometime in the future. -// Do not use directly with Compare. -const Future = "" - -// AtLeast reports whether the file version v comes after a Go release. -// -// Use this predicate to enable a behavior once a certain Go release -// has happened (and stays enabled in the future). -func AtLeast(v, release string) bool { - if v == Future { - return true // an unknown future version is always after y. - } - return Compare(Lang(v), Lang(release)) >= 0 -} - -// Before reports whether the file version v is strictly before a Go release. -// -// Use this predicate to disable a behavior once a certain Go release -// has happened (and stays enabled in the future). -func Before(v, release string) bool { - if v == Future { - return false // an unknown future version happens after y. - } - return Compare(Lang(v), Lang(release)) < 0 -} diff --git a/vendor/golang.org/x/tools/internal/versions/gover.go b/vendor/golang.org/x/tools/internal/versions/gover.go deleted file mode 100644 index bbabcd22e..000000000 --- a/vendor/golang.org/x/tools/internal/versions/gover.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This is a fork of internal/gover for use by x/tools until -// go1.21 and earlier are no longer supported by x/tools. - -package versions - -import "strings" - -// A gover is a parsed Go gover: major[.Minor[.Patch]][kind[pre]] -// The numbers are the original decimal strings to avoid integer overflows -// and since there is very little actual math. (Probably overflow doesn't matter in practice, -// but at the time this code was written, there was an existing test that used -// go1.99999999999, which does not fit in an int on 32-bit platforms. -// The "big decimal" representation avoids the problem entirely.) -type gover struct { - major string // decimal - minor string // decimal or "" - patch string // decimal or "" - kind string // "", "alpha", "beta", "rc" - pre string // decimal or "" -} - -// compare returns -1, 0, or +1 depending on whether -// x < y, x == y, or x > y, interpreted as toolchain versions. -// The versions x and y must not begin with a "go" prefix: just "1.21" not "go1.21". -// Malformed versions compare less than well-formed versions and equal to each other. 
-// The language version "1.21" compares less than the release candidate and eventual releases "1.21rc1" and "1.21.0". -func compare(x, y string) int { - vx := parse(x) - vy := parse(y) - - if c := cmpInt(vx.major, vy.major); c != 0 { - return c - } - if c := cmpInt(vx.minor, vy.minor); c != 0 { - return c - } - if c := cmpInt(vx.patch, vy.patch); c != 0 { - return c - } - if c := strings.Compare(vx.kind, vy.kind); c != 0 { // "" < alpha < beta < rc - return c - } - if c := cmpInt(vx.pre, vy.pre); c != 0 { - return c - } - return 0 -} - -// lang returns the Go language version. For example, lang("1.2.3") == "1.2". -func lang(x string) string { - v := parse(x) - if v.minor == "" || v.major == "1" && v.minor == "0" { - return v.major - } - return v.major + "." + v.minor -} - -// isValid reports whether the version x is valid. -func isValid(x string) bool { - return parse(x) != gover{} -} - -// parse parses the Go version string x into a version. -// It returns the zero version if x is malformed. -func parse(x string) gover { - var v gover - - // Parse major version. - var ok bool - v.major, x, ok = cutInt(x) - if !ok { - return gover{} - } - if x == "" { - // Interpret "1" as "1.0.0". - v.minor = "0" - v.patch = "0" - return v - } - - // Parse . before minor version. - if x[0] != '.' { - return gover{} - } - - // Parse minor version. - v.minor, x, ok = cutInt(x[1:]) - if !ok { - return gover{} - } - if x == "" { - // Patch missing is same as "0" for older versions. - // Starting in Go 1.21, patch missing is different from explicit .0. - if cmpInt(v.minor, "21") < 0 { - v.patch = "0" - } - return v - } - - // Parse patch if present. - if x[0] == '.' { - v.patch, x, ok = cutInt(x[1:]) - if !ok || x != "" { - // Note that we are disallowing prereleases (alpha, beta, rc) for patch releases here (x != ""). - // Allowing them would be a bit confusing because we already have: - // 1.21 < 1.21rc1 - // But a prerelease of a patch would have the opposite effect: - // 1.21.3rc1 < 1.21.3 - // We've never needed them before, so let's not start now. - return gover{} - } - return v - } - - // Parse prerelease. - i := 0 - for i < len(x) && (x[i] < '0' || '9' < x[i]) { - if x[i] < 'a' || 'z' < x[i] { - return gover{} - } - i++ - } - if i == 0 { - return gover{} - } - v.kind, x = x[:i], x[i:] - if x == "" { - return v - } - v.pre, x, ok = cutInt(x) - if !ok || x != "" { - return gover{} - } - - return v -} - -// cutInt scans the leading decimal number at the start of x to an integer -// and returns that value and the rest of the string. -func cutInt(x string) (n, rest string, ok bool) { - i := 0 - for i < len(x) && '0' <= x[i] && x[i] <= '9' { - i++ - } - if i == 0 || x[0] == '0' && i != 1 { // no digits or unnecessary leading zero - return "", "", false - } - return x[:i], x[i:], true -} - -// cmpInt returns cmp.Compare(x, y) interpreting x and y as decimal numbers. -// (Copied from golang.org/x/mod/semver's compareInt.) -func cmpInt(x, y string) int { - if x == y { - return 0 - } - if len(x) < len(y) { - return -1 - } - if len(x) > len(y) { - return +1 - } - if x < y { - return -1 - } else { - return +1 - } -} diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain.go b/vendor/golang.org/x/tools/internal/versions/toolchain.go deleted file mode 100644 index 377bf7a53..000000000 --- a/vendor/golang.org/x/tools/internal/versions/toolchain.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package versions - -// toolchain is maximum version (<1.22) that the go toolchain used -// to build the current tool is known to support. -// -// When a tool is built with >=1.22, the value of toolchain is unused. -// -// x/tools does not support building with go <1.18. So we take this -// as the minimum possible maximum. -var toolchain string = Go1_18 diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go deleted file mode 100644 index f65beed9d..000000000 --- a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.19 -// +build go1.19 - -package versions - -func init() { - if Compare(toolchain, Go1_19) < 0 { - toolchain = Go1_19 - } -} diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go deleted file mode 100644 index 1a9efa126..000000000 --- a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.20 -// +build go1.20 - -package versions - -func init() { - if Compare(toolchain, Go1_20) < 0 { - toolchain = Go1_20 - } -} diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go deleted file mode 100644 index b7ef216df..000000000 --- a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 -// +build go1.21 - -package versions - -func init() { - if Compare(toolchain, Go1_21) < 0 { - toolchain = Go1_21 - } -} diff --git a/vendor/golang.org/x/tools/internal/versions/types.go b/vendor/golang.org/x/tools/internal/versions/types.go deleted file mode 100644 index 562eef21f..000000000 --- a/vendor/golang.org/x/tools/internal/versions/types.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package versions - -import ( - "go/types" -) - -// GoVersion returns the Go version of the type package. -// It returns zero if no version can be determined. -func GoVersion(pkg *types.Package) string { - // TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25. - if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok { - return pkg.GoVersion() - } - return "" -} diff --git a/vendor/golang.org/x/tools/internal/versions/types_go121.go b/vendor/golang.org/x/tools/internal/versions/types_go121.go deleted file mode 100644 index b4345d334..000000000 --- a/vendor/golang.org/x/tools/internal/versions/types_go121.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.22 -// +build !go1.22 - -package versions - -import ( - "go/ast" - "go/types" -) - -// FileVersion returns a language version (<=1.21) derived from runtime.Version() -// or an unknown future version. -func FileVersion(info *types.Info, file *ast.File) string { - // In x/tools built with Go <= 1.21, we do not have Info.FileVersions - // available. We use a go version derived from the toolchain used to - // compile the tool by default. - // This will be <= go1.21. We take this as the maximum version that - // this tool can support. - // - // There are no features currently in x/tools that need to tell fine grained - // differences for versions <1.22. - return toolchain -} - -// InitFileVersions is a noop when compiled with this Go version. -func InitFileVersions(*types.Info) {} diff --git a/vendor/golang.org/x/tools/internal/versions/types_go122.go b/vendor/golang.org/x/tools/internal/versions/types_go122.go deleted file mode 100644 index e8180632a..000000000 --- a/vendor/golang.org/x/tools/internal/versions/types_go122.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.22 -// +build go1.22 - -package versions - -import ( - "go/ast" - "go/types" -) - -// FileVersions returns a file's Go version. -// The reported version is an unknown Future version if a -// version cannot be determined. -func FileVersion(info *types.Info, file *ast.File) string { - // In tools built with Go >= 1.22, the Go version of a file - // follow a cascades of sources: - // 1) types.Info.FileVersion, which follows the cascade: - // 1.a) file version (ast.File.GoVersion), - // 1.b) the package version (types.Config.GoVersion), or - // 2) is some unknown Future version. - // - // File versions require a valid package version to be provided to types - // in Config.GoVersion. Config.GoVersion is either from the package's module - // or the toolchain (go run). This value should be provided by go/packages - // or unitchecker.Config.GoVersion. - if v := info.FileVersions[file]; IsValid(v) { - return v - } - // Note: we could instead return runtime.Version() [if valid]. - // This would act as a max version on what a tool can support. - return Future -} - -// InitFileVersions initializes info to record Go versions for Go files. -func InitFileVersions(info *types.Info) { - info.FileVersions = make(map[*ast.File]string) -} diff --git a/vendor/golang.org/x/tools/internal/versions/versions.go b/vendor/golang.org/x/tools/internal/versions/versions.go deleted file mode 100644 index 8d1f7453d..000000000 --- a/vendor/golang.org/x/tools/internal/versions/versions.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package versions - -import ( - "strings" -) - -// Note: If we use build tags to use go/versions when go >=1.22, -// we run into go.dev/issue/53737. Under some operations users would see an -// import of "go/versions" even if they would not compile the file. -// For example, during `go get -u ./...` (go.dev/issue/64490) we do not try to include -// For this reason, this library just a clone of go/versions for the moment. - -// Lang returns the Go language version for version x. -// If x is not a valid version, Lang returns the empty string. 
-// For example: -// -// Lang("go1.21rc2") = "go1.21" -// Lang("go1.21.2") = "go1.21" -// Lang("go1.21") = "go1.21" -// Lang("go1") = "go1" -// Lang("bad") = "" -// Lang("1.21") = "" -func Lang(x string) string { - v := lang(stripGo(x)) - if v == "" { - return "" - } - return x[:2+len(v)] // "go"+v without allocation -} - -// Compare returns -1, 0, or +1 depending on whether -// x < y, x == y, or x > y, interpreted as Go versions. -// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21". -// Invalid versions, including the empty string, compare less than -// valid versions and equal to each other. -// The language version "go1.21" compares less than the -// release candidate and eventual releases "go1.21rc1" and "go1.21.0". -// Custom toolchain suffixes are ignored during comparison: -// "go1.21.0" and "go1.21.0-bigcorp" are equal. -func Compare(x, y string) int { return compare(stripGo(x), stripGo(y)) } - -// IsValid reports whether the version x is valid. -func IsValid(x string) bool { return isValid(stripGo(x)) } - -// stripGo converts from a "go1.21" version to a "1.21" version. -// If v does not start with "go", stripGo returns the empty string (a known invalid version). -func stripGo(v string) string { - v, _, _ = strings.Cut(v, "-") // strip -bigcorp suffix. - if len(v) < 2 || v[:2] != "go" { - return "" - } - return v[2:] -} diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index b6dbace4c..e6c4fe90d 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -42,6 +42,26 @@ func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { return creds, nil } +// GetOAuth2Configuration determines configurations for the OAuth2 transport, which is separate from the API transport. +// The OAuth2 transport and endpoint will be configured for mTLS if applicable. +func GetOAuth2Configuration(ctx context.Context, settings *DialSettings) (string, *http.Client, error) { + clientCertSource, err := getClientCertificateSource(settings) + if err != nil { + return "", nil, err + } + tokenURL := oAuth2Endpoint(clientCertSource) + var oauth2Client *http.Client + if clientCertSource != nil { + tlsConfig := &tls.Config{ + GetClientCertificate: clientCertSource, + } + oauth2Client = customHTTPClient(tlsConfig) + } else { + oauth2Client = oauth2.NewClient(ctx, nil) + } + return tokenURL, oauth2Client, nil +} + func credsNewAuth(ctx context.Context, settings *DialSettings) (*google.Credentials, error) { // Preserve old options behavior if settings.InternalCredentials != nil { @@ -80,13 +100,18 @@ func credsNewAuth(ctx context.Context, settings *DialSettings) (*google.Credenti aud = settings.DefaultAudience } + tokenURL, oauth2Client, err := GetOAuth2Configuration(ctx, settings) + if err != nil { + return nil, err + } creds, err := credentials.DetectDefault(&credentials.DetectOptions{ Scopes: scopes, Audience: aud, CredentialsFile: settings.CredentialsFile, CredentialsJSON: settings.CredentialsJSON, UseSelfSignedJWT: useSelfSignedJWT, - Client: oauth2.NewClient(ctx, nil), + TokenURL: tokenURL, + Client: oauth2Client, }) if err != nil { return nil, err @@ -147,19 +172,12 @@ func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*g var params google.CredentialsParams params.Scopes = ds.GetScopes() - // Determine configurations for the OAuth2 transport, which is separate from the API transport. 
- // The OAuth2 transport and endpoint will be configured for mTLS if applicable. - clientCertSource, err := getClientCertificateSource(ds) + tokenURL, oauth2Client, err := GetOAuth2Configuration(ctx, ds) if err != nil { return nil, err } - params.TokenURL = oAuth2Endpoint(clientCertSource) - if clientCertSource != nil { - tlsConfig := &tls.Config{ - GetClientCertificate: clientCertSource, - } - ctx = context.WithValue(ctx, oauth2.HTTPClient, customHTTPClient(tlsConfig)) - } + params.TokenURL = tokenURL + ctx = context.WithValue(ctx, oauth2.HTTPClient, oauth2Client) // By default, a standard OAuth 2.0 token source is created cred, err := google.CredentialsFromJSONWithParams(ctx, data, params) diff --git a/vendor/google.golang.org/api/internal/gensupport/retry.go b/vendor/google.golang.org/api/internal/gensupport/retry.go index 20b57d925..089ee3189 100644 --- a/vendor/google.golang.org/api/internal/gensupport/retry.go +++ b/vendor/google.golang.org/api/internal/gensupport/retry.go @@ -8,6 +8,7 @@ import ( "errors" "io" "net" + "net/url" "strings" "time" @@ -29,8 +30,6 @@ var ( backoff = func() Backoff { return &gax.Backoff{Initial: 100 * time.Millisecond} } - // syscallRetryable is a platform-specific hook, specified in retryable_linux.go - syscallRetryable func(error) bool = func(err error) bool { return false } ) const ( @@ -56,30 +55,33 @@ func shouldRetry(status int, err error) bool { if status == statusTooManyRequests || status == statusRequestTimeout { return true } - if err == io.ErrUnexpectedEOF { + if errors.Is(err, io.ErrUnexpectedEOF) { return true } - // Transient network errors should be retried. - if syscallRetryable(err) { + if errors.Is(err, net.ErrClosed) { return true } - if err, ok := err.(interface{ Temporary() bool }); ok { - if err.Temporary() { - return true + switch e := err.(type) { + case *net.OpError, *url.Error: + // Retry socket-level errors ECONNREFUSED and ECONNRESET (from syscall). + // Unfortunately the error type is unexported, so we resort to string + // matching. + retriable := []string{"connection refused", "connection reset", "broken pipe"} + for _, s := range retriable { + if strings.Contains(e.Error(), s) { + return true + } } - } - var opErr *net.OpError - if errors.As(err, &opErr) { - if strings.Contains(opErr.Error(), "use of closed network connection") { - // TODO: check against net.ErrClosed (go 1.16+) instead of string + case interface{ Temporary() bool }: + if e.Temporary() { return true } } - // If Go 1.13 error unwrapping is available, use this to examine wrapped + // If error unwrapping is available, use this to examine wrapped // errors. - if err, ok := err.(interface{ Unwrap() error }); ok { - return shouldRetry(status, err.Unwrap()) + if e, ok := err.(interface{ Unwrap() error }); ok { + return shouldRetry(status, e.Unwrap()) } return false } diff --git a/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go b/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go deleted file mode 100644 index a916c3da2..000000000 --- a/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2020 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package gensupport - -import "syscall" - -func init() { - // Initialize syscallRetryable to return true on transient socket-level - // errors. These errors are specific to Linux. 
- syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED } -} diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go index f39dd00d9..f6716134e 100644 --- a/vendor/google.golang.org/api/internal/gensupport/send.go +++ b/vendor/google.golang.org/api/internal/gensupport/send.go @@ -48,8 +48,24 @@ func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (* if ctx != nil { headers := callctx.HeadersFromContext(ctx) for k, vals := range headers { - for _, v := range vals { - req.Header.Add(k, v) + if k == "x-goog-api-client" { + // Merge all values into a single "x-goog-api-client" header. + var mergedVal strings.Builder + baseXGoogHeader := req.Header.Get("X-Goog-Api-Client") + if baseXGoogHeader != "" { + mergedVal.WriteString(baseXGoogHeader) + mergedVal.WriteRune(' ') + } + for _, v := range vals { + mergedVal.WriteString(v) + mergedVal.WriteRune(' ') + } + // Remove the last space and replace the header on the request. + req.Header.Set(k, mergedVal.String()[:mergedVal.Len()-1]) + } else { + for _, v := range vals { + req.Header.Add(k, v) + } } } } @@ -118,7 +134,9 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r var err error attempts := 1 invocationID := uuid.New().String() - baseXGoogHeader := req.Header.Get("X-Goog-Api-Client") + + xGoogHeaderVals := req.Header.Values("X-Goog-Api-Client") + baseXGoogHeader := strings.Join(xGoogHeaderVals, " ") // Loop to retry the request, up to the context deadline. var pause time.Duration diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index caf844136..a09f254c7 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. -const Version = "0.183.0" +const Version = "0.186.0" diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index 160800c2b..d336c44cc 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -33,7 +33,7 @@ "location": "me-central2" } ], - "etag": "\"3132383134303835313436343635393933303731\"", + "etag": "\"39393431363637393939313737343036323439\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -4075,7 +4075,7 @@ } } }, - "revision": "20240524", + "revision": "20240621", "rootUrl": "https://storage.googleapis.com/", "schemas": { "AnywhereCache": { @@ -5007,6 +5007,11 @@ "description": "The response message for storage.buckets.operations.list.", "id": "GoogleLongrunningListOperationsResponse", "properties": { + "kind": { + "default": "storage#operations", + "description": "The kind of item this is. For lists of operations, this is always storage#operations.", + "type": "string" + }, "nextPageToken": { "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", "type": "string" @@ -5033,6 +5038,11 @@ "$ref": "GoogleRpcStatus", "description": "The error result of the operation in case of failure or cancellation." 
}, + "kind": { + "default": "storage#operation", + "description": "The kind of item this is. For operations, this is always storage#operation.", + "type": "string" + }, "metadata": { "additionalProperties": { "description": "Properties of the object. Contains field @type with type URL.", @@ -5052,6 +5062,10 @@ }, "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as \"Delete\", the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type \"XxxResponse\", where \"Xxx\" is the original method name. For example, if the original method name is \"TakeSnapshot()\", the inferred response type is \"TakeSnapshotResponse\".", "type": "object" + }, + "selfLink": { + "description": "The link to this long running operation.", + "type": "string" } }, "type": "object" diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index b4d425e59..896bcb9c3 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -1682,6 +1682,9 @@ func (s *Folders) MarshalJSON() ([]byte, error) { // GoogleLongrunningListOperationsResponse: The response message for // storage.buckets.operations.list. type GoogleLongrunningListOperationsResponse struct { + // Kind: The kind of item this is. For lists of operations, this is always + // storage#operations. + Kind string `json:"kind,omitempty"` // NextPageToken: The continuation token, used to page through large result // sets. Provide this value in a subsequent request to return the next page of // results. @@ -1692,15 +1695,15 @@ type GoogleLongrunningListOperationsResponse struct { // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "NextPageToken") to - // unconditionally include in API requests. By default, fields with empty or - // default values are omitted from API requests. See + // ForceSendFields is a list of field names (e.g. "Kind") to unconditionally + // include in API requests. By default, fields with empty or default values are + // omitted from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more // details. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "NextPageToken") to include in API - // requests with the JSON null value. By default, fields with empty values are - // omitted from API requests. See + // NullFields is a list of field names (e.g. "Kind") to include in API requests + // with the JSON null value. By default, fields with empty values are omitted + // from API requests. See // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. NullFields []string `json:"-"` } @@ -1719,6 +1722,9 @@ type GoogleLongrunningOperation struct { Done bool `json:"done,omitempty"` // Error: The error result of the operation in case of failure or cancellation. Error *GoogleRpcStatus `json:"error,omitempty"` + // Kind: The kind of item this is. For operations, this is always + // storage#operation. + Kind string `json:"kind,omitempty"` // Metadata: Service-specific metadata associated with the operation. It // typically contains progress information and common metadata such as create // time. 
Some services might not provide such metadata. Any method that returns @@ -1736,6 +1742,8 @@ type GoogleLongrunningOperation struct { // method name. For example, if the original method name is "TakeSnapshot()", // the inferred response type is "TakeSnapshotResponse". Response googleapi.RawMessage `json:"response,omitempty"` + // SelfLink: The link to this long running operation. + SelfLink string `json:"selfLink,omitempty"` // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index 2e66d02b3..2d4f90c9c 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -218,6 +218,11 @@ func dialPoolNewAuth(ctx context.Context, secure bool, poolSize int, ds *interna defaultEndpointTemplate = ds.DefaultEndpoint } + tokenURL, oauth2Client, err := internal.GetOAuth2Configuration(ctx, ds) + if err != nil { + return nil, err + } + pool, err := grpctransport.Dial(ctx, secure, &grpctransport.Options{ DisableTelemetry: ds.TelemetryDisabled, DisableAuthentication: ds.NoAuth, @@ -231,7 +236,8 @@ func dialPoolNewAuth(ctx context.Context, secure bool, poolSize int, ds *interna Audience: aud, CredentialsFile: ds.CredentialsFile, CredentialsJSON: ds.CredentialsJSON, - Client: oauth2.NewClient(ctx, nil), + TokenURL: tokenURL, + Client: oauth2Client, }, InternalOptions: &grpctransport.InternalOptions{ EnableNonDefaultSAForDirectPath: ds.AllowNonDefaultServiceAccount, diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index d1cd83b62..a36e24315 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -107,6 +107,10 @@ func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal. if ds.RequestReason != "" { headers.Set("X-goog-request-reason", ds.RequestReason) } + tokenURL, oauth2Client, err := internal.GetOAuth2Configuration(ctx, ds) + if err != nil { + return nil, err + } client, err := httptransport.NewClient(&httptransport.Options{ DisableTelemetry: ds.TelemetryDisabled, DisableAuthentication: ds.NoAuth, @@ -121,7 +125,8 @@ func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal. Audience: aud, CredentialsFile: ds.CredentialsFile, CredentialsJSON: ds.CredentialsJSON, - Client: oauth2.NewClient(ctx, nil), + TokenURL: tokenURL, + Client: oauth2Client, }, InternalOptions: &httptransport.InternalOptions{ EnableJWTWithScope: ds.EnableJwtWithScope, diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index f47902371..bb2966e3b 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -102,7 +102,7 @@ type decoder struct { } // newError returns an error object with position info. -func (d decoder) newError(pos int, f string, x ...interface{}) error { +func (d decoder) newError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("(line %d:%d): ", line, column) return errors.New(head+f, x...) @@ -114,7 +114,7 @@ func (d decoder) unexpectedTokenError(tok json.Token) error { } // syntaxError returns a syntax error for given position. 
-func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { +func (d decoder) syntaxError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) return errors.New(head+f, x...) diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index a45f112bc..24bc98ac4 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -84,7 +84,7 @@ type decoder struct { } // newError returns an error object with position info. -func (d decoder) newError(pos int, f string, x ...interface{}) error { +func (d decoder) newError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("(line %d:%d): ", line, column) return errors.New(head+f, x...) @@ -96,7 +96,7 @@ func (d decoder) unexpectedTokenError(tok text.Token) error { } // syntaxError returns a syntax error for given position. -func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { +func (d decoder) syntaxError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) return errors.New(head+f, x...) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go index d2b3ac031..ea1d3e65a 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -214,7 +214,7 @@ func (d *Decoder) parseNext() (Token, error) { // newSyntaxError returns an error with line and column information useful for // syntax errors. -func (d *Decoder) newSyntaxError(pos int, f string, x ...interface{}) error { +func (d *Decoder) newSyntaxError(pos int, f string, x ...any) error { e := errors.New(f, x...) line, column := d.Position(pos) return errors.New("syntax error (line %d:%d): %v", line, column, e) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go index 87853e786..099b2bf45 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -601,7 +601,7 @@ func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token { // newSyntaxError returns a syntax error with line and column information for // current position. -func (d *Decoder) newSyntaxError(f string, x ...interface{}) error { +func (d *Decoder) newSyntaxError(f string, x ...any) error { e := errors.New(f, x...) line, column := d.Position(len(d.orig) - len(d.in)) return errors.New("syntax error (line %d:%d): %v", line, column, e) diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go index d96719829..c2d6bd526 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/errors.go +++ b/vendor/google.golang.org/protobuf/internal/errors/errors.go @@ -17,7 +17,7 @@ var Error = errors.New("protobuf error") // New formats a string according to the format specifier and arguments and // returns an error that has a "proto" prefix. 
-func New(f string, x ...interface{}) error { +func New(f string, x ...any) error { return &prefixError{s: format(f, x...)} } @@ -43,7 +43,7 @@ func (e *prefixError) Unwrap() error { // Wrap returns an error that has a "proto" prefix, the formatted string described // by the format specifier and arguments, and a suffix of err. The error wraps err. -func Wrap(err error, f string, x ...interface{}) error { +func Wrap(err error, f string, x ...any) error { return &wrapError{ s: format(f, x...), err: err, @@ -67,7 +67,7 @@ func (e *wrapError) Is(target error) bool { return target == Error } -func format(f string, x ...interface{}) string { +func format(f string, x ...any) string { // avoid "proto: " prefix when chaining for i := 0; i < len(x); i++ { switch e := x[i].(type) { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index ece53bea3..df53ff40b 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -383,6 +383,10 @@ func (fd *Field) Message() protoreflect.MessageDescriptor { } return fd.L1.Message } +func (fd *Field) IsMapEntry() bool { + parent, ok := fd.L0.Parent.(protoreflect.MessageDescriptor) + return ok && parent.IsMapEntry() +} func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 3bc3b1cdf..8a57d60b0 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -534,7 +534,7 @@ func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor } var nameBuilderPool = sync.Pool{ - New: func() interface{} { return new(strs.Builder) }, + New: func() any { return new(strs.Builder) }, } func getBuilder() *strs.Builder { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 570181eb4..e56c91a8d 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -45,6 +45,11 @@ func (file *File) resolveMessages() { case protoreflect.MessageKind, protoreflect.GroupKind: fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx) depIdx++ + if fd.L1.Kind == protoreflect.GroupKind && (fd.IsMap() || fd.IsMapEntry()) { + // A map field might inherit delimited encoding from a file-wide default feature. + // But maps never actually use delimited encoding. (At least for now...) + fd.L1.Kind = protoreflect.MessageKind + } } // Default is resolved here since it depends on Enum being resolved. diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go index f0e38c4ef..ba83fea44 100644 --- a/vendor/google.golang.org/protobuf/internal/filetype/build.go +++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -68,7 +68,7 @@ type Builder struct { // and for input and output messages referenced by service methods. // Dependencies must come after declarations, but the ordering of // dependencies themselves is unspecified. 
- GoTypes []interface{} + GoTypes []any // DependencyIndexes is an ordered list of indexes into GoTypes for the // dependencies of messages, extensions, or services. @@ -268,7 +268,7 @@ func (x depIdxs) Get(i, j int32) int32 { type ( resolverByIndex struct { - goTypes []interface{} + goTypes []any depIdxs depIdxs fileRegistry } diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 1447a1198..f30ab6b58 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -860,11 +860,13 @@ const ( EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" EnumValueOptions_Features_field_name protoreflect.Name = "features" EnumValueOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" + EnumValueOptions_FeatureSupport_field_name protoreflect.Name = "feature_support" EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" EnumValueOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.features" EnumValueOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact" + EnumValueOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.feature_support" EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" ) @@ -873,6 +875,7 @@ const ( EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 EnumValueOptions_Features_field_number protoreflect.FieldNumber = 2 EnumValueOptions_DebugRedact_field_number protoreflect.FieldNumber = 3 + EnumValueOptions_FeatureSupport_field_number protoreflect.FieldNumber = 4 EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go index a371f98de..5d5771c2e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -22,13 +22,13 @@ type Export struct{} // NewError formats a string according to the format specifier and arguments and // returns an error that has a "proto" prefix. -func (Export) NewError(f string, x ...interface{}) error { +func (Export) NewError(f string, x ...any) error { return errors.New(f, x...) } // enum is any enum type generated by protoc-gen-go // and must be a named int32 type. -type enum = interface{} +type enum = any // EnumOf returns the protoreflect.Enum interface over e. // It returns nil if e is nil. @@ -81,7 +81,7 @@ func (Export) EnumStringOf(ed protoreflect.EnumDescriptor, n protoreflect.EnumNu // message is any message type generated by protoc-gen-go // and must be a pointer to a named struct type. -type message = interface{} +type message = any // legacyMessageWrapper wraps a v2 message as a v1 message. 
type legacyMessageWrapper struct{ m protoreflect.ProtoMessage } diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go index bff041edc..f29e6a8fa 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go +++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go @@ -68,7 +68,7 @@ func (mi *MessageInfo) isInitExtensions(ext *map[int32]ExtensionField) error { } for _, x := range *ext { ei := getExtensionFieldInfo(x.Type()) - if ei.funcs.isInit == nil { + if ei.funcs.isInit == nil || x.isUnexpandedLazy() { continue } v := x.Value() diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index 2b8f122c2..4bb0a7a20 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -99,6 +99,28 @@ func (f *ExtensionField) canLazy(xt protoreflect.ExtensionType) bool { return false } +// isUnexpandedLazy returns true if the ExensionField is lazy and not +// yet expanded, which means it's present and already checked for +// initialized required fields. +func (f *ExtensionField) isUnexpandedLazy() bool { + return f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 +} + +// lazyBuffer retrieves the buffer for a lazy extension if it's not yet expanded. +// +// The returned buffer has to be kept over whatever operation we're planning, +// as re-retrieving it will fail after the message is lazily decoded. +func (f *ExtensionField) lazyBuffer() []byte { + // This function might be in the critical path, so check the atomic without + // taking a look first, then only take the lock if needed. + if !f.isUnexpandedLazy() { + return nil + } + f.lazy.mu.Lock() + defer f.lazy.mu.Unlock() + return f.lazy.b +} + func (f *ExtensionField) lazyInit() { f.lazy.mu.Lock() defer f.lazy.mu.Unlock() diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go index b7a23faf1..7a16ec13d 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go @@ -26,6 +26,15 @@ func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int) } num, _ := protowire.DecodeTag(xi.wiretag) size += messageset.SizeField(num) + if fullyLazyExtensions(opts) { + // Don't expand the extension, instead use the buffer to calculate size + if lb := x.lazyBuffer(); lb != nil { + // We got hold of the buffer, so it's still lazy. + // Don't count the tag size in the extension buffer, it's already added. + size += protowire.SizeTag(messageset.FieldMessage) + len(lb) - xi.tagsize + continue + } + } size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts) } @@ -85,6 +94,19 @@ func marshalMessageSetField(mi *MessageInfo, b []byte, x ExtensionField, opts ma xi := getExtensionFieldInfo(x.Type()) num, _ := protowire.DecodeTag(xi.wiretag) b = messageset.AppendFieldStart(b, num) + + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. 
+ if lb := x.lazyBuffer(); lb != nil { + // The tag inside the lazy buffer is a different tag (the extension + // number), but what we need here is the tag for FieldMessage: + b = protowire.AppendVarint(b, protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType)) + b = append(b, lb[xi.tagsize:]...) + b = messageset.AppendFieldEnd(b) + return b, nil + } + } + b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts) if err != nil { return b, err diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index 185ef2efa..e06ece55a 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -14,7 +14,7 @@ import ( // unwrapper unwraps the value to the underlying value. // This is implemented by List and Map. type unwrapper interface { - protoUnwrap() interface{} + protoUnwrap() any } // A Converter coverts to/from Go reflect.Value types and protobuf protoreflect.Value types. diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go index f89136516..18cb96fd7 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go @@ -136,6 +136,6 @@ func (ls *listReflect) NewElement() protoreflect.Value { func (ls *listReflect) IsValid() bool { return !ls.v.IsNil() } -func (ls *listReflect) protoUnwrap() interface{} { +func (ls *listReflect) protoUnwrap() any { return ls.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go index f30b0a057..304244a65 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -116,6 +116,6 @@ func (ms *mapReflect) NewValue() protoreflect.Value { func (ms *mapReflect) IsValid() bool { return !ms.v.IsNil() } -func (ms *mapReflect) protoUnwrap() interface{} { +func (ms *mapReflect) protoUnwrap() any { return ms.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go index 845c67d6e..febd21224 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -49,8 +49,11 @@ func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) { return 0 } if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() { - if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 { - return int(size) + // The size cache contains the size + 1, to allow the + // zero value to be invalid, while also allowing for a + // 0 size to be cached. 
+ if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size > 0 { + return int(size - 1) } } return mi.sizePointerSlow(p, opts) @@ -60,7 +63,7 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int if flags.ProtoLegacy && mi.isMessageSet { size = sizeMessageSet(mi, p, opts) if mi.sizecacheOffset.IsValid() { - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1)) } return size } @@ -84,13 +87,16 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int } } if mi.sizecacheOffset.IsValid() { - if size > math.MaxInt32 { + if size > (math.MaxInt32 - 1) { // The size is too large for the int32 sizecache field. // We will need to recompute the size when encoding; // unfortunately expensive, but better than invalid output. - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1) + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), 0) } else { - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + // The size cache contains the size + 1, to allow the + // zero value to be invalid, while also allowing for a + // 0 size to be cached. + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1)) } } return size @@ -149,6 +155,14 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt return b, nil } +// fullyLazyExtensions returns true if we should attempt to keep extensions lazy over size and marshal. +func fullyLazyExtensions(opts marshalOptions) bool { + // When deterministic marshaling is requested, force an unmarshal for lazy + // extensions to produce a deterministic result, instead of passing through + // bytes lazily that may or may not match what Go Protobuf would produce. + return opts.flags&piface.MarshalDeterministic == 0 +} + func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) { if ext == nil { return 0 @@ -158,6 +172,14 @@ func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marsha if xi.funcs.size == nil { continue } + if fullyLazyExtensions(opts) { + // Don't expand the extension, instead use the buffer to calculate size + if lb := x.lazyBuffer(); lb != nil { + // We got hold of the buffer, so it's still lazy. + n += len(lb) + continue + } + } n += xi.funcs.size(x.Value(), xi.tagsize, opts) } return n @@ -176,6 +198,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, var err error for _, x := range *ext { xi := getExtensionFieldInfo(x.Type()) + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + b = append(b, lb...) + continue + } + } b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) } return b, err @@ -191,6 +220,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, for _, k := range keys { x := (*ext)[int32(k)] xi := getExtensionFieldInfo(x.Type()) + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + b = append(b, lb...) 
+ continue + } + } b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) if err != nil { return b, err diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go index cb25b0bae..e31249f64 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go @@ -53,7 +53,7 @@ type ExtensionInfo struct { // type returned by InterfaceOf may not be identical. // // Deprecated: Use InterfaceOf(xt.Zero()) instead. - ExtensionType interface{} + ExtensionType any // Field is the field number of the extension. // @@ -95,16 +95,16 @@ func (xi *ExtensionInfo) New() protoreflect.Value { func (xi *ExtensionInfo) Zero() protoreflect.Value { return xi.lazyInit().Zero() } -func (xi *ExtensionInfo) ValueOf(v interface{}) protoreflect.Value { +func (xi *ExtensionInfo) ValueOf(v any) protoreflect.Value { return xi.lazyInit().PBValueOf(reflect.ValueOf(v)) } -func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) interface{} { +func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) any { return xi.lazyInit().GoValueOf(v).Interface() } func (xi *ExtensionInfo) IsValidValue(v protoreflect.Value) bool { return xi.lazyInit().IsValidPB(v) } -func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool { +func (xi *ExtensionInfo) IsValidInterface(v any) bool { return xi.lazyInit().IsValidGo(reflect.ValueOf(v)) } func (xi *ExtensionInfo) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go index c1c33d005..81b2b1a76 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go @@ -97,7 +97,7 @@ func (e *legacyEnumWrapper) Number() protoreflect.EnumNumber { func (e *legacyEnumWrapper) ProtoReflect() protoreflect.Enum { return e } -func (e *legacyEnumWrapper) protoUnwrap() interface{} { +func (e *legacyEnumWrapper) protoUnwrap() any { v := reflect.New(e.goTyp).Elem() v.SetInt(int64(e.num)) return v.Interface() diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 950e9a1fe..bf0b6049b 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -216,7 +216,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName } for _, fn := range methods { for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { + if vs, ok := v.Interface().([]any); ok { for _, v := range vs { oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) } @@ -567,6 +567,6 @@ func (m aberrantMessage) IsValid() bool { func (m aberrantMessage) ProtoMethods() *protoiface.Methods { return aberrantProtoMethods } -func (m aberrantMessage) protoUnwrap() interface{} { +func (m aberrantMessage) protoUnwrap() any { return m.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 629bacdce..019399d45 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -35,7 +35,7 @@ type MessageInfo struct { Exporter exporter // 
OneofWrappers is list of pointers to oneof wrapper struct types. - OneofWrappers []interface{} + OneofWrappers []any initMu sync.Mutex // protects all unexported fields initDone uint32 @@ -47,7 +47,7 @@ type MessageInfo struct { // exporter is a function that returns a reference to the ith field of v, // where v is a pointer to a struct. It returns nil if it does not support // exporting the requested field (e.g., already exported). -type exporter func(v interface{}, i int) interface{} +type exporter func(v any, i int) any // getMessageInfo returns the MessageInfo for any message type that // is generated by our implementation of protoc-gen-go (for v2 and on). @@ -201,7 +201,7 @@ fieldLoop: } for _, fn := range methods { for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { + if vs, ok := v.Interface().([]any); ok { oneofWrappers = vs } } @@ -256,7 +256,7 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType { type mapEntryType struct { desc protoreflect.MessageDescriptor - valType interface{} // zero value of enum or message type + valType any // zero value of enum or message type } func (mt mapEntryType) New() protoreflect.Message { diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index a6f0dbdad..ecb4623d7 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -20,7 +20,7 @@ type reflectMessageInfo struct { // fieldTypes contains the zero value of an enum or message field. // For lists, it contains the element type. // For maps, it contains the entry value type. - fieldTypes map[protoreflect.FieldNumber]interface{} + fieldTypes map[protoreflect.FieldNumber]any // denseFields is a subset of fields where: // 0 < fieldDesc.Number() < len(denseFields) @@ -28,7 +28,7 @@ type reflectMessageInfo struct { denseFields []*fieldInfo // rangeInfos is a list of all fields (not belonging to a oneof) and oneofs. - rangeInfos []interface{} // either *fieldInfo or *oneofInfo + rangeInfos []any // either *fieldInfo or *oneofInfo getUnknown func(pointer) protoreflect.RawFields setUnknown func(pointer, protoreflect.RawFields) @@ -224,7 +224,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { } if ft != nil { if mi.fieldTypes == nil { - mi.fieldTypes = make(map[protoreflect.FieldNumber]interface{}) + mi.fieldTypes = make(map[protoreflect.FieldNumber]any) } mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface() } @@ -255,6 +255,10 @@ func (m *extensionMap) Has(xd protoreflect.ExtensionTypeDescriptor) (ok bool) { if !ok { return false } + if x.isUnexpandedLazy() { + // Avoid calling x.Value(), which triggers a lazy unmarshal. + return true + } switch { case xd.IsList(): return x.Value().List().Len() > 0 @@ -389,7 +393,7 @@ var ( // MessageOf returns a reflective view over a message. The input must be a // pointer to a named Go struct. If the provided type has a ProtoReflect method, // it must be implemented by calling this method. 
-func (mi *MessageInfo) MessageOf(m interface{}) protoreflect.Message { +func (mi *MessageInfo) MessageOf(m any) protoreflect.Message { if reflect.TypeOf(m) != mi.GoReflectType { panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) } @@ -417,7 +421,7 @@ func (m *messageIfaceWrapper) Reset() { func (m *messageIfaceWrapper) ProtoReflect() protoreflect.Message { return (*messageReflectWrapper)(m) } -func (m *messageIfaceWrapper) protoUnwrap() interface{} { +func (m *messageIfaceWrapper) protoUnwrap() any { return m.p.AsIfaceOf(m.mi.GoReflectType.Elem()) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go index 29ba6bd35..99dc23c6f 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go @@ -23,7 +23,7 @@ func (m *messageState) New() protoreflect.Message { func (m *messageState) Interface() protoreflect.ProtoMessage { return m.protoUnwrap().(protoreflect.ProtoMessage) } -func (m *messageState) protoUnwrap() interface{} { +func (m *messageState) protoUnwrap() any { return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) } func (m *messageState) ProtoMethods() *protoiface.Methods { @@ -154,7 +154,7 @@ func (m *messageReflectWrapper) Interface() protoreflect.ProtoMessage { } return (*messageIfaceWrapper)(m) } -func (m *messageReflectWrapper) protoUnwrap() interface{} { +func (m *messageReflectWrapper) protoUnwrap() any { return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) } func (m *messageReflectWrapper) ProtoMethods() *protoiface.Methods { diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 517e94434..da685e8a2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -16,7 +16,7 @@ import ( const UnsafeEnabled = false // Pointer is an opaque pointer type. -type Pointer interface{} +type Pointer any // offset represents the offset to a struct field, accessible from a pointer. // The offset is the field index into a struct. @@ -62,7 +62,7 @@ func pointerOfValue(v reflect.Value) pointer { } // pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v interface{}) pointer { +func pointerOfIface(v any) pointer { return pointer{v: reflect.ValueOf(v)} } @@ -93,7 +93,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value { // AsIfaceOf treats p as a pointer to an object of type t and returns the value. // It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) interface{} { +func (p pointer) AsIfaceOf(t reflect.Type) any { return p.AsValueOf(t).Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 4b020e311..5f20ca5d8 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -50,7 +50,7 @@ func pointerOfValue(v reflect.Value) pointer { } // pointerOfIface returns the pointer portion of an interface. 
-func pointerOfIface(v interface{}) pointer { +func pointerOfIface(v any) pointer { type ifaceHeader struct { Type unsafe.Pointer Data unsafe.Pointer @@ -80,7 +80,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value { // AsIfaceOf treats p as a pointer to an object of type t and returns the value. // It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) interface{} { +func (p pointer) AsIfaceOf(t reflect.Type) any { // TODO: Use tricky unsafe magic to directly create ifaceHeader. return p.AsValueOf(t).Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go index 1665a68e5..a1f09162d 100644 --- a/vendor/google.golang.org/protobuf/internal/order/range.go +++ b/vendor/google.golang.org/protobuf/internal/order/range.go @@ -18,7 +18,7 @@ type messageField struct { } var messageFieldPool = sync.Pool{ - New: func() interface{} { return new([]messageField) }, + New: func() any { return new([]messageField) }, } type ( @@ -69,7 +69,7 @@ type mapEntry struct { } var mapEntryPool = sync.Pool{ - New: func() interface{} { return new([]mapEntry) }, + New: func() any { return new([]mapEntry) }, } type ( diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index a3cba5080..dbbf1f686 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( const ( Major = 1 Minor = 34 - Patch = 1 + Patch = 2 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index c9c8721a6..d248f2928 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -39,7 +39,7 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) { // If the field is unpopulated, it returns the default value for // scalars and an immutable, empty value for lists or messages. // It panics if xt does not extend m. -func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { +func GetExtension(m Message, xt protoreflect.ExtensionType) any { // Treat nil message interface as an empty message; return the default. if m == nil { return xt.InterfaceOf(xt.Zero()) @@ -51,7 +51,7 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { // SetExtension stores the value of an extension field. // It panics if m is invalid, xt does not extend m, or if type of v // is invalid for the specified extension field. -func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { +func SetExtension(m Message, xt protoreflect.ExtensionType, v any) { xd := xt.TypeDescriptor() pv := xt.ValueOf(v) @@ -78,7 +78,7 @@ func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { // It returns immediately if f returns false. // While iterating, mutating operations may only be performed // on the current extension field. -func RangeExtensions(m Message, f func(protoreflect.ExtensionType, interface{}) bool) { +func RangeExtensions(m Message, f func(protoreflect.ExtensionType, any) bool) { // Treat nil message interface as an empty message; nothing to range over. 
if m == nil { return diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index 254ca5854..f3cebab29 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -46,6 +46,11 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil { return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) } + if f.L1.Kind == protoreflect.GroupKind && (f.IsMap() || f.IsMapEntry()) { + // A map field might inherit delimited encoding from a file-wide default feature. + // But maps never actually use delimited encoding. (At least for now...) + f.L1.Kind = protoreflect.MessageKind + } if fd.DefaultValue != nil { v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable) if err != nil { diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index c62930867..6de31c2eb 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -116,18 +116,6 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds if m.ExtensionRanges().Len() > 0 { return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName()) } - // Verify that field names in proto3 do not conflict if lowercased - // with all underscores removed. - // See protoc v3.8.0: src/google/protobuf/descriptor.cc:5830-5847 - names := map[string]protoreflect.FieldDescriptor{} - for i := 0; i < m.Fields().Len(); i++ { - f1 := m.Fields().Get(i) - s := strings.Replace(strings.ToLower(string(f1.Name())), "_", "", -1) - if f2, ok := names[s]; ok { - return errors.New("message %q using proto3 semantics has conflict: %q with %q", m.FullName(), f1.Name(), f2.Name()) - } - names[s] = f1 - } } for j, fd := range md.GetField() { diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index 00102d311..ea154eec4 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -485,6 +485,8 @@ func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 3: b = p.appendSingularField(b, "debug_redact", nil) + case 4: + b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 5b80afe52..cd8fadbaf 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -510,7 +510,7 @@ type ExtensionType interface { // // ValueOf is more extensive than protoreflect.ValueOf for a given field's // value as it has more type information available. 
- ValueOf(interface{}) Value + ValueOf(any) Value // InterfaceOf completely unwraps the Value to the underlying Go type. // InterfaceOf panics if the input is nil or does not represent the @@ -519,13 +519,13 @@ type ExtensionType interface { // // InterfaceOf is able to unwrap the Value further than Value.Interface // as it has more type information available. - InterfaceOf(Value) interface{} + InterfaceOf(Value) any // IsValidValue reports whether the Value is valid to assign to the field. IsValidValue(Value) bool // IsValidInterface reports whether the input is valid to assign to the field. - IsValidInterface(interface{}) bool + IsValidInterface(any) bool } // EnumDescriptor describes an enum and diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go index 7ced876f4..75f83a2af 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go @@ -32,11 +32,11 @@ const ( type value struct { pragma.DoNotCompare // 0B - typ valueType // 8B - num uint64 // 8B - str string // 16B - bin []byte // 24B - iface interface{} // 16B + typ valueType // 8B + num uint64 // 8B + str string // 16B + bin []byte // 24B + iface any // 16B } func valueOfString(v string) Value { @@ -45,7 +45,7 @@ func valueOfString(v string) Value { func valueOfBytes(v []byte) Value { return Value{typ: bytesType, bin: v} } -func valueOfIface(v interface{}) Value { +func valueOfIface(v any) Value { return Value{typ: ifaceType, iface: v} } @@ -55,6 +55,6 @@ func (v Value) getString() string { func (v Value) getBytes() []byte { return v.bin } -func (v Value) getIface() interface{} { +func (v Value) getIface() any { return v.iface } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index 160309731..9fe83cef5 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -69,8 +69,8 @@ import ( // composite Value. Modifying an empty, read-only value panics. type Value value -// The protoreflect API uses a custom Value union type instead of interface{} -// to keep the future open for performance optimizations. Using an interface{} +// The protoreflect API uses a custom Value union type instead of any +// to keep the future open for performance optimizations. Using an any // always incurs an allocation for primitives (e.g., int64) since it needs to // be boxed on the heap (as interfaces can only contain pointers natively). // Instead, we represent the Value union as a flat struct that internally keeps @@ -85,7 +85,7 @@ type Value value // ValueOf returns a Value initialized with the concrete value stored in v. // This panics if the type does not match one of the allowed types in the // Value union. -func ValueOf(v interface{}) Value { +func ValueOf(v any) Value { switch v := v.(type) { case nil: return Value{} @@ -192,10 +192,10 @@ func (v Value) IsValid() bool { return v.typ != nilType } -// Interface returns v as an interface{}. +// Interface returns v as an any. // // Invariant: v == ValueOf(v).Interface() -func (v Value) Interface() interface{} { +func (v Value) Interface() any { switch v.typ { case nilType: return nil @@ -406,8 +406,8 @@ func (k MapKey) IsValid() bool { return Value(k).IsValid() } -// Interface returns k as an interface{}. 
-func (k MapKey) Interface() interface{} { +// Interface returns k as an any. +func (k MapKey) Interface() any { return Value(k).Interface() } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index b1fdbe3e8..7f3583ead 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -45,7 +45,7 @@ var ( // typeOf returns a pointer to the Go type information. // The pointer is comparable and equal if and only if the types are identical. -func typeOf(t interface{}) unsafe.Pointer { +func typeOf(t any) unsafe.Pointer { return (*ifaceHeader)(unsafe.Pointer(&t)).Type } @@ -80,7 +80,7 @@ func valueOfBytes(v []byte) Value { p := (*sliceHeader)(unsafe.Pointer(&v)) return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))} } -func valueOfIface(v interface{}) Value { +func valueOfIface(v any) Value { p := (*ifaceHeader)(unsafe.Pointer(&v)) return Value{typ: p.Type, ptr: p.Data} } @@ -93,7 +93,7 @@ func (v Value) getBytes() (x []byte) { *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)} return x } -func (v Value) getIface() (x interface{}) { +func (v Value) getIface() (x any) { *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} return x } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go index 435470111..f7d386990 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -15,7 +15,7 @@ import ( type ( ifaceHeader struct { - _ [0]interface{} // if interfaces have greater alignment than unsafe.Pointer, this will enforce it. + _ [0]any // if interfaces have greater alignment than unsafe.Pointer, this will enforce it. Type unsafe.Pointer Data unsafe.Pointer } @@ -37,7 +37,7 @@ var ( // typeOf returns a pointer to the Go type information. // The pointer is comparable and equal if and only if the types are identical. -func typeOf(t interface{}) unsafe.Pointer { +func typeOf(t any) unsafe.Pointer { return (*ifaceHeader)(unsafe.Pointer(&t)).Type } @@ -70,7 +70,7 @@ func valueOfString(v string) Value { func valueOfBytes(v []byte) Value { return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))} } -func valueOfIface(v interface{}) Value { +func valueOfIface(v any) Value { p := (*ifaceHeader)(unsafe.Pointer(&v)) return Value{typ: p.Type, ptr: p.Data} } @@ -81,7 +81,7 @@ func (v Value) getString() string { func (v Value) getBytes() []byte { return unsafe.Slice((*byte)(v.ptr), v.num) } -func (v Value) getIface() (x interface{}) { +func (v Value) getIface() (x any) { *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} return x } diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index 6267dc52a..de1777339 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -95,7 +95,7 @@ type Files struct { // multiple files. Only top-level declarations are registered. 
// Note that enum values are in the top-level since that are in the same // scope as the parent enum. - descsByName map[protoreflect.FullName]interface{} + descsByName map[protoreflect.FullName]any filesByPath map[string][]protoreflect.FileDescriptor numFiles int } @@ -117,7 +117,7 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { defer globalMutex.Unlock() } if r.descsByName == nil { - r.descsByName = map[protoreflect.FullName]interface{}{ + r.descsByName = map[protoreflect.FullName]any{ "": &packageDescriptor{}, } r.filesByPath = make(map[string][]protoreflect.FileDescriptor) @@ -485,7 +485,7 @@ type Types struct { } type ( - typesByName map[protoreflect.FullName]interface{} + typesByName map[protoreflect.FullName]any extensionsByMessage map[protoreflect.FullName]extensionsByNumber extensionsByNumber map[protoreflect.FieldNumber]protoreflect.ExtensionType ) @@ -570,7 +570,7 @@ func (r *Types) RegisterExtension(xt protoreflect.ExtensionType) error { return nil } -func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interface{}) error { +func (r *Types) register(kind string, desc protoreflect.Descriptor, typ any) error { name := desc.FullName() prev := r.typesByName[name] if prev != nil { @@ -841,7 +841,7 @@ func (r *Types) RangeExtensionsByMessage(message protoreflect.FullName, f func(p } } -func typeName(t interface{}) string { +func typeName(t any) string { switch t.(type) { case protoreflect.EnumType: return "enum" @@ -854,7 +854,7 @@ func typeName(t interface{}) string { } } -func amendErrorWithCaller(err error, prev, curr interface{}) error { +func amendErrorWithCaller(err error, prev, curr any) error { prevPkg := goPackage(prev) currPkg := goPackage(curr) if prevPkg == "" || currPkg == "" || prevPkg == currPkg { @@ -863,7 +863,7 @@ func amendErrorWithCaller(err error, prev, curr interface{}) error { return errors.New("%s\n\tpreviously from: %q\n\tcurrently from: %q", err, prevPkg, currPkg) } -func goPackage(v interface{}) string { +func goPackage(v any) string { switch d := v.(type) { case protoreflect.EnumType: v = d.Descriptor() diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 10c9030eb..9403eb075 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -3012,6 +3012,8 @@ type EnumValueOptions struct { // out when using debug formats, e.g. when the field contains sensitive // credentials. DebugRedact *bool `protobuf:"varint,3,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + // Information about the support window of a feature value. + FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,4,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -3075,6 +3077,13 @@ func (x *EnumValueOptions) GetDebugRedact() bool { return Default_EnumValueOptions_DebugRedact } +func (x *EnumValueOptions) GetFeatureSupport() *FieldOptions_FeatureSupport { + if x != nil { + return x.FeatureSupport + } + return nil +} + func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -4706,7 +4715,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x69, 0x6e, 0x67, 0x22, 0x97, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x69, 0x6e, 0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, @@ -4779,438 +4788,445 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, - 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xf4, 0x03, - 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, - 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, - 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, - 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, - 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, - 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, - 0x42, 0x02, 
0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, - 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, - 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, - 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, - 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, - 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, - 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, - 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, - 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, - 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, - 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, - 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, - 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, - 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 
0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, - 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, - 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, - 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, - 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, - 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, - 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x55, 0x0a, 0x0f, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x16, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, - 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, - 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, 
0x0e, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, - 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, - 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0f, - 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, - 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, - 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, - 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, - 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, - 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, - 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, - 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, - 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, - 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, - 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, - 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, - 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, - 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 
0x54, 0x59, - 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, - 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, - 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, - 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, - 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, - 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, - 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, - 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, - 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, - 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, - 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, - 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, - 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 
0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, + 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, + 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, + 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, + 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, + 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, + 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, + 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 
0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, - 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x81, 0x02, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, - 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, + 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, + 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, + 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, + 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, + 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, + 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, - 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 
0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, + 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, + 0x55, 0x0a, 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, + 0x72, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, - 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, - 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 
- 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, + 0x0e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, + 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, + 0x64, 0x75, 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, + 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, + 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, + 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, + 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, + 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, + 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, + 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, + 0x54, 0x49, 0x4f, 0x4e, 
0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, + 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, + 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, + 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, + 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, + 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, + 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, + 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, + 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, + 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, + 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, + 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, - 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, - 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, - 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, - 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 
0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, - 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, - 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, - 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, - 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, - 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, - 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, - 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, - 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, - 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, - 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, - 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 
0x20, 0x01, 0x28, 0x09, - 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, - 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, - 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, - 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x0a, 0x0a, - 0x0a, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, - 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, - 0x3f, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, - 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, - 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, - 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, - 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, - 0x6c, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, - 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, - 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, - 0x07, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, - 0x08, 0xe8, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, - 0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, - 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, - 0x6e, 0x67, 0x42, 0x2d, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, - 0x12, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, - 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, - 0x07, 0x52, 0x15, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, - 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, - 
0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, - 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, - 0x45, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, - 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, - 0x26, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, - 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, - 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, - 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, - 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, - 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, - 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, - 0x0a, 0x12, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, - 0x07, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, - 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, - 0x0a, 0x16, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, - 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, - 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, - 0x49, 0x43, 0x49, 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, - 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, - 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, - 0x0a, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, - 0x45, 0x44, 0x10, 0x02, 0x22, 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, - 0x1f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, - 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, - 0x0a, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, - 0x55, 0x74, 0x66, 0x38, 
0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, - 0x0a, 0x17, 0x55, 0x54, 0x46, 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, - 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, - 0x03, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, - 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, - 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, - 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, 0x4d, - 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, - 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, - 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, - 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, - 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, - 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9, 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, - 0x2a, 0x06, 0x08, 0xea, 0x07, 0x10, 0xeb, 0x07, 0x2a, 0x06, 0x08, 0x86, 0x4e, 0x10, 0x87, 0x4e, - 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, - 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xd9, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, - 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, - 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, - 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, - 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, - 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, - 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, - 0xe2, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, - 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 
0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, + 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, + 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, + 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, + 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 
0x72, 0x65, 0x64, 0x61, + 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x4e, 0x0a, 0x14, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, + 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, + 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, + 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, + 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, - 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x12, 0x42, 0x0a, 0x0e, 0x66, 0x69, 0x78, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x0d, 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, - 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, - 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, - 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, - 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, - 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, - 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, - 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, - 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, - 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x64, 
0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, - 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, - 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, - 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, - 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, - 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, - 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, - 0x02, 0x2a, 0xa7, 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, - 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, - 0x47, 0x41, 0x43, 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, 0x0e, - 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, 0xe7, - 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, - 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, - 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, - 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, - 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, - 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, - 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, - 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, + 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, + 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, + 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, + 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, + 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, + 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, + 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, + 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 
0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, + 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, + 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, + 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, + 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, + 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, + 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, + 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, + 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, + 0x6d, 0x54, 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, + 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, + 0x09, 0x12, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, + 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, + 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, + 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, + 0x2d, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, + 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, + 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, + 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 
0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, + 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0xe6, + 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, + 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, + 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, + 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xb2, 0x01, + 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, + 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, + 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, + 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, + 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, + 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, + 0x43, 0x49, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, + 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, + 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, + 0x50, 0x45, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, + 0x02, 0x22, 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, + 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 
0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, + 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, + 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, + 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, + 0x54, 0x46, 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, + 0x46, 0x59, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, + 0x08, 0x01, 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, + 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, + 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, + 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, + 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, + 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, + 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, + 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, + 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, + 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, + 0x10, 0xe8, 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, + 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 
0x69, 0x6f, 0x6e, + 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, + 0x62, 0x6c, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, + 0x69, 0x78, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x52, 0x0d, 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, + 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, + 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, + 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, + 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, + 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, + 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, + 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, + 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, + 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, + 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, + 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, + 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, + 0x10, 0x02, 0x2a, 0xa7, 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, + 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, + 0x45, 0x47, 0x41, 0x43, 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, + 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, + 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, + 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, + 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, + 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, + 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, + 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, + 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 
0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, + 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, + 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, } var ( @@ -5227,7 +5243,7 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17) var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33) -var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ +var file_google_protobuf_descriptor_proto_goTypes = []any{ (Edition)(0), // 0: google.protobuf.Edition (ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type @@ -5329,38 +5345,39 @@ var file_google_protobuf_descriptor_proto_depIdxs = []int32{ 36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet 35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption 36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 49: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 50: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 51: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 9, // 52: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 36, // 53: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 54: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 46, // 55: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 10, // 56: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence - 11, // 57: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType - 12, // 58: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding - 13, // 59: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation - 14, // 60: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding - 15, // 61: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat - 47, // 62: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - 0, // 63: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition - 0, // 64: google.protobuf.FeatureSetDefaults.maximum_edition:type_name 
-> google.protobuf.Edition - 48, // 65: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 49, // 66: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 20, // 67: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 0, // 68: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition - 0, // 69: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition - 0, // 70: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition - 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition - 0, // 72: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition - 36, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet - 36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet - 16, // 75: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 76, // [76:76] is the sub-list for method output_type - 76, // [76:76] is the sub-list for method input_type - 76, // [76:76] is the sub-list for extension type_name - 76, // [76:76] is the sub-list for extension extendee - 0, // [0:76] is the sub-list for field type_name + 45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 35, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 9, // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 46, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence + 11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType + 12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding + 13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation + 14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding + 15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat + 47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> 
google.protobuf.SourceCodeInfo.Location + 49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 69: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition + 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition + 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition + 0, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition + 36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet + 36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet + 16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 77, // [77:77] is the sub-list for method output_type + 77, // [77:77] is the sub-list for method input_type + 77, // [77:77] is the sub-list for extension type_name + 77, // [77:77] is the sub-list for extension extendee + 0, // [0:77] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -5369,7 +5386,7 @@ func file_google_protobuf_descriptor_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*FileDescriptorSet); i { case 0: return &v.state @@ -5381,7 +5398,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*FileDescriptorProto); i { case 0: return &v.state @@ -5393,7 +5410,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto); i { case 0: return &v.state @@ -5405,7 +5422,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*ExtensionRangeOptions); i { case 0: return &v.state @@ -5419,7 +5436,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*FieldDescriptorProto); i { case 0: return &v.state @@ -5431,7 +5448,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + 
file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*OneofDescriptorProto); i { case 0: return &v.state @@ -5443,7 +5460,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*EnumDescriptorProto); i { case 0: return &v.state @@ -5455,7 +5472,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*EnumValueDescriptorProto); i { case 0: return &v.state @@ -5467,7 +5484,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*ServiceDescriptorProto); i { case 0: return &v.state @@ -5479,7 +5496,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*MethodDescriptorProto); i { case 0: return &v.state @@ -5491,7 +5508,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any { switch v := v.(*FileOptions); i { case 0: return &v.state @@ -5505,7 +5522,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any { switch v := v.(*MessageOptions); i { case 0: return &v.state @@ -5519,7 +5536,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*FieldOptions); i { case 0: return &v.state @@ -5533,7 +5550,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any { switch v := v.(*OneofOptions); i { case 0: return &v.state @@ -5547,7 +5564,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any { switch v := v.(*EnumOptions); i { case 0: return &v.state @@ -5561,7 +5578,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any { switch v 
:= v.(*EnumValueOptions); i { case 0: return &v.state @@ -5575,7 +5592,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i int) any { switch v := v.(*ServiceOptions); i { case 0: return &v.state @@ -5589,7 +5606,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any { switch v := v.(*MethodOptions); i { case 0: return &v.state @@ -5603,7 +5620,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any { switch v := v.(*UninterpretedOption); i { case 0: return &v.state @@ -5615,7 +5632,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any { switch v := v.(*FeatureSet); i { case 0: return &v.state @@ -5629,7 +5646,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any { switch v := v.(*FeatureSetDefaults); i { case 0: return &v.state @@ -5641,7 +5658,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any { switch v := v.(*SourceCodeInfo); i { case 0: return &v.state @@ -5653,7 +5670,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any { switch v := v.(*GeneratedCodeInfo); i { case 0: return &v.state @@ -5665,7 +5682,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto_ExtensionRange); i { case 0: return &v.state @@ -5677,7 +5694,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any { switch v := v.(*DescriptorProto_ReservedRange); i { case 0: return &v.state @@ -5689,7 +5706,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any { switch v := v.(*ExtensionRangeOptions_Declaration); i { case 0: return &v.state @@ 
-5701,7 +5718,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any { switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { case 0: return &v.state @@ -5713,7 +5730,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any { switch v := v.(*FieldOptions_EditionDefault); i { case 0: return &v.state @@ -5725,7 +5742,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any { switch v := v.(*FieldOptions_FeatureSupport); i { case 0: return &v.state @@ -5737,7 +5754,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any { switch v := v.(*UninterpretedOption_NamePart); i { case 0: return &v.state @@ -5749,7 +5766,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any { switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { case 0: return &v.state @@ -5761,7 +5778,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any { switch v := v.(*SourceCodeInfo_Location); i { case 0: return &v.state @@ -5773,7 +5790,7 @@ func file_google_protobuf_descriptor_proto_init() { return nil } } - file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any { switch v := v.(*GeneratedCodeInfo_Annotation); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index b0df3fb33..a2ca940c5 100644 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -90,27 +90,27 @@ var file_google_protobuf_go_features_proto_rawDesc = []byte{ 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc9, 0x01, 0x0a, 0x0a, 0x47, 0x6f, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xba, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x01, 0x0a, 0x0a, 0x47, 0x6f, + 0x46, 0x65, 0x61, 0x74, 
0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73, - 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x7d, 0x88, - 0x01, 0x01, 0x98, 0x01, 0x06, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, 0x75, 0x65, 0x18, 0x84, - 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, 0xe7, 0x07, 0xb2, 0x01, - 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65, 0x20, 0x6c, 0x65, 0x67, - 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x4a, 0x53, 0x4f, - 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x62, 0x65, 0x20, - 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, 0x20, 0x66, 0x75, 0x74, - 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x17, 0x6c, 0x65, - 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x4a, 0x73, 0x6f, - 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, 0x1b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, - 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, - 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x70, 0x62, + 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01, + 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, + 0x75, 0x65, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, + 0xe7, 0x07, 0xb2, 0x01, 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65, + 0x20, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, + 0x6c, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c, + 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, + 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, + 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, + 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, + 0x61, 0x74, 0x75, 0x72, 
0x65, 0x73, 0x70, 0x62, } var ( @@ -126,7 +126,7 @@ func file_google_protobuf_go_features_proto_rawDescGZIP() []byte { } var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_go_features_proto_goTypes = []interface{}{ +var file_google_protobuf_go_features_proto_goTypes = []any{ (*GoFeatures)(nil), // 0: pb.GoFeatures (*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet } @@ -146,7 +146,7 @@ func file_google_protobuf_go_features_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*GoFeatures); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 9de51be54..7172b43d3 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -445,7 +445,7 @@ func file_google_protobuf_any_proto_rawDescGZIP() []byte { } var file_google_protobuf_any_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_any_proto_goTypes = []interface{}{ +var file_google_protobuf_any_proto_goTypes = []any{ (*Any)(nil), // 0: google.protobuf.Any } var file_google_protobuf_any_proto_depIdxs = []int32{ @@ -462,7 +462,7 @@ func file_google_protobuf_any_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Any); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index df709a8dd..1b71bcd91 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -323,7 +323,7 @@ func file_google_protobuf_duration_proto_rawDescGZIP() []byte { } var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_duration_proto_goTypes = []interface{}{ +var file_google_protobuf_duration_proto_goTypes = []any{ (*Duration)(nil), // 0: google.protobuf.Duration } var file_google_protobuf_duration_proto_depIdxs = []int32{ @@ -340,7 +340,7 @@ func file_google_protobuf_duration_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Duration); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go index 9a7277ba3..d87b4fb82 100644 --- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -115,7 +115,7 @@ func file_google_protobuf_empty_proto_rawDescGZIP() []byte { } var file_google_protobuf_empty_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_empty_proto_goTypes = []interface{}{ +var file_google_protobuf_empty_proto_goTypes = []any{ (*Empty)(nil), // 0: 
google.protobuf.Empty } var file_google_protobuf_empty_proto_depIdxs = []int32{ @@ -132,7 +132,7 @@ func file_google_protobuf_empty_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Empty); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index e8789cb33..ac1e91bb6 100644 --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -537,7 +537,7 @@ func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte { } var file_google_protobuf_field_mask_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_field_mask_proto_goTypes = []interface{}{ +var file_google_protobuf_field_mask_proto_goTypes = []any{ (*FieldMask)(nil), // 0: google.protobuf.FieldMask } var file_google_protobuf_field_mask_proto_depIdxs = []int32{ @@ -554,7 +554,7 @@ func file_google_protobuf_field_mask_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*FieldMask); i { case 0: return &v.state diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 81511a336..83a5a645b 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -332,7 +332,7 @@ func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte { } var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_timestamp_proto_goTypes = []interface{}{ +var file_google_protobuf_timestamp_proto_goTypes = []any{ (*Timestamp)(nil), // 0: google.protobuf.Timestamp } var file_google_protobuf_timestamp_proto_depIdxs = []int32{ @@ -349,7 +349,7 @@ func file_google_protobuf_timestamp_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Timestamp); i { case 0: return &v.state diff --git a/vendor/modules.txt b/vendor/modules.txt index c77b92eb9..9a854b0ec 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,10 +1,10 @@ -# cloud.google.com/go v0.114.0 +# cloud.google.com/go v0.115.0 ## explicit; go 1.20 cloud.google.com/go/internal cloud.google.com/go/internal/optional cloud.google.com/go/internal/trace cloud.google.com/go/internal/version -# cloud.google.com/go/auth v0.5.1 +# cloud.google.com/go/auth v0.6.0 ## explicit; go 1.20 cloud.google.com/go/auth cloud.google.com/go/auth/credentials @@ -23,7 +23,7 @@ cloud.google.com/go/auth/internal/transport/cert # cloud.google.com/go/auth/oauth2adapt v0.2.2 ## explicit; go 1.19 cloud.google.com/go/auth/oauth2adapt -# cloud.google.com/go/compute v1.27.0 +# cloud.google.com/go/compute v1.27.1 ## explicit; go 1.20 cloud.google.com/go/compute/apiv1 
cloud.google.com/go/compute/apiv1/computepb @@ -167,8 +167,8 @@ github.com/AzureAD/microsoft-authentication-library-for-go/apps/public ## explicit; go 1.18 github.com/BurntSushi/toml github.com/BurntSushi/toml/internal -# github.com/Microsoft/go-winio v0.6.1 -## explicit; go 1.17 +# github.com/Microsoft/go-winio v0.6.2 +## explicit; go 1.21 github.com/Microsoft/go-winio github.com/Microsoft/go-winio/backuptar github.com/Microsoft/go-winio/internal/fs @@ -176,8 +176,8 @@ github.com/Microsoft/go-winio/internal/socket github.com/Microsoft/go-winio/internal/stringbuffer github.com/Microsoft/go-winio/pkg/guid github.com/Microsoft/go-winio/vhd -# github.com/Microsoft/hcsshim v0.12.0-rc.3 -## explicit; go 1.18 +# github.com/Microsoft/hcsshim v0.12.3 +## explicit; go 1.21 github.com/Microsoft/hcsshim github.com/Microsoft/hcsshim/computestorage github.com/Microsoft/hcsshim/internal/cow @@ -212,7 +212,7 @@ github.com/acarl005/stripansi # github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 ## explicit; go 1.13 github.com/asaskevich/govalidator -# github.com/aws/aws-sdk-go v1.54.2 +# github.com/aws/aws-sdk-go v1.54.10 ## explicit; go 1.19 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn @@ -283,7 +283,7 @@ github.com/cenkalti/backoff/v4 # github.com/cespare/xxhash/v2 v2.2.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 -# github.com/containerd/cgroups/v3 v3.0.2 +# github.com/containerd/cgroups/v3 v3.0.3 ## explicit; go 1.18 github.com/containerd/cgroups/v3/cgroup1/stats # github.com/containerd/errdefs v0.1.0 @@ -293,11 +293,11 @@ github.com/containerd/errdefs ## explicit; go 1.19 github.com/containerd/stargz-snapshotter/estargz github.com/containerd/stargz-snapshotter/estargz/errorutil -# github.com/containers/common v0.58.3 -## explicit; go 1.20 +# github.com/containers/common v0.59.1 +## explicit; go 1.21 github.com/containers/common/pkg/retry -# github.com/containers/image/v5 v5.30.1 -## explicit; go 1.19 +# github.com/containers/image/v5 v5.31.1 +## explicit; go 1.21 github.com/containers/image/v5/copy github.com/containers/image/v5/directory github.com/containers/image/v5/directory/explicitfilepath @@ -317,6 +317,7 @@ github.com/containers/image/v5/internal/imagesource/impl github.com/containers/image/v5/internal/imagesource/stubs github.com/containers/image/v5/internal/iolimits github.com/containers/image/v5/internal/manifest +github.com/containers/image/v5/internal/multierr github.com/containers/image/v5/internal/pkg/platform github.com/containers/image/v5/internal/private github.com/containers/image/v5/internal/putblobdigest @@ -363,7 +364,7 @@ github.com/containers/image/v5/version # github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 ## explicit github.com/containers/libtrust -# github.com/containers/ocicrypt v1.1.9 +# github.com/containers/ocicrypt v1.1.10 ## explicit; go 1.20 github.com/containers/ocicrypt github.com/containers/ocicrypt/blockcipher @@ -379,14 +380,13 @@ github.com/containers/ocicrypt/keywrap/pkcs7 github.com/containers/ocicrypt/spec github.com/containers/ocicrypt/utils github.com/containers/ocicrypt/utils/keyprovider -# github.com/containers/storage v1.53.0 -## explicit; go 1.20 +# github.com/containers/storage v1.54.0 +## explicit; go 1.21 github.com/containers/storage github.com/containers/storage/drivers github.com/containers/storage/drivers/aufs github.com/containers/storage/drivers/btrfs github.com/containers/storage/drivers/copy -github.com/containers/storage/drivers/devmapper 
github.com/containers/storage/drivers/overlay github.com/containers/storage/drivers/overlayutils github.com/containers/storage/drivers/quota @@ -402,9 +402,7 @@ github.com/containers/storage/pkg/chunked/dump github.com/containers/storage/pkg/chunked/internal github.com/containers/storage/pkg/chunked/toc github.com/containers/storage/pkg/config -github.com/containers/storage/pkg/devicemapper github.com/containers/storage/pkg/directory -github.com/containers/storage/pkg/dmesg github.com/containers/storage/pkg/fileutils github.com/containers/storage/pkg/fsutils github.com/containers/storage/pkg/fsverity @@ -418,7 +416,6 @@ github.com/containers/storage/pkg/longpath github.com/containers/storage/pkg/loopback github.com/containers/storage/pkg/mount github.com/containers/storage/pkg/parsers -github.com/containers/storage/pkg/parsers/kernel github.com/containers/storage/pkg/pools github.com/containers/storage/pkg/promise github.com/containers/storage/pkg/reexec @@ -440,10 +437,10 @@ github.com/coreos/go-systemd/journal # github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f ## explicit github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer -# github.com/cyphar/filepath-securejoin v0.2.4 +# github.com/cyphar/filepath-securejoin v0.2.5 ## explicit; go 1.13 github.com/cyphar/filepath-securejoin -# github.com/davecgh/go-spew v1.1.1 +# github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ## explicit github.com/davecgh/go-spew/spew # github.com/deepmap/oapi-codegen v1.8.2 @@ -457,7 +454,7 @@ github.com/deepmap/oapi-codegen/pkg/util # github.com/dimchansky/utfbom v1.1.1 ## explicit github.com/dimchansky/utfbom -# github.com/distribution/reference v0.5.0 +# github.com/distribution/reference v0.6.0 ## explicit; go 1.20 github.com/distribution/reference # github.com/docker/distribution v2.8.3+incompatible @@ -465,7 +462,7 @@ github.com/distribution/reference github.com/docker/distribution/registry/api/errcode github.com/docker/distribution/registry/api/v2 github.com/docker/distribution/registry/client/auth/challenge -# github.com/docker/docker v25.0.5+incompatible +# github.com/docker/docker v26.1.3+incompatible ## explicit github.com/docker/docker/api github.com/docker/docker/api/types @@ -487,7 +484,6 @@ github.com/docker/docker/api/types/versions github.com/docker/docker/api/types/volume github.com/docker/docker/client github.com/docker/docker/errdefs -github.com/docker/docker/image/spec/specs-go/v1 github.com/docker/docker/internal/multierror # github.com/docker/docker-credential-helpers v0.8.1 ## explicit; go 1.19 @@ -540,8 +536,8 @@ github.com/go-logr/logr/funcr # github.com/go-logr/stdr v1.2.2 ## explicit; go 1.16 github.com/go-logr/stdr -# github.com/go-openapi/analysis v0.21.4 -## explicit; go 1.13 +# github.com/go-openapi/analysis v0.23.0 +## explicit; go 1.20 github.com/go-openapi/analysis github.com/go-openapi/analysis/internal/debug github.com/go-openapi/analysis/internal/flatten/normalize @@ -549,33 +545,33 @@ github.com/go-openapi/analysis/internal/flatten/operations github.com/go-openapi/analysis/internal/flatten/replace github.com/go-openapi/analysis/internal/flatten/schutils github.com/go-openapi/analysis/internal/flatten/sortref -# github.com/go-openapi/errors v0.21.1 -## explicit; go 1.19 +# github.com/go-openapi/errors v0.22.0 +## explicit; go 1.20 github.com/go-openapi/errors -# github.com/go-openapi/jsonpointer v0.19.6 -## explicit; go 1.13 +# github.com/go-openapi/jsonpointer v0.21.0 +## explicit; go 1.20 
github.com/go-openapi/jsonpointer -# github.com/go-openapi/jsonreference v0.20.2 -## explicit; go 1.13 +# github.com/go-openapi/jsonreference v0.21.0 +## explicit; go 1.20 github.com/go-openapi/jsonreference github.com/go-openapi/jsonreference/internal -# github.com/go-openapi/loads v0.21.2 -## explicit; go 1.13 +# github.com/go-openapi/loads v0.22.0 +## explicit; go 1.20 github.com/go-openapi/loads -# github.com/go-openapi/runtime v0.26.0 -## explicit; go 1.18 +# github.com/go-openapi/runtime v0.28.0 +## explicit; go 1.20 github.com/go-openapi/runtime -# github.com/go-openapi/spec v0.20.9 -## explicit; go 1.13 +# github.com/go-openapi/spec v0.21.0 +## explicit; go 1.20 github.com/go-openapi/spec -# github.com/go-openapi/strfmt v0.22.2 -## explicit; go 1.19 +# github.com/go-openapi/strfmt v0.23.0 +## explicit; go 1.20 github.com/go-openapi/strfmt -# github.com/go-openapi/swag v0.22.10 -## explicit; go 1.19 +# github.com/go-openapi/swag v0.23.0 +## explicit; go 1.20 github.com/go-openapi/swag -# github.com/go-openapi/validate v0.22.1 -## explicit; go 1.14 +# github.com/go-openapi/validate v0.24.0 +## explicit; go 1.20 github.com/go-openapi/validate # github.com/gobwas/glob v0.2.3 ## explicit @@ -617,7 +613,7 @@ github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/go-containerregistry v0.19.0 +# github.com/google/go-containerregistry v0.19.1 ## explicit; go 1.18 github.com/google/go-containerregistry/pkg/name github.com/google/go-containerregistry/pkg/v1 @@ -655,8 +651,8 @@ github.com/google/uuid ## explicit; go 1.19 github.com/googleapis/enterprise-certificate-proxy/client github.com/googleapis/enterprise-certificate-proxy/client/util -# github.com/googleapis/gax-go/v2 v2.12.4 -## explicit; go 1.19 +# github.com/googleapis/gax-go/v2 v2.12.5 +## explicit; go 1.20 github.com/googleapis/gax-go/v2 github.com/googleapis/gax-go/v2/apierror github.com/googleapis/gax-go/v2/apierror/internal/proto @@ -694,7 +690,7 @@ github.com/hashicorp/go-multierror # github.com/hashicorp/go-retryablehttp v0.7.7 ## explicit; go 1.19 github.com/hashicorp/go-retryablehttp -# github.com/hashicorp/go-version v1.6.0 +# github.com/hashicorp/go-version v1.7.0 ## explicit github.com/hashicorp/go-version # github.com/inconshreveable/mousetrap v1.1.0 @@ -743,7 +739,7 @@ github.com/json-iterator/go # github.com/julienschmidt/httprouter v1.3.0 ## explicit; go 1.7 github.com/julienschmidt/httprouter -# github.com/klauspost/compress v1.17.7 +# github.com/klauspost/compress v1.17.8 ## explicit; go 1.20 github.com/klauspost/compress github.com/klauspost/compress/flate @@ -797,9 +793,6 @@ github.com/mattn/go-isatty # github.com/mattn/go-runewidth v0.0.15 ## explicit; go 1.9 github.com/mattn/go-runewidth -# github.com/mattn/go-shellwords v1.0.12 -## explicit; go 1.13 -github.com/mattn/go-shellwords # github.com/mattn/go-sqlite3 v1.14.22 ## explicit; go 1.19 github.com/mattn/go-sqlite3 @@ -819,6 +812,9 @@ github.com/mitchellh/go-homedir # github.com/mitchellh/mapstructure v1.5.0 ## explicit; go 1.14 github.com/mitchellh/mapstructure +# github.com/moby/docker-image-spec v1.3.1 +## explicit; go 1.18 +github.com/moby/docker-image-spec/specs-go/v1 # github.com/moby/sys/mountinfo v0.7.1 ## explicit; go 1.16 github.com/moby/sys/mountinfo @@ -866,8 +862,8 @@ github.com/oracle/oci-go-sdk/v54/identity github.com/oracle/oci-go-sdk/v54/objectstorage 
github.com/oracle/oci-go-sdk/v54/objectstorage/transfer github.com/oracle/oci-go-sdk/v54/workrequests -# github.com/osbuild/images v0.66.0 -## explicit; go 1.20 +# github.com/osbuild/images v0.69.0 +## explicit; go 1.21 github.com/osbuild/images/internal/common github.com/osbuild/images/internal/environment github.com/osbuild/images/internal/pathpolicy @@ -926,7 +922,7 @@ github.com/pkg/browser # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors -# github.com/pmezard/go-difflib v1.0.0 +# github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 ## explicit github.com/pmezard/go-difflib/difflib # github.com/proglottis/gpgme v0.1.3 @@ -938,11 +934,11 @@ github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promauto github.com/prometheus/client_golang/prometheus/promhttp -# github.com/prometheus/client_model v0.5.0 +# github.com/prometheus/client_model v0.6.0 ## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.48.0 -## explicit; go 1.20 +# github.com/prometheus/common v0.51.1 +## explicit; go 1.21 github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/model @@ -951,7 +947,7 @@ github.com/prometheus/common/model github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/rivo/uniseg v0.4.4 +# github.com/rivo/uniseg v0.4.7 ## explicit; go 1.18 github.com/rivo/uniseg # github.com/secure-systems-lab/go-securesystemslib v0.8.0 @@ -960,13 +956,13 @@ github.com/secure-systems-lab/go-securesystemslib/encrypted # github.com/segmentio/ksuid v1.0.4 ## explicit; go 1.12 github.com/segmentio/ksuid -# github.com/sigstore/fulcio v1.4.3 -## explicit; go 1.20 +# github.com/sigstore/fulcio v1.4.5 +## explicit; go 1.21 github.com/sigstore/fulcio/pkg/certificate -# github.com/sigstore/rekor v1.2.2 -## explicit; go 1.19 +# github.com/sigstore/rekor v1.3.6 +## explicit; go 1.21 github.com/sigstore/rekor/pkg/generated/models -# github.com/sigstore/sigstore v1.8.2 +# github.com/sigstore/sigstore v1.8.3 ## explicit; go 1.20 github.com/sigstore/sigstore/pkg/cryptoutils github.com/sigstore/sigstore/pkg/signature @@ -988,16 +984,16 @@ github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 -## explicit +# github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 +## explicit; go 1.19 github.com/stefanberger/go-pkcs11uri # github.com/stretchr/testify v1.9.0 ## explicit; go 1.17 github.com/stretchr/testify/assert github.com/stretchr/testify/require github.com/stretchr/testify/suite -# github.com/sylabs/sif/v2 v2.15.1 -## explicit; go 1.20 +# github.com/sylabs/sif/v2 v2.16.0 +## explicit; go 1.21 github.com/sylabs/sif/v2/pkg/sif # github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 ## explicit @@ -1012,7 +1008,7 @@ github.com/titanous/rocacheck ## explicit github.com/ubccr/kerby github.com/ubccr/kerby/khttp -# github.com/ulikunitz/xz v0.5.11 +# github.com/ulikunitz/xz v0.5.12 ## explicit; go 1.12 github.com/ulikunitz/xz github.com/ulikunitz/xz/internal/hash @@ -1029,13 +1025,13 @@ github.com/valyala/fasttemplate github.com/vbatts/tar-split/archive/tar github.com/vbatts/tar-split/tar/asm github.com/vbatts/tar-split/tar/storage -# github.com/vbauerster/mpb/v8 v8.7.2 +# 
github.com/vbauerster/mpb/v8 v8.7.3 ## explicit; go 1.17 github.com/vbauerster/mpb/v8 github.com/vbauerster/mpb/v8/cwriter github.com/vbauerster/mpb/v8/decor github.com/vbauerster/mpb/v8/internal -# github.com/vmware/govmomi v0.37.3 +# github.com/vmware/govmomi v0.38.0 ## explicit; go 1.19 github.com/vmware/govmomi github.com/vmware/govmomi/cns @@ -1171,7 +1167,7 @@ golang.org/x/crypto/pkcs12/internal/rc2 golang.org/x/crypto/salsa20/salsa golang.org/x/crypto/scrypt golang.org/x/crypto/sha3 -# golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 +# golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/maps @@ -1239,29 +1235,17 @@ golang.org/x/text/width golang.org/x/time/rate # golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d ## explicit; go 1.19 -golang.org/x/tools/cmd/stringer golang.org/x/tools/go/ast/astutil -golang.org/x/tools/go/gcexportdata -golang.org/x/tools/go/internal/packagesdriver -golang.org/x/tools/go/packages -golang.org/x/tools/go/types/objectpath golang.org/x/tools/imports -golang.org/x/tools/internal/aliases golang.org/x/tools/internal/event golang.org/x/tools/internal/event/core golang.org/x/tools/internal/event/keys golang.org/x/tools/internal/event/label -golang.org/x/tools/internal/gcimporter golang.org/x/tools/internal/gocommand golang.org/x/tools/internal/gopathwalk golang.org/x/tools/internal/imports -golang.org/x/tools/internal/packagesinternal -golang.org/x/tools/internal/pkgbits golang.org/x/tools/internal/stdlib -golang.org/x/tools/internal/tokeninternal -golang.org/x/tools/internal/typesinternal -golang.org/x/tools/internal/versions -# google.golang.org/api v0.183.0 +# google.golang.org/api v0.186.0 ## explicit; go 1.20 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport @@ -1279,16 +1263,16 @@ google.golang.org/api/transport google.golang.org/api/transport/grpc google.golang.org/api/transport/http google.golang.org/api/transport/http/internal/propagation -# google.golang.org/genproto v0.0.0-20240528184218-531527333157 +# google.golang.org/genproto v0.0.0-20240617180043-68d350f18fd4 ## explicit; go 1.20 google.golang.org/genproto/googleapis/cloud/extendedops google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr -# google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 +# google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 ## explicit; go 1.20 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4 ## explicit; go 1.20 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails @@ -1360,8 +1344,8 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.34.1 -## explicit; go 1.17 +# google.golang.org/protobuf v1.34.2 +## explicit; go 1.20 google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext