diff --git a/go.mod b/go.mod index cf711a87f..f7f817052 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ toolchain go1.21.11 exclude github.com/mattn/go-sqlite3 v2.0.3+incompatible require ( - cloud.google.com/go/compute v1.27.5 + cloud.google.com/go/compute v1.28.0 cloud.google.com/go/storage v1.43.0 github.com/Azure/azure-sdk-for-go v68.0.0+incompatible github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 @@ -46,7 +46,7 @@ require ( github.com/labstack/gommon v0.4.2 github.com/openshift-online/ocm-sdk-go v0.1.432 github.com/oracle/oci-go-sdk/v54 v54.0.0 - github.com/osbuild/images v0.77.0 + github.com/osbuild/images v0.79.0 github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20231117174845-e969a9dc3cd1 github.com/osbuild/pulp-client v0.1.0 github.com/prometheus/client_golang v1.19.1 @@ -55,18 +55,18 @@ require ( github.com/spf13/cobra v1.8.1 github.com/stretchr/testify v1.9.0 github.com/ubccr/kerby v0.0.0-20170626144437-201a958fc453 - github.com/vmware/govmomi v0.39.0 - golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 + github.com/vmware/govmomi v0.42.0 + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 golang.org/x/oauth2 v0.22.0 golang.org/x/sync v0.8.0 golang.org/x/sys v0.24.0 - google.golang.org/api v0.191.0 + google.golang.org/api v0.193.0 ) require ( - cloud.google.com/go v0.115.0 // indirect - cloud.google.com/go/auth v0.8.0 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect + cloud.google.com/go v0.115.1 // indirect + cloud.google.com/go/auth v0.9.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect cloud.google.com/go/compute/metadata v0.5.0 // indirect cloud.google.com/go/iam v1.1.12 // indirect dario.cat/mergo v1.0.0 // indirect @@ -101,12 +101,12 @@ require ( github.com/aymerick/douceur v0.2.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/containerd/cgroups/v3 v3.0.3 // indirect github.com/containerd/errdefs v0.1.0 // indirect github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect - github.com/containers/common v0.60.0 // indirect - github.com/containers/image/v5 v5.32.0 // indirect + github.com/containers/common v0.60.2 // indirect + github.com/containers/image/v5 v5.32.2 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect github.com/containers/ocicrypt v1.2.0 // indirect github.com/containers/storage v1.55.0 // indirect @@ -139,7 +139,7 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt v3.2.2+incompatible // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect - github.com/golang/glog v1.2.0 // indirect + github.com/golang/glog v1.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/go-containerregistry v0.20.0 // indirect @@ -172,7 +172,7 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-runewidth v0.0.15 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect github.com/mattn/go-sqlite3 v1.14.22 // indirect github.com/microcosm-cc/bluemonday v1.0.23 // indirect github.com/miekg/pkcs11 v1.1.1 // indirect @@ -214,15 +214,15 @@ require ( github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasttemplate v1.2.2 // 
indirect github.com/vbatts/tar-split v0.11.5 // indirect - github.com/vbauerster/mpb/v8 v8.7.4 // indirect + github.com/vbauerster/mpb/v8 v8.7.5 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect - go.opentelemetry.io/otel v1.24.0 // indirect - go.opentelemetry.io/otel/metric v1.24.0 // indirect - go.opentelemetry.io/otel/trace v1.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect + go.opentelemetry.io/otel v1.28.0 // indirect + go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/otel/trace v1.28.0 // indirect golang.org/x/crypto v0.26.0 // indirect golang.org/x/mod v0.20.0 // indirect golang.org/x/net v0.28.0 // indirect @@ -230,10 +230,10 @@ require ( golang.org/x/text v0.17.0 // indirect golang.org/x/time v0.6.0 // indirect golang.org/x/tools v0.24.0 // indirect - google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf // indirect - google.golang.org/grpc v1.64.1 // indirect + google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect + google.golang.org/grpc v1.65.0 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index e9bd0c9f8..da7bd60f0 100644 --- a/go.sum +++ b/go.sum @@ -1,12 +1,12 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= -cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= -cloud.google.com/go/auth v0.8.0 h1:y8jUJLl/Fg+qNBWxP/Hox2ezJvjkrPb952PC1p0G6A4= -cloud.google.com/go/auth v0.8.0/go.mod h1:qGVp/Y3kDRSDZ5gFD/XPUfYQ9xW1iI7q8RIRoCyBbJc= -cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI= -cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I= -cloud.google.com/go/compute v1.27.5 h1:iii9Z+FhEeZ5cUkGOEqU+GM7MJSyxMgbE7H7j+JndYY= -cloud.google.com/go/compute v1.27.5/go.mod h1:DfwDGujFTdSeiE8b8ZqadF/uxHFBz+ekGsk8Zfi9dTA= +cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ= +cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= +cloud.google.com/go/auth v0.9.0 h1:cYhKl1JUhynmxjXfrk4qdPc6Amw7i+GC9VLflgT0p5M= +cloud.google.com/go/auth v0.9.0/go.mod h1:2HsApZBr9zGZhC9QAXsYVYaWk8kNUt37uny+XVKi7wM= +cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= +cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= +cloud.google.com/go/compute v1.28.0 h1:OPtBxMcheSS+DWfci803qvPly3d4w7Eu5ztKBcFfzwk= +cloud.google.com/go/compute v1.28.0/go.mod 
h1:DEqZBtYrDnD5PvjsKwb3onnhX+qjdCVM7eshj1XdjV4= cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= cloud.google.com/go/iam v1.1.12 h1:JixGLimRrNGcxvJEQ8+clfLxPlbeZA6MuRJ+qJNQ5Xw= @@ -133,8 +133,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= @@ -147,10 +147,10 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU= github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk= -github.com/containers/common v0.60.0 h1:QMNygqiiit9LU/yqee9Dv0N0oQ+rQq41ElfdOBQtw7w= -github.com/containers/common v0.60.0/go.mod h1:dtKVe11xkV89tqzRX9s/B0ORjeB2dy5UB46aGjunMn8= -github.com/containers/image/v5 v5.32.0 h1:yjbweazPfr8xOzQ2hkkYm1A2V0jN96/kES6Gwyxj7hQ= -github.com/containers/image/v5 v5.32.0/go.mod h1:x5e0RDfGaY6bnQ13gJ2LqbfHvzssfB/y5a8HduGFxJc= +github.com/containers/common v0.60.2 h1:utcwp2YkO8c0mNlwRxsxfOiqfj157FRrBjxgjR6f+7o= +github.com/containers/common v0.60.2/go.mod h1:I0upBi1qJX3QmzGbUOBN1LVP6RvkKhd3qQpZbQT+Q54= +github.com/containers/image/v5 v5.32.2 h1:SzNE2Y6sf9b1GJoC8qjCuMBXwQrACFp4p0RK15+4gmQ= +github.com/containers/image/v5 v5.32.2/go.mod h1:v1l73VeMugfj/QtKI+jhYbwnwFCFnNGckvbST3rQ5Hk= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= github.com/containers/ocicrypt v1.2.0 h1:X14EgRK3xNFvJEfI5O4Qn4T3E25ANudSOZz/sirVuPM= @@ -269,8 +269,8 @@ github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= -github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= +github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache 
v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -303,8 +303,8 @@ github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCj github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= -github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= -github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= @@ -461,8 +461,8 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= -github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/microcosm-cc/bluemonday v1.0.23 h1:SMZe2IGa0NuHvnVNAZ+6B38gsTbi5e4sViiWJyDDqFY= @@ -492,10 +492,10 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= -github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= -github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= -github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= +github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw= +github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI= +github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= +github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod 
h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= @@ -508,8 +508,8 @@ github.com/openshift-online/ocm-sdk-go v0.1.432 h1:XIlCJKxXXznMP5Usu9lVGZa+UTYVl github.com/openshift-online/ocm-sdk-go v0.1.432/go.mod h1:CiAu2jwl3ITKOxkeV0Qnhzv4gs35AmpIzVABQLtcI2Y= github.com/oracle/oci-go-sdk/v54 v54.0.0 h1:CDLjeSejv2aDpElAJrhKpi6zvT/zhZCZuXchUUZ+LS4= github.com/oracle/oci-go-sdk/v54 v54.0.0/go.mod h1:+t+yvcFGVp+3ZnztnyxqXfQDsMlq8U25faBLa+mqCMc= -github.com/osbuild/images v0.77.0 h1:O0Nv07M7b3YuY+c83/NFjWFachO+aH714zu7r+QkdsU= -github.com/osbuild/images v0.77.0/go.mod h1:14LZWLSsQ02C/vZ+EzBkp+OcdjebnWDJ8moz8o/a0J4= +github.com/osbuild/images v0.79.0 h1:6kBRo0WzmyQTZ4ojX1oU9nIxBazZOO4I3FLsZaz8268= +github.com/osbuild/images v0.79.0/go.mod h1:Rzud9PFt0L9qFFlisL9I6OsZ2h0M6viMBdPhPtpwfwg= github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20231117174845-e969a9dc3cd1 h1:UFEJIcPa46W8gtWgOYzriRKYyy1t6SWL0BI7fPTuVvc= github.com/osbuild/osbuild-composer/pkg/splunk_logger v0.0.0-20231117174845-e969a9dc3cd1/go.mod h1:z+WA+dX6qMwc7fqY5jCzESDIlg4WR2sBQezxsoXv9Ik= github.com/osbuild/pulp-client v0.1.0 h1:L0C4ezBJGTamN3BKdv+rKLuq/WxXJbsFwz/Hj7aEmJ8= @@ -617,10 +617,10 @@ github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQ github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= -github.com/vbauerster/mpb/v8 v8.7.4 h1:p4f16iMfUt3PkAC73SCzAtgtSf8TYDqEbJUT3odPrPo= -github.com/vbauerster/mpb/v8 v8.7.4/go.mod h1:r1B5k2Ljj5KJFCekfihbiqyV4VaaRTANYmvWA2btufI= -github.com/vmware/govmomi v0.39.0 h1:soLZ08Q2zvjRSinNup8xVlw0KDDCJPPA1rIDmBhi7As= -github.com/vmware/govmomi v0.39.0/go.mod h1:oHzAQ1r6152zYDGcUqeK+EO8LhKo5wjtvWZBGHws2Hc= +github.com/vbauerster/mpb/v8 v8.7.5 h1:hUF3zaNsuaBBwzEFoCvfuX3cpesQXZC0Phm/JcHZQ+c= +github.com/vbauerster/mpb/v8 v8.7.5/go.mod h1:bRCnR7K+mj5WXKsy0NWB6Or+wctYGvVwKn6huwvxKa0= +github.com/vmware/govmomi v0.42.0 h1:MbvAlVfjNBE1mHMaQ7yOSop1KLB0/93x6VAGuCtjqtI= +github.com/vmware/govmomi v0.42.0/go.mod h1:1H5LWwsBif8HKZqbFp0FdoKTHyJE4FzL6ACequMKYQg= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= @@ -637,22 +637,22 @@ go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdH go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod 
h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= -go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= -go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I= -go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= -go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= -go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= -go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= -go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= -go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -687,8 +687,8 @@ golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZP golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY= -golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/lint 
v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -813,26 +813,26 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.191.0 h1:cJcF09Z+4HAB2t5qTQM1ZtfL/PemsLFkcFG67qq2afk= -google.golang.org/api v0.191.0/go.mod h1:tD5dsFGxFza0hnQveGfVk9QQYKcfp+VzgRqyXFxE0+E= +google.golang.org/api v0.193.0 h1:eOGDoJFsLU+HpCBaDJex2fWiYujAw9KbXgpOAMePoUs= +google.golang.org/api v0.193.0/go.mod h1:Po3YMV1XZx+mTku3cfJrlIYR03wiGrCOsdpC67hjZvw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf h1:OqdXDEakZCVtDiZTjcxfwbHPCT11ycCEsTKesBVKvyY= -google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:mCr1K1c8kX+1iSBREvU3Juo11CB+QOEWxbRS01wWl5M= -google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f h1:b1Ln/PG8orm0SsBbHZWke8dDp2lrCD4jSmfglFpTZbk= -google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf h1:liao9UHurZLtiEwBgT9LMOnKYsHze6eA6w1KQCMVN2Q= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142 h1:oLiyxGgE+rt22duwci1+TG7bg2/L1LQsXwfjPlmuJA0= +google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142/go.mod h1:G11eXq53iI5Q+kyNOmCvnzBaxEA2Q/Ik5Tj7nqBE8j4= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc 
v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA= -google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md index ae8b3e02e..8042bdae8 100644 --- a/vendor/cloud.google.com/go/auth/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/CHANGES.md @@ -1,5 +1,19 @@ # Changelog +## [0.9.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.8.1...auth/v0.9.0) (2024-08-16) + + +### Features + +* **auth:** Auth library can talk to S2A over mTLS ([#10634](https://github.com/googleapis/google-cloud-go/issues/10634)) ([5250a13](https://github.com/googleapis/google-cloud-go/commit/5250a13ec95b8d4eefbe0158f82857ff2189cb45)) + +## [0.8.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.8.0...auth/v0.8.1) (2024-08-13) + + +### Bug Fixes + +* **auth:** Make default client creation more lenient ([#10669](https://github.com/googleapis/google-cloud-go/issues/10669)) ([1afb9ee](https://github.com/googleapis/google-cloud-go/commit/1afb9ee1ee9de9810722800018133304a0ca34d1)), refs [#10638](https://github.com/googleapis/google-cloud-go/issues/10638) + ## [0.8.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.3...auth/v0.8.0) (2024-08-07) diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go index 2a4350af3..41e03f293 100644 --- a/vendor/cloud.google.com/go/auth/auth.go +++ b/vendor/cloud.google.com/go/auth/auth.go @@ -493,7 +493,7 @@ func (o *Options2LO) client() *http.Client { if o.Client != nil { return o.Client } - return internal.CloneDefaultClient() + return internal.DefaultClient() } func (o *Options2LO) validate() error { diff --git a/vendor/cloud.google.com/go/auth/credentials/detect.go b/vendor/cloud.google.com/go/auth/credentials/detect.go index 2d9a73edf..cce622418 100644 --- a/vendor/cloud.google.com/go/auth/credentials/detect.go +++ b/vendor/cloud.google.com/go/auth/credentials/detect.go @@ -190,7 +190,7 @@ func (o *DetectOptions) client() *http.Client { if o.Client != nil { return o.Client } - return internal.CloneDefaultClient() + return internal.DefaultClient() } func readCredentialsFile(filename string, opts *DetectOptions) (*auth.Credentials, error) { diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go index e56daa781..4308345ed 100644 --- a/vendor/cloud.google.com/go/auth/internal/internal.go +++ b/vendor/cloud.google.com/go/auth/internal/internal.go @@ -46,10 +46,24 @@ const ( DefaultUniverseDomain = "googleapis.com" ) -// CloneDefaultClient returns a [http.Client] with some good defaults. -func CloneDefaultClient() *http.Client { +type clonableTransport interface { + Clone() *http.Transport +} + +// DefaultClient returns an [http.Client] with some defaults set. If +// the current [http.DefaultTransport] is a [clonableTransport], as +// is the case for an [*http.Transport], the clone will be used. 
+// Otherwise the [http.DefaultTransport] is used directly. +func DefaultClient() *http.Client { + if transport, ok := http.DefaultTransport.(clonableTransport); ok { + return &http.Client{ + Transport: transport.Clone(), + Timeout: 30 * time.Second, + } + } + return &http.Client{ - Transport: http.DefaultTransport.(*http.Transport).Clone(), + Transport: http.DefaultTransport, Timeout: 30 * time.Second, } } diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/vendor/cloud.google.com/go/auth/internal/transport/cba.go index d94e0af08..26e037c1a 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cba.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cba.go @@ -17,7 +17,9 @@ package transport import ( "context" "crypto/tls" + "crypto/x509" "errors" + "log" "net" "net/http" "net/url" @@ -44,10 +46,12 @@ const ( googleAPIUseMTLSOld = "GOOGLE_API_USE_MTLS" universeDomainPlaceholder = "UNIVERSE_DOMAIN" + + mtlsMDSRoot = "/run/google-mds-mtls/root.crt" + mtlsMDSKey = "/run/google-mds-mtls/client.key" ) var ( - mdsMTLSAutoConfigSource mtlsConfigSource errUniverseNotSupportedMTLS = errors.New("mTLS is not supported in any universe other than googleapis.com") ) @@ -120,7 +124,20 @@ func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCrede defaultTransportCreds := credentials.NewTLS(&tls.Config{ GetClientCertificate: config.clientCertSource, }) - if config.s2aAddress == "" { + + var s2aAddr string + var transportCredsForS2A credentials.TransportCredentials + + if config.mtlsS2AAddress != "" { + s2aAddr = config.mtlsS2AAddress + transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey) + if err != nil { + log.Printf("Loading MTLS MDS credentials failed: %v", err) + return defaultTransportCreds, config.endpoint, nil + } + } else if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { return defaultTransportCreds, config.endpoint, nil } @@ -133,8 +150,9 @@ func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCrede } s2aTransportCreds, err := s2a.NewClientCreds(&s2a.ClientOptions{ - S2AAddress: config.s2aAddress, - FallbackOpts: fallbackOpts, + S2AAddress: s2aAddr, + TransportCreds: transportCredsForS2A, + FallbackOpts: fallbackOpts, }) if err != nil { // Use default if we cannot initialize S2A client transport credentials. 
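
The DefaultClient rework above replaces an unchecked type assertion on http.DefaultTransport that would panic whenever a program had swapped in a custom RoundTripper. A minimal sketch of the same pattern under that assumption (customRoundTripper is hypothetical, purely for illustration):

package main

import (
	"fmt"
	"net/http"
	"time"
)

// clonableTransport matches any transport that can hand out an
// independent copy, as *http.Transport does via its Clone method.
type clonableTransport interface {
	Clone() *http.Transport
}

// customRoundTripper stands in for a program that replaced
// http.DefaultTransport; note it does not implement Clone.
type customRoundTripper struct{ base http.RoundTripper }

func (c customRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
	return c.base.RoundTrip(r)
}

func defaultClient() *http.Client {
	// Prefer a clone so later mutations (proxy, TLS settings) cannot
	// leak into the process-wide default transport.
	if t, ok := http.DefaultTransport.(clonableTransport); ok {
		return &http.Client{Transport: t.Clone(), Timeout: 30 * time.Second}
	}
	// Fall back to sharing the default transport rather than panicking.
	return &http.Client{Transport: http.DefaultTransport, Timeout: 30 * time.Second}
}

func main() {
	http.DefaultTransport = customRoundTripper{base: http.DefaultTransport}
	fmt.Printf("%T\n", defaultClient().Transport) // main.customRoundTripper, no panic
}
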
@@ -151,7 +169,19 @@ func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context, return nil, nil, err } - if config.s2aAddress == "" { + var s2aAddr string + var transportCredsForS2A credentials.TransportCredentials + + if config.mtlsS2AAddress != "" { + s2aAddr = config.mtlsS2AAddress + transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey) + if err != nil { + log.Printf("Loading MTLS MDS credentials failed: %v", err) + return config.clientCertSource, nil, nil + } + } else if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { return config.clientCertSource, nil, nil } @@ -169,12 +199,38 @@ func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context, } dialTLSContextFunc := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{ - S2AAddress: config.s2aAddress, - FallbackOpts: fallbackOpts, + S2AAddress: s2aAddr, + TransportCreds: transportCredsForS2A, + FallbackOpts: fallbackOpts, }) return nil, dialTLSContextFunc, nil } +func loadMTLSMDSTransportCreds(mtlsMDSRootFile, mtlsMDSKeyFile string) (credentials.TransportCredentials, error) { + rootPEM, err := os.ReadFile(mtlsMDSRootFile) + if err != nil { + return nil, err + } + caCertPool := x509.NewCertPool() + ok := caCertPool.AppendCertsFromPEM(rootPEM) + if !ok { + return nil, errors.New("failed to load MTLS MDS root certificate") + } + // The mTLS MDS credentials are formatted as the concatenation of a PEM-encoded certificate chain + // followed by a PEM-encoded private key. For this reason, the concatenation is passed in to the + // tls.X509KeyPair function as both the certificate chain and private key arguments. + cert, err := tls.LoadX509KeyPair(mtlsMDSKeyFile, mtlsMDSKeyFile) + if err != nil { + return nil, err + } + tlsConfig := tls.Config{ + RootCAs: caCertPool, + Certificates: []tls.Certificate{cert}, + MinVersion: tls.VersionTLS13, + } + return credentials.NewTLS(&tlsConfig), nil +} + func getTransportConfig(opts *Options) (*transportConfig, error) { clientCertSource, err := GetClientCertificateProvider(opts) if err != nil { @@ -196,17 +252,17 @@ func getTransportConfig(opts *Options) (*transportConfig, error) { return nil, errUniverseNotSupportedMTLS } - s2aMTLSEndpoint := opts.DefaultMTLSEndpoint - s2aAddress := GetS2AAddress() - if s2aAddress == "" { + mtlsS2AAddress := GetMTLSS2AAddress() + if s2aAddress == "" && mtlsS2AAddress == "" { return &defaultTransportConfig, nil } return &transportConfig{ clientCertSource: clientCertSource, endpoint: endpoint, s2aAddress: s2aAddress, - s2aMTLSEndpoint: s2aMTLSEndpoint, + mtlsS2AAddress: mtlsS2AAddress, + s2aMTLSEndpoint: opts.DefaultMTLSEndpoint, }, nil } @@ -241,8 +297,10 @@ type transportConfig struct { clientCertSource cert.Provider // The corresponding endpoint to use based on client certificate source. endpoint string - // The S2A address if it can be used, otherwise an empty string. + // The plaintext S2A address if it can be used, otherwise an empty string. s2aAddress string + // The MTLS S2A address if it can be used, otherwise an empty string. + mtlsS2AAddress string // The MTLS endpoint to use with S2A. 
s2aMTLSEndpoint string } diff --git a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go index 2ed532deb..4df73edce 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go @@ -16,11 +16,11 @@ package transport import ( "encoding/json" + "fmt" "log" "os" "strconv" "sync" - "time" "cloud.google.com/go/auth/internal/transport/cert" "cloud.google.com/go/compute/metadata" @@ -31,41 +31,38 @@ const ( ) var ( - // The period an MTLS config can be reused before needing refresh. - configExpiry = time.Hour + mtlsConfiguration *mtlsConfig - // mdsMTLSAutoConfigSource is an instance of reuseMTLSConfigSource, with metadataMTLSAutoConfig as its config source. mtlsOnce sync.Once ) // GetS2AAddress returns the S2A address to be reached via plaintext connection. // Returns empty string if not set or invalid. func GetS2AAddress() string { - c, err := getMetadataMTLSAutoConfig().Config() - if err != nil { + getMetadataMTLSAutoConfig() + if !mtlsConfiguration.valid() { return "" } - if !c.Valid() { - return "" - } - return c.S2A.PlaintextAddress + return mtlsConfiguration.S2A.PlaintextAddress } -type mtlsConfigSource interface { - Config() (*mtlsConfig, error) +// GetMTLSS2AAddress returns the S2A address to be reached via MTLS connection. +// Returns empty string if not set or invalid. +func GetMTLSS2AAddress() string { + getMetadataMTLSAutoConfig() + if !mtlsConfiguration.valid() { + return "" + } + return mtlsConfiguration.S2A.MTLSAddress } // mtlsConfig contains the configuration for establishing MTLS connections with Google APIs. type mtlsConfig struct { - S2A *s2aAddresses `json:"s2a"` - Expiry time.Time + S2A *s2aAddresses `json:"s2a"` } -func (c *mtlsConfig) Valid() bool { - return c != nil && c.S2A != nil && !c.expired() -} -func (c *mtlsConfig) expired() bool { - return c.Expiry.Before(time.Now()) +func (c *mtlsConfig) valid() bool { + return c != nil && c.S2A != nil } // s2aAddresses contains the plaintext and/or MTLS S2A addresses. @@ -76,80 +73,36 @@ type s2aAddresses struct { MTLSAddress string `json:"mtls_address"` } -// getMetadataMTLSAutoConfig returns mdsMTLSAutoConfigSource, which is backed by config from MDS with auto-refresh. -func getMetadataMTLSAutoConfig() mtlsConfigSource { +func getMetadataMTLSAutoConfig() { + var err error mtlsOnce.Do(func() { - mdsMTLSAutoConfigSource = &reuseMTLSConfigSource{ - src: &metadataMTLSAutoConfig{}, + mtlsConfiguration, err = queryConfig() + if err != nil { + log.Printf("Getting MTLS config failed: %v", err) } }) - return mdsMTLSAutoConfigSource } -// reuseMTLSConfigSource caches a valid version of mtlsConfig, and uses `src` to refresh upon config expiry. -// It implements the mtlsConfigSource interface, so calling Config() on it returns an mtlsConfig. 
-type reuseMTLSConfigSource struct { - src mtlsConfigSource // src.Config() is called when config is expired - mu sync.Mutex // mutex guards config - config *mtlsConfig // cached config -} - -func (cs *reuseMTLSConfigSource) Config() (*mtlsConfig, error) { - cs.mu.Lock() - defer cs.mu.Unlock() - - if cs.config.Valid() { - return cs.config, nil - } - c, err := cs.src.Config() - if err != nil { - return nil, err - } - cs.config = c - return c, nil -} - -// metadataMTLSAutoConfig is an implementation of the interface mtlsConfigSource -// It has the logic to query MDS and return an mtlsConfig -type metadataMTLSAutoConfig struct{} - var httpGetMetadataMTLSConfig = func() (string, error) { return metadata.Get(configEndpointSuffix) } -func (cs *metadataMTLSAutoConfig) Config() (*mtlsConfig, error) { +func queryConfig() (*mtlsConfig, error) { resp, err := httpGetMetadataMTLSConfig() if err != nil { - log.Printf("querying MTLS config from MDS endpoint failed: %v", err) - return defaultMTLSConfig(), nil + return nil, fmt.Errorf("querying MTLS config from MDS endpoint failed: %w", err) } var config mtlsConfig err = json.Unmarshal([]byte(resp), &config) if err != nil { - log.Printf("unmarshalling MTLS config from MDS endpoint failed: %v", err) - return defaultMTLSConfig(), nil + return nil, fmt.Errorf("unmarshalling MTLS config from MDS endpoint failed: %w", err) } - if config.S2A == nil { - log.Printf("returned MTLS config from MDS endpoint is invalid: %v", config) - return defaultMTLSConfig(), nil + return nil, fmt.Errorf("returned MTLS config from MDS endpoint is invalid: %v", config) } - - // set new expiry - config.Expiry = time.Now().Add(configExpiry) return &config, nil } -func defaultMTLSConfig() *mtlsConfig { - return &mtlsConfig{ - S2A: &s2aAddresses{ - PlaintextAddress: "", - MTLSAddress: "", - }, - Expiry: time.Now().Add(configExpiry), - } -} - func shouldUseS2A(clientCertSource cert.Provider, opts *Options) bool { // If client cert is found, use that over S2A. if clientCertSource != nil { diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md index f75c8e204..7faf6e0c9 100644 --- a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md @@ -1,5 +1,12 @@ # Changelog +## [0.2.4](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.3...auth/oauth2adapt/v0.2.4) (2024-08-08) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758)) + ## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.2...auth/oauth2adapt/v0.2.3) (2024-07-10) diff --git a/vendor/cloud.google.com/go/auth/threelegged.go b/vendor/cloud.google.com/go/auth/threelegged.go index a8ce6cd8a..97a57f469 100644 --- a/vendor/cloud.google.com/go/auth/threelegged.go +++ b/vendor/cloud.google.com/go/auth/threelegged.go @@ -128,7 +128,7 @@ func (o *Options3LO) client() *http.Client { if o.Client != nil { return o.Client } - return internal.CloneDefaultClient() + return internal.DefaultClient() } // authCodeURL returns a URL that points to a OAuth2 consent page. 
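
As the comment in loadMTLSMDSTransportCreds notes, the metadata server serves the client certificate chain and the private key concatenated in a single PEM file, which is why the same path is passed to the key-pair loader as both the certificate and the key argument. A standalone sketch of that construction, assuming hypothetical local paths rather than the /run/google-mds-mtls files:

package main

import (
	"crypto/tls"
	"crypto/x509"
	"errors"
	"log"
	"os"
)

// newMTLSConfig builds a *tls.Config from a root CA file and a combined
// PEM file holding the client certificate chain followed by its private
// key, mirroring the vendored loadMTLSMDSTransportCreds helper.
func newMTLSConfig(rootFile, combinedFile string) (*tls.Config, error) {
	rootPEM, err := os.ReadFile(rootFile)
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(rootPEM) {
		return nil, errors.New("no valid root certificates found")
	}
	// The combined file supplies both arguments: LoadX509KeyPair skips
	// non-certificate blocks when parsing the chain and non-key blocks
	// when parsing the key.
	cert, err := tls.LoadX509KeyPair(combinedFile, combinedFile)
	if err != nil {
		return nil, err
	}
	return &tls.Config{
		RootCAs:      pool,
		Certificates: []tls.Certificate{cert},
		MinVersion:   tls.VersionTLS13,
	}, nil
}

func main() {
	// Hypothetical paths, for illustration only.
	cfg, err := newMTLSConfig("certs/root.pem", "certs/combined.pem")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("loaded %d client certificate(s)", len(cfg.Certificates))
}
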
diff --git a/vendor/cloud.google.com/go/compute/apiv1/accelerator_types_client.go b/vendor/cloud.google.com/go/compute/apiv1/accelerator_types_client.go index c7443f561..238071ca2 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/accelerator_types_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/accelerator_types_client.go @@ -237,7 +237,7 @@ func (c *acceleratorTypesRESTClient) AggregatedList(ctx context.Context, req *co req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -402,7 +402,7 @@ func (c *acceleratorTypesRESTClient) List(ctx context.Context, req *computepb.Li req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/addresses_client.go b/vendor/cloud.google.com/go/compute/apiv1/addresses_client.go index 778101e77..e3eedb177 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/addresses_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/addresses_client.go @@ -290,7 +290,7 @@ func (c *addressesRESTClient) AggregatedList(ctx context.Context, req *computepb req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -602,7 +602,7 @@ func (c *addressesRESTClient) List(ctx context.Context, req *computepb.ListAddre req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/autoscalers_client.go b/vendor/cloud.google.com/go/compute/apiv1/autoscalers_client.go index 02231166a..0bd8f894c 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/autoscalers_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/autoscalers_client.go @@ -290,7 +290,7 @@ func (c *autoscalersRESTClient) AggregatedList(ctx context.Context, req *compute req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -602,7 +602,7 @@ func (c *autoscalersRESTClient) List(ctx context.Context, req *computepb.ListAut req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/auxiliary_go123.go b/vendor/cloud.google.com/go/compute/apiv1/auxiliary_go123.go new file mode 100644 index 000000000..8699e04fb --- /dev/null +++ b/vendor/cloud.google.com/go/compute/apiv1/auxiliary_go123.go @@ -0,0 +1,752 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +//go:build go1.23 + +package compute + +import ( + "iter" + + computepb "cloud.google.com/go/compute/apiv1/computepb" + "github.com/googleapis/gax-go/v2/iterator" +) + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *AcceleratorTypeIterator) All() iter.Seq2[*computepb.AcceleratorType, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *AcceleratorTypesScopedListPairIterator) All() iter.Seq2[AcceleratorTypesScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *AddressIterator) All() iter.Seq2[*computepb.Address, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *AddressesScopedListPairIterator) All() iter.Seq2[AddressesScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *AutoscalerIterator) All() iter.Seq2[*computepb.Autoscaler, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *AutoscalersScopedListPairIterator) All() iter.Seq2[AutoscalersScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *BackendBucketIterator) All() iter.Seq2[*computepb.BackendBucket, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *BackendServiceIterator) All() iter.Seq2[*computepb.BackendService, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *BackendServicesScopedListPairIterator) All() iter.Seq2[BackendServicesScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *CommitmentIterator) All() iter.Seq2[*computepb.Commitment, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *CommitmentsScopedListPairIterator) All() iter.Seq2[CommitmentsScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. 
If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *DiskIterator) All() iter.Seq2[*computepb.Disk, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *DiskTypeIterator) All() iter.Seq2[*computepb.DiskType, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *DiskTypesScopedListPairIterator) All() iter.Seq2[DiskTypesScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *DisksScopedListPairIterator) All() iter.Seq2[DisksScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *ExchangedPeeringRouteIterator) All() iter.Seq2[*computepb.ExchangedPeeringRoute, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *ExternalVpnGatewayIterator) All() iter.Seq2[*computepb.ExternalVpnGateway, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *FirewallIterator) All() iter.Seq2[*computepb.Firewall, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *FirewallPolicyIterator) All() iter.Seq2[*computepb.FirewallPolicy, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *ForwardingRuleIterator) All() iter.Seq2[*computepb.ForwardingRule, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *ForwardingRulesScopedListPairIterator) All() iter.Seq2[ForwardingRulesScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *HealthCheckIterator) All() iter.Seq2[*computepb.HealthCheck, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *HealthCheckServiceIterator) All() iter.Seq2[*computepb.HealthCheckService, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *HealthChecksScopedListPairIterator) All() iter.Seq2[HealthChecksScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *ImageIterator) All() iter.Seq2[*computepb.Image, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. 
If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *InstanceGroupIterator) All() iter.Seq2[*computepb.InstanceGroup, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *InstanceGroupManagerIterator) All() iter.Seq2[*computepb.InstanceGroupManager, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *InstanceGroupManagerResizeRequestIterator) All() iter.Seq2[*computepb.InstanceGroupManagerResizeRequest, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *InstanceGroupManagersScopedListPairIterator) All() iter.Seq2[InstanceGroupManagersScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *InstanceGroupsScopedListPairIterator) All() iter.Seq2[InstanceGroupsScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *InstanceIterator) All() iter.Seq2[*computepb.Instance, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *InstanceManagedByIgmErrorIterator) All() iter.Seq2[*computepb.InstanceManagedByIgmError, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *InstanceTemplateIterator) All() iter.Seq2[*computepb.InstanceTemplate, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *InstanceTemplatesScopedListPairIterator) All() iter.Seq2[InstanceTemplatesScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *InstanceWithNamedPortsIterator) All() iter.Seq2[*computepb.InstanceWithNamedPorts, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *InstancesScopedListPairIterator) All() iter.Seq2[InstancesScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *InstantSnapshotIterator) All() iter.Seq2[*computepb.InstantSnapshot, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *InstantSnapshotsScopedListPairIterator) All() iter.Seq2[InstantSnapshotsScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. 
+func (it *InterconnectAttachmentIterator) All() iter.Seq2[*computepb.InterconnectAttachment, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *InterconnectAttachmentsScopedListPairIterator) All() iter.Seq2[InterconnectAttachmentsScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *InterconnectIterator) All() iter.Seq2[*computepb.Interconnect, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *InterconnectLocationIterator) All() iter.Seq2[*computepb.InterconnectLocation, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *InterconnectRemoteLocationIterator) All() iter.Seq2[*computepb.InterconnectRemoteLocation, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *LicenseIterator) All() iter.Seq2[*computepb.License, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *MachineImageIterator) All() iter.Seq2[*computepb.MachineImage, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *MachineTypeIterator) All() iter.Seq2[*computepb.MachineType, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *MachineTypesScopedListPairIterator) All() iter.Seq2[MachineTypesScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *ManagedInstanceIterator) All() iter.Seq2[*computepb.ManagedInstance, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *NetworkAttachmentIterator) All() iter.Seq2[*computepb.NetworkAttachment, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *NetworkAttachmentsScopedListPairIterator) All() iter.Seq2[NetworkAttachmentsScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *NetworkEdgeSecurityServicesScopedListPairIterator) All() iter.Seq2[NetworkEdgeSecurityServicesScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. 
+func (it *NetworkEndpointGroupIterator) All() iter.Seq2[*computepb.NetworkEndpointGroup, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *NetworkEndpointGroupsScopedListPairIterator) All() iter.Seq2[NetworkEndpointGroupsScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *NetworkEndpointWithHealthStatusIterator) All() iter.Seq2[*computepb.NetworkEndpointWithHealthStatus, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *NetworkIterator) All() iter.Seq2[*computepb.Network, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *NodeGroupIterator) All() iter.Seq2[*computepb.NodeGroup, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *NodeGroupNodeIterator) All() iter.Seq2[*computepb.NodeGroupNode, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *NodeGroupsScopedListPairIterator) All() iter.Seq2[NodeGroupsScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *NodeTemplateIterator) All() iter.Seq2[*computepb.NodeTemplate, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *NodeTemplatesScopedListPairIterator) All() iter.Seq2[NodeTemplatesScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *NodeTypeIterator) All() iter.Seq2[*computepb.NodeType, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *NodeTypesScopedListPairIterator) All() iter.Seq2[NodeTypesScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *NotificationEndpointIterator) All() iter.Seq2[*computepb.NotificationEndpoint, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *OperationIterator) All() iter.Seq2[*computepb.Operation, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *OperationsScopedListPairIterator) All() iter.Seq2[OperationsScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. 
If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *PacketMirroringIterator) All() iter.Seq2[*computepb.PacketMirroring, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *PacketMirroringsScopedListPairIterator) All() iter.Seq2[PacketMirroringsScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *PerInstanceConfigIterator) All() iter.Seq2[*computepb.PerInstanceConfig, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *ProjectIterator) All() iter.Seq2[*computepb.Project, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *PublicAdvertisedPrefixIterator) All() iter.Seq2[*computepb.PublicAdvertisedPrefix, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *PublicDelegatedPrefixIterator) All() iter.Seq2[*computepb.PublicDelegatedPrefix, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *PublicDelegatedPrefixesScopedListPairIterator) All() iter.Seq2[PublicDelegatedPrefixesScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *ReferenceIterator) All() iter.Seq2[*computepb.Reference, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *RegionIterator) All() iter.Seq2[*computepb.Region, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *ReservationIterator) All() iter.Seq2[*computepb.Reservation, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *ReservationsScopedListPairIterator) All() iter.Seq2[ReservationsScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *ResourcePoliciesScopedListPairIterator) All() iter.Seq2[ResourcePoliciesScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *ResourcePolicyIterator) All() iter.Seq2[*computepb.ResourcePolicy, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. 
+func (it *RouteIterator) All() iter.Seq2[*computepb.Route, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *RouterIterator) All() iter.Seq2[*computepb.Router, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *RoutersScopedListPairIterator) All() iter.Seq2[RoutersScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *SecurityPoliciesScopedListPairIterator) All() iter.Seq2[SecurityPoliciesScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *SecurityPolicyIterator) All() iter.Seq2[*computepb.SecurityPolicy, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *ServiceAttachmentIterator) All() iter.Seq2[*computepb.ServiceAttachment, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *ServiceAttachmentsScopedListPairIterator) All() iter.Seq2[ServiceAttachmentsScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *SnapshotIterator) All() iter.Seq2[*computepb.Snapshot, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *SslCertificateIterator) All() iter.Seq2[*computepb.SslCertificate, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *SslCertificatesScopedListPairIterator) All() iter.Seq2[SslCertificatesScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *SslPoliciesScopedListPairIterator) All() iter.Seq2[SslPoliciesScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *SslPolicyIterator) All() iter.Seq2[*computepb.SslPolicy, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *StoragePoolDiskIterator) All() iter.Seq2[*computepb.StoragePoolDisk, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *StoragePoolIterator) All() iter.Seq2[*computepb.StoragePool, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. 
+func (it *StoragePoolTypeIterator) All() iter.Seq2[*computepb.StoragePoolType, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *StoragePoolTypesScopedListPairIterator) All() iter.Seq2[StoragePoolTypesScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *StoragePoolsScopedListPairIterator) All() iter.Seq2[StoragePoolsScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *SubnetworkIterator) All() iter.Seq2[*computepb.Subnetwork, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *SubnetworksScopedListPairIterator) All() iter.Seq2[SubnetworksScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *TargetGrpcProxyIterator) All() iter.Seq2[*computepb.TargetGrpcProxy, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *TargetHttpProxiesScopedListPairIterator) All() iter.Seq2[TargetHttpProxiesScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *TargetHttpProxyIterator) All() iter.Seq2[*computepb.TargetHttpProxy, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *TargetHttpsProxiesScopedListPairIterator) All() iter.Seq2[TargetHttpsProxiesScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *TargetHttpsProxyIterator) All() iter.Seq2[*computepb.TargetHttpsProxy, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *TargetInstanceIterator) All() iter.Seq2[*computepb.TargetInstance, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *TargetInstancesScopedListPairIterator) All() iter.Seq2[TargetInstancesScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *TargetPoolIterator) All() iter.Seq2[*computepb.TargetPool, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *TargetPoolsScopedListPairIterator) All() iter.Seq2[TargetPoolsScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. 
If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *TargetSslProxyIterator) All() iter.Seq2[*computepb.TargetSslProxy, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *TargetTcpProxiesScopedListPairIterator) All() iter.Seq2[TargetTcpProxiesScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *TargetTcpProxyIterator) All() iter.Seq2[*computepb.TargetTcpProxy, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *TargetVpnGatewayIterator) All() iter.Seq2[*computepb.TargetVpnGateway, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *TargetVpnGatewaysScopedListPairIterator) All() iter.Seq2[TargetVpnGatewaysScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *UrlMapIterator) All() iter.Seq2[*computepb.UrlMap, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *UrlMapsScopedListPairIterator) All() iter.Seq2[UrlMapsScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *UsableSubnetworkIterator) All() iter.Seq2[*computepb.UsableSubnetwork, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *VmEndpointNatMappingsIterator) All() iter.Seq2[*computepb.VmEndpointNatMappings, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *VpnGatewayIterator) All() iter.Seq2[*computepb.VpnGateway, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *VpnGatewaysScopedListPairIterator) All() iter.Seq2[VpnGatewaysScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *VpnTunnelIterator) All() iter.Seq2[*computepb.VpnTunnel, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *VpnTunnelsScopedListPairIterator) All() iter.Seq2[VpnTunnelsScopedListPair, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. 
+func (it *XpnResourceIdIterator) All() iter.Seq2[*computepb.XpnResourceId, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *ZoneIterator) All() iter.Seq2[*computepb.Zone, error] { + return iterator.RangeAdapter(it.Next) +} diff --git a/vendor/cloud.google.com/go/compute/apiv1/backend_buckets_client.go b/vendor/cloud.google.com/go/compute/apiv1/backend_buckets_client.go index 3b2f18db4..0cdb87ae4 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/backend_buckets_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/backend_buckets_client.go @@ -747,7 +747,7 @@ func (c *backendBucketsRESTClient) List(ctx context.Context, req *computepb.List req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/backend_services_client.go b/vendor/cloud.google.com/go/compute/apiv1/backend_services_client.go index b05fc78b0..783065179 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/backend_services_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/backend_services_client.go @@ -474,7 +474,7 @@ func (c *backendServicesRESTClient) AggregatedList(ctx context.Context, req *com req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -978,7 +978,7 @@ func (c *backendServicesRESTClient) List(ctx context.Context, req *computepb.Lis req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -1075,7 +1075,7 @@ func (c *backendServicesRESTClient) ListUsable(ctx context.Context, req *compute req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/disk_types_client.go b/vendor/cloud.google.com/go/compute/apiv1/disk_types_client.go index 8b9a9f461..ebc62f3f3 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/disk_types_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/disk_types_client.go @@ -233,7 +233,7 @@ func (c *diskTypesRESTClient) AggregatedList(ctx context.Context, req *computepb req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -398,7 +398,7 @@ func (c *diskTypesRESTClient) List(ctx context.Context, req *computepb.ListDiskT req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/disks_client.go 
b/vendor/cloud.google.com/go/compute/apiv1/disks_client.go index a93fa6788..ff5d73355 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/disks_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/disks_client.go @@ -486,7 +486,7 @@ func (c *disksRESTClient) AggregatedList(ctx context.Context, req *computepb.Agg req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -1020,7 +1020,7 @@ func (c *disksRESTClient) List(ctx context.Context, req *computepb.ListDisksRequ req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/external_vpn_gateways_client.go b/vendor/cloud.google.com/go/compute/apiv1/external_vpn_gateways_client.go index f5be67dd3..dd4a59e86 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/external_vpn_gateways_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/external_vpn_gateways_client.go @@ -470,7 +470,7 @@ func (c *externalVpnGatewaysRESTClient) List(ctx context.Context, req *computepb req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/firewall_policies_client.go b/vendor/cloud.google.com/go/compute/apiv1/firewall_policies_client.go index 81f5db0a9..ab236c258 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/firewall_policies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/firewall_policies_client.go @@ -1032,7 +1032,7 @@ func (c *firewallPoliciesRESTClient) List(ctx context.Context, req *computepb.Li req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/firewalls_client.go b/vendor/cloud.google.com/go/compute/apiv1/firewalls_client.go index 43180db03..5633535da 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/firewalls_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/firewalls_client.go @@ -470,7 +470,7 @@ func (c *firewallsRESTClient) List(ctx context.Context, req *computepb.ListFirew req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/forwarding_rules_client.go b/vendor/cloud.google.com/go/compute/apiv1/forwarding_rules_client.go index 015568945..68cb4da4f 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/forwarding_rules_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/forwarding_rules_client.go @@ -300,7 +300,7 @@ func (c *forwardingRulesRESTClient) AggregatedList(ctx context.Context, req *com req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = 
proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -612,7 +612,7 @@ func (c *forwardingRulesRESTClient) List(ctx context.Context, req *computepb.Lis req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/global_addresses_client.go b/vendor/cloud.google.com/go/compute/apiv1/global_addresses_client.go index fa69f00cf..8a4b06d12 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/global_addresses_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/global_addresses_client.go @@ -470,7 +470,7 @@ func (c *globalAddressesRESTClient) List(ctx context.Context, req *computepb.Lis req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/global_forwarding_rules_client.go b/vendor/cloud.google.com/go/compute/apiv1/global_forwarding_rules_client.go index c742c0949..01b0299cb 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/global_forwarding_rules_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/global_forwarding_rules_client.go @@ -480,7 +480,7 @@ func (c *globalForwardingRulesRESTClient) List(ctx context.Context, req *compute req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/global_network_endpoint_groups_client.go b/vendor/cloud.google.com/go/compute/apiv1/global_network_endpoint_groups_client.go index be6570688..0bba2dec8 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/global_network_endpoint_groups_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/global_network_endpoint_groups_client.go @@ -632,7 +632,7 @@ func (c *globalNetworkEndpointGroupsRESTClient) List(ctx context.Context, req *c req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -729,7 +729,7 @@ func (c *globalNetworkEndpointGroupsRESTClient) ListNetworkEndpoints(ctx context req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/global_operations_client.go b/vendor/cloud.google.com/go/compute/apiv1/global_operations_client.go index 206112469..37e70d55e 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/global_operations_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/global_operations_client.go @@ -253,7 +253,7 @@ func (c *globalOperationsRESTClient) AggregatedList(ctx context.Context, req *co req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = 
proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -473,7 +473,7 @@ func (c *globalOperationsRESTClient) List(ctx context.Context, req *computepb.Li req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/global_organization_operations_client.go b/vendor/cloud.google.com/go/compute/apiv1/global_organization_operations_client.go index 737e380b9..2c5a3f9d7 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/global_organization_operations_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/global_organization_operations_client.go @@ -347,7 +347,7 @@ func (c *globalOrganizationOperationsRESTClient) List(ctx context.Context, req * req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/global_public_delegated_prefixes_client.go b/vendor/cloud.google.com/go/compute/apiv1/global_public_delegated_prefixes_client.go index e9fdcf804..65a90dbae 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/global_public_delegated_prefixes_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/global_public_delegated_prefixes_client.go @@ -460,7 +460,7 @@ func (c *globalPublicDelegatedPrefixesRESTClient) List(ctx context.Context, req req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/health_checks_client.go b/vendor/cloud.google.com/go/compute/apiv1/health_checks_client.go index 9e5701f23..b53dd9b30 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/health_checks_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/health_checks_client.go @@ -290,7 +290,7 @@ func (c *healthChecksRESTClient) AggregatedList(ctx context.Context, req *comput req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -600,7 +600,7 @@ func (c *healthChecksRESTClient) List(ctx context.Context, req *computepb.ListHe req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/images_client.go b/vendor/cloud.google.com/go/compute/apiv1/images_client.go index ff1415b4e..890b79ba6 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/images_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/images_client.go @@ -734,7 +734,7 @@ func (c *imagesRESTClient) List(ctx context.Context, req *computepb.ListImagesRe req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if 
pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/instance_group_manager_resize_requests_client.go b/vendor/cloud.google.com/go/compute/apiv1/instance_group_manager_resize_requests_client.go index 022d23afd..ec2346461 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/instance_group_manager_resize_requests_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/instance_group_manager_resize_requests_client.go @@ -532,7 +532,7 @@ func (c *instanceGroupManagerResizeRequestsRESTClient) List(ctx context.Context, req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/instance_group_managers_client.go b/vendor/cloud.google.com/go/compute/apiv1/instance_group_managers_client.go index 6216112df..f4b8aed3b 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/instance_group_managers_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/instance_group_managers_client.go @@ -506,7 +506,7 @@ func (c *instanceGroupManagersRESTClient) AggregatedList(ctx context.Context, re req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -1112,7 +1112,7 @@ func (c *instanceGroupManagersRESTClient) List(ctx context.Context, req *compute req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -1209,7 +1209,7 @@ func (c *instanceGroupManagersRESTClient) ListErrors(ctx context.Context, req *c req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -1306,7 +1306,7 @@ func (c *instanceGroupManagersRESTClient) ListManagedInstances(ctx context.Conte req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -1403,7 +1403,7 @@ func (c *instanceGroupManagersRESTClient) ListPerInstanceConfigs(ctx context.Con req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/instance_groups_client.go b/vendor/cloud.google.com/go/compute/apiv1/instance_groups_client.go index 097a512eb..ad4fc6d05 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/instance_groups_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/instance_groups_client.go @@ -387,7 +387,7 @@ func (c *instanceGroupsRESTClient) AggregatedList(ctx context.Context, req *comp req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = 
proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -699,7 +699,7 @@ func (c *instanceGroupsRESTClient) List(ctx context.Context, req *computepb.List req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -797,7 +797,7 @@ func (c *instanceGroupsRESTClient) ListInstances(ctx context.Context, req *compu req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/instance_templates_client.go b/vendor/cloud.google.com/go/compute/apiv1/instance_templates_client.go index 328c405b1..1c00b6859 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/instance_templates_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/instance_templates_client.go @@ -309,7 +309,7 @@ func (c *instanceTemplatesRESTClient) AggregatedList(ctx context.Context, req *c req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -681,7 +681,7 @@ func (c *instanceTemplatesRESTClient) List(ctx context.Context, req *computepb.L req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/instances_client.go b/vendor/cloud.google.com/go/compute/apiv1/instances_client.go index 274f86697..f885e3cd6 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/instances_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/instances_client.go @@ -918,7 +918,7 @@ func (c *instancesRESTClient) AggregatedList(ctx context.Context, req *computepb req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -1898,7 +1898,7 @@ func (c *instancesRESTClient) List(ctx context.Context, req *computepb.ListInsta req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -1995,7 +1995,7 @@ func (c *instancesRESTClient) ListReferrers(ctx context.Context, req *computepb. 
req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/instant_snapshots_client.go b/vendor/cloud.google.com/go/compute/apiv1/instant_snapshots_client.go index 172c311c6..9c274871d 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/instant_snapshots_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/instant_snapshots_client.go @@ -319,7 +319,7 @@ func (c *instantSnapshotsRESTClient) AggregatedList(ctx context.Context, req *co req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -693,7 +693,7 @@ func (c *instantSnapshotsRESTClient) List(ctx context.Context, req *computepb.Li req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/interconnect_attachments_client.go b/vendor/cloud.google.com/go/compute/apiv1/interconnect_attachments_client.go index df853852a..82809cbaa 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/interconnect_attachments_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/interconnect_attachments_client.go @@ -290,7 +290,7 @@ func (c *interconnectAttachmentsRESTClient) AggregatedList(ctx context.Context, req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -605,7 +605,7 @@ func (c *interconnectAttachmentsRESTClient) List(ctx context.Context, req *compu req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/interconnect_locations_client.go b/vendor/cloud.google.com/go/compute/apiv1/interconnect_locations_client.go index 2431c7c67..b9c37ce85 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/interconnect_locations_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/interconnect_locations_client.go @@ -268,7 +268,7 @@ func (c *interconnectLocationsRESTClient) List(ctx context.Context, req *compute req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/interconnect_remote_locations_client.go b/vendor/cloud.google.com/go/compute/apiv1/interconnect_remote_locations_client.go index 947cde3a1..08e359f85 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/interconnect_remote_locations_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/interconnect_remote_locations_client.go @@ -268,7 +268,7 @@ func (c *interconnectRemoteLocationsRESTClient) List(ctx context.Context, req *c 
req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/interconnects_client.go b/vendor/cloud.google.com/go/compute/apiv1/interconnects_client.go index 2be723331..f58bc06d2 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/interconnects_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/interconnects_client.go @@ -618,7 +618,7 @@ func (c *interconnectsRESTClient) List(ctx context.Context, req *computepb.ListI req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/licenses_client.go b/vendor/cloud.google.com/go/compute/apiv1/licenses_client.go index 818036eb7..bdb37a18e 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/licenses_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/licenses_client.go @@ -551,7 +551,7 @@ func (c *licensesRESTClient) List(ctx context.Context, req *computepb.ListLicens req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/machine_images_client.go b/vendor/cloud.google.com/go/compute/apiv1/machine_images_client.go index fc81e62d0..663b020c0 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/machine_images_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/machine_images_client.go @@ -554,7 +554,7 @@ func (c *machineImagesRESTClient) List(ctx context.Context, req *computepb.ListM req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/machine_types_client.go b/vendor/cloud.google.com/go/compute/apiv1/machine_types_client.go index 05b9dec4e..6ba0462be 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/machine_types_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/machine_types_client.go @@ -233,7 +233,7 @@ func (c *machineTypesRESTClient) AggregatedList(ctx context.Context, req *comput req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -398,7 +398,7 @@ func (c *machineTypesRESTClient) List(ctx context.Context, req *computepb.ListMa req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/network_attachments_client.go b/vendor/cloud.google.com/go/compute/apiv1/network_attachments_client.go index c46e8ae24..c7b715ee8 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/network_attachments_client.go +++ 
b/vendor/cloud.google.com/go/compute/apiv1/network_attachments_client.go @@ -319,7 +319,7 @@ func (c *networkAttachmentsRESTClient) AggregatedList(ctx context.Context, req * req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -693,7 +693,7 @@ func (c *networkAttachmentsRESTClient) List(ctx context.Context, req *computepb. req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/network_edge_security_services_client.go b/vendor/cloud.google.com/go/compute/apiv1/network_edge_security_services_client.go index 962688fba..98e4f38d1 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/network_edge_security_services_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/network_edge_security_services_client.go @@ -261,7 +261,7 @@ func (c *networkEdgeSecurityServicesRESTClient) AggregatedList(ctx context.Conte req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/network_endpoint_groups_client.go b/vendor/cloud.google.com/go/compute/apiv1/network_endpoint_groups_client.go index 9f4cc0c0e..41fcb6903 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/network_endpoint_groups_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/network_endpoint_groups_client.go @@ -310,7 +310,7 @@ func (c *networkEndpointGroupsRESTClient) AggregatedList(ctx context.Context, re req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -776,7 +776,7 @@ func (c *networkEndpointGroupsRESTClient) List(ctx context.Context, req *compute req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -874,7 +874,7 @@ func (c *networkEndpointGroupsRESTClient) ListNetworkEndpoints(ctx context.Conte req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/network_firewall_policies_client.go b/vendor/cloud.google.com/go/compute/apiv1/network_firewall_policies_client.go index f23acc6d0..f58af07ad 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/network_firewall_policies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/network_firewall_policies_client.go @@ -1016,7 +1016,7 @@ func (c *networkFirewallPoliciesRESTClient) List(ctx context.Context, req *compu req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = 
proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/networks_client.go b/vendor/cloud.google.com/go/compute/apiv1/networks_client.go index 6a1742b28..14481aa4f 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/networks_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/networks_client.go @@ -669,7 +669,7 @@ func (c *networksRESTClient) List(ctx context.Context, req *computepb.ListNetwor req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -766,7 +766,7 @@ func (c *networksRESTClient) ListPeeringRoutes(ctx context.Context, req *compute req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/node_groups_client.go b/vendor/cloud.google.com/go/compute/apiv1/node_groups_client.go index c8fe11287..1f30b4958 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/node_groups_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/node_groups_client.go @@ -456,7 +456,7 @@ func (c *nodeGroupsRESTClient) AggregatedList(ctx context.Context, req *computep req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -908,7 +908,7 @@ func (c *nodeGroupsRESTClient) List(ctx context.Context, req *computepb.ListNode req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -1005,7 +1005,7 @@ func (c *nodeGroupsRESTClient) ListNodes(ctx context.Context, req *computepb.Lis req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/node_templates_client.go b/vendor/cloud.google.com/go/compute/apiv1/node_templates_client.go index c8d323794..454b9dfac 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/node_templates_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/node_templates_client.go @@ -309,7 +309,7 @@ func (c *nodeTemplatesRESTClient) AggregatedList(ctx context.Context, req *compu req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -683,7 +683,7 @@ func (c *nodeTemplatesRESTClient) List(ctx context.Context, req *computepb.ListN req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git 
a/vendor/cloud.google.com/go/compute/apiv1/node_types_client.go b/vendor/cloud.google.com/go/compute/apiv1/node_types_client.go index 5d8433298..02d157dc3 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/node_types_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/node_types_client.go @@ -233,7 +233,7 @@ func (c *nodeTypesRESTClient) AggregatedList(ctx context.Context, req *computepb req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -398,7 +398,7 @@ func (c *nodeTypesRESTClient) List(ctx context.Context, req *computepb.ListNodeT req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/packet_mirrorings_client.go b/vendor/cloud.google.com/go/compute/apiv1/packet_mirrorings_client.go index ebef1f0d2..1b80e401c 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/packet_mirrorings_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/packet_mirrorings_client.go @@ -290,7 +290,7 @@ func (c *packetMirroringsRESTClient) AggregatedList(ctx context.Context, req *co req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -602,7 +602,7 @@ func (c *packetMirroringsRESTClient) List(ctx context.Context, req *computepb.Li req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/projects_client.go b/vendor/cloud.google.com/go/compute/apiv1/projects_client.go index 3badffc90..0b5499bd0 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/projects_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/projects_client.go @@ -759,7 +759,7 @@ func (c *projectsRESTClient) GetXpnResources(ctx context.Context, req *computepb req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -857,7 +857,7 @@ func (c *projectsRESTClient) ListXpnHosts(ctx context.Context, req *computepb.Li req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/public_advertised_prefixes_client.go b/vendor/cloud.google.com/go/compute/apiv1/public_advertised_prefixes_client.go index a2b4f0ee2..b4e341ae5 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/public_advertised_prefixes_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/public_advertised_prefixes_client.go @@ -549,7 +549,7 @@ func (c *publicAdvertisedPrefixesRESTClient) List(ctx context.Context, req *comp req.PageToken = 
proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/public_delegated_prefixes_client.go b/vendor/cloud.google.com/go/compute/apiv1/public_delegated_prefixes_client.go index 98540c9b9..b9460a009 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/public_delegated_prefixes_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/public_delegated_prefixes_client.go @@ -300,7 +300,7 @@ func (c *publicDelegatedPrefixesRESTClient) AggregatedList(ctx context.Context, req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -682,7 +682,7 @@ func (c *publicDelegatedPrefixesRESTClient) List(ctx context.Context, req *compu req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_autoscalers_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_autoscalers_client.go index a6a3a745a..ede2a5f33 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_autoscalers_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_autoscalers_client.go @@ -472,7 +472,7 @@ func (c *regionAutoscalersRESTClient) List(ctx context.Context, req *computepb.L req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_backend_services_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_backend_services_client.go index 5dc154092..4d5381df3 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_backend_services_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_backend_services_client.go @@ -674,7 +674,7 @@ func (c *regionBackendServicesRESTClient) List(ctx context.Context, req *compute req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -771,7 +771,7 @@ func (c *regionBackendServicesRESTClient) ListUsable(ctx context.Context, req *c req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_commitments_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_commitments_client.go index c1169c81f..63303fb1c 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_commitments_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_commitments_client.go @@ -270,7 +270,7 @@ func (c *regionCommitmentsRESTClient) AggregatedList(ctx context.Context, req *c req.PageToken = proto.String(pageToken) } if pageSize 
> math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -512,7 +512,7 @@ func (c *regionCommitmentsRESTClient) List(ctx context.Context, req *computepb.L req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_disk_types_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_disk_types_client.go index 0c3cc4a55..1f9b69c8f 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_disk_types_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_disk_types_client.go @@ -268,7 +268,7 @@ func (c *regionDiskTypesRESTClient) List(ctx context.Context, req *computepb.Lis req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_disks_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_disks_client.go index b96f909e1..7c6d218fa 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_disks_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_disks_client.go @@ -887,7 +887,7 @@ func (c *regionDisksRESTClient) List(ctx context.Context, req *computepb.ListReg req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_health_check_services_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_health_check_services_client.go index a33c9613f..d6dc4f11c 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_health_check_services_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_health_check_services_client.go @@ -462,7 +462,7 @@ func (c *regionHealthCheckServicesRESTClient) List(ctx context.Context, req *com req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_health_checks_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_health_checks_client.go index 6c83d7939..1f1704982 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_health_checks_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_health_checks_client.go @@ -472,7 +472,7 @@ func (c *regionHealthChecksRESTClient) List(ctx context.Context, req *computepb. 
req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_instance_group_managers_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_instance_group_managers_client.go index 753865467..476c83c6d 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_instance_group_managers_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_instance_group_managers_client.go @@ -982,7 +982,7 @@ func (c *regionInstanceGroupManagersRESTClient) List(ctx context.Context, req *c req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -1079,7 +1079,7 @@ func (c *regionInstanceGroupManagersRESTClient) ListErrors(ctx context.Context, req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -1176,7 +1176,7 @@ func (c *regionInstanceGroupManagersRESTClient) ListManagedInstances(ctx context req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -1273,7 +1273,7 @@ func (c *regionInstanceGroupManagersRESTClient) ListPerInstanceConfigs(ctx conte req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_instance_groups_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_instance_groups_client.go index 46916ee8e..99d3ede4d 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_instance_groups_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_instance_groups_client.go @@ -305,7 +305,7 @@ func (c *regionInstanceGroupsRESTClient) List(ctx context.Context, req *computep req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -403,7 +403,7 @@ func (c *regionInstanceGroupsRESTClient) ListInstances(ctx context.Context, req req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_instance_templates_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_instance_templates_client.go index 77dd53325..e89df448f 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_instance_templates_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_instance_templates_client.go @@ -452,7 +452,7 @@ func (c *regionInstanceTemplatesRESTClient) List(ctx 
context.Context, req *compu req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_instant_snapshots_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_instant_snapshots_client.go index ad6260bb8..75daf51f6 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_instant_snapshots_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_instant_snapshots_client.go @@ -563,7 +563,7 @@ func (c *regionInstantSnapshotsRESTClient) List(ctx context.Context, req *comput req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_network_endpoint_groups_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_network_endpoint_groups_client.go index c06135d13..7673c7535 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_network_endpoint_groups_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_network_endpoint_groups_client.go @@ -636,7 +636,7 @@ func (c *regionNetworkEndpointGroupsRESTClient) List(ctx context.Context, req *c req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -733,7 +733,7 @@ func (c *regionNetworkEndpointGroupsRESTClient) ListNetworkEndpoints(ctx context req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_network_firewall_policies_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_network_firewall_policies_client.go index 071aeaea9..8f1ea07a2 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_network_firewall_policies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_network_firewall_policies_client.go @@ -1100,7 +1100,7 @@ func (c *regionNetworkFirewallPoliciesRESTClient) List(ctx context.Context, req req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_notification_endpoints_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_notification_endpoints_client.go index c8a387653..5812b7c43 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_notification_endpoints_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_notification_endpoints_client.go @@ -452,7 +452,7 @@ func (c *regionNotificationEndpointsRESTClient) List(ctx context.Context, req *c req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = 
proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_operations_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_operations_client.go index 4be12a845..481330b55 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_operations_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_operations_client.go @@ -343,7 +343,7 @@ func (c *regionOperationsRESTClient) List(ctx context.Context, req *computepb.Li req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_security_policies_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_security_policies_client.go index 98380be4c..33cb3f691 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_security_policies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_security_policies_client.go @@ -653,7 +653,7 @@ func (c *regionSecurityPoliciesRESTClient) List(ctx context.Context, req *comput req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_ssl_certificates_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_ssl_certificates_client.go index b9baf2f8c..94b0e9323 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_ssl_certificates_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_ssl_certificates_client.go @@ -452,7 +452,7 @@ func (c *regionSslCertificatesRESTClient) List(ctx context.Context, req *compute req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_ssl_policies_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_ssl_policies_client.go index ffa01e565..99a39827c 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_ssl_policies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_ssl_policies_client.go @@ -481,7 +481,7 @@ func (c *regionSslPoliciesRESTClient) List(ctx context.Context, req *computepb.L req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_target_http_proxies_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_target_http_proxies_client.go index 7757843db..b6f2ced9b 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_target_http_proxies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_target_http_proxies_client.go @@ -462,7 +462,7 @@ func (c *regionTargetHttpProxiesRESTClient) List(ctx context.Context, req *compu req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = 
proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_target_https_proxies_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_target_https_proxies_client.go index e5f177a83..d0ed41cd1 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_target_https_proxies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_target_https_proxies_client.go @@ -482,7 +482,7 @@ func (c *regionTargetHttpsProxiesRESTClient) List(ctx context.Context, req *comp req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_target_tcp_proxies_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_target_tcp_proxies_client.go index 96fee8d1e..819b4fc76 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_target_tcp_proxies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_target_tcp_proxies_client.go @@ -452,7 +452,7 @@ func (c *regionTargetTcpProxiesRESTClient) List(ctx context.Context, req *comput req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_url_maps_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_url_maps_client.go index 437590ed5..eb98353d6 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_url_maps_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_url_maps_client.go @@ -482,7 +482,7 @@ func (c *regionUrlMapsRESTClient) List(ctx context.Context, req *computepb.ListR req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/region_zones_client.go b/vendor/cloud.google.com/go/compute/apiv1/region_zones_client.go index 8ac8a0c31..fa57c33ad 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/region_zones_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/region_zones_client.go @@ -194,7 +194,7 @@ func (c *regionZonesRESTClient) List(ctx context.Context, req *computepb.ListReg req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/regions_client.go b/vendor/cloud.google.com/go/compute/apiv1/regions_client.go index 106a8056e..ed7ad9a8a 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/regions_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/regions_client.go @@ -268,7 +268,7 @@ func (c *regionsRESTClient) List(ctx context.Context, req *computepb.ListRegions req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/reservations_client.go 
b/vendor/cloud.google.com/go/compute/apiv1/reservations_client.go index 8628c9072..a41939eb1 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/reservations_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/reservations_client.go @@ -329,7 +329,7 @@ func (c *reservationsRESTClient) AggregatedList(ctx context.Context, req *comput req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -703,7 +703,7 @@ func (c *reservationsRESTClient) List(ctx context.Context, req *computepb.ListRe req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/resource_policies_client.go b/vendor/cloud.google.com/go/compute/apiv1/resource_policies_client.go index b8f962ad9..ba47694a0 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/resource_policies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/resource_policies_client.go @@ -319,7 +319,7 @@ func (c *resourcePoliciesRESTClient) AggregatedList(ctx context.Context, req *co req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -693,7 +693,7 @@ func (c *resourcePoliciesRESTClient) List(ctx context.Context, req *computepb.Li req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/routers_client.go b/vendor/cloud.google.com/go/compute/apiv1/routers_client.go index 4e13c8244..72153bd73 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/routers_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/routers_client.go @@ -357,7 +357,7 @@ func (c *routersRESTClient) AggregatedList(ctx context.Context, req *computepb.A req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -654,7 +654,7 @@ func (c *routersRESTClient) GetNatMappingInfo(ctx context.Context, req *computep req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -886,7 +886,7 @@ func (c *routersRESTClient) List(ctx context.Context, req *computepb.ListRouters req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/routes_client.go b/vendor/cloud.google.com/go/compute/apiv1/routes_client.go index fe55e761d..afe26fecd 100644 --- 
a/vendor/cloud.google.com/go/compute/apiv1/routes_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/routes_client.go @@ -450,7 +450,7 @@ func (c *routesRESTClient) List(ctx context.Context, req *computepb.ListRoutesRe req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/security_policies_client.go b/vendor/cloud.google.com/go/compute/apiv1/security_policies_client.go index 61fd4c56c..f092c8a9e 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/security_policies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/security_policies_client.go @@ -434,7 +434,7 @@ func (c *securityPoliciesRESTClient) AggregatedList(ctx context.Context, req *co req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -809,7 +809,7 @@ func (c *securityPoliciesRESTClient) List(ctx context.Context, req *computepb.Li req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/service_attachments_client.go b/vendor/cloud.google.com/go/compute/apiv1/service_attachments_client.go index 0336b7b3c..57174b75d 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/service_attachments_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/service_attachments_client.go @@ -319,7 +319,7 @@ func (c *serviceAttachmentsRESTClient) AggregatedList(ctx context.Context, req * req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -693,7 +693,7 @@ func (c *serviceAttachmentsRESTClient) List(ctx context.Context, req *computepb. 
req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/snapshots_client.go b/vendor/cloud.google.com/go/compute/apiv1/snapshots_client.go index 5a7cb07d0..6118ada6b 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/snapshots_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/snapshots_client.go @@ -561,7 +561,7 @@ func (c *snapshotsRESTClient) List(ctx context.Context, req *computepb.ListSnaps req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/ssl_certificates_client.go b/vendor/cloud.google.com/go/compute/apiv1/ssl_certificates_client.go index e88f08f6a..bbb6740ff 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/ssl_certificates_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/ssl_certificates_client.go @@ -270,7 +270,7 @@ func (c *sslCertificatesRESTClient) AggregatedList(ctx context.Context, req *com req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -580,7 +580,7 @@ func (c *sslCertificatesRESTClient) List(ctx context.Context, req *computepb.Lis req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/ssl_policies_client.go b/vendor/cloud.google.com/go/compute/apiv1/ssl_policies_client.go index 1428711ea..6617096a2 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/ssl_policies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/ssl_policies_client.go @@ -299,7 +299,7 @@ func (c *sslPoliciesRESTClient) AggregatedList(ctx context.Context, req *compute req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -609,7 +609,7 @@ func (c *sslPoliciesRESTClient) List(ctx context.Context, req *computepb.ListSsl req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/storage_pool_types_client.go b/vendor/cloud.google.com/go/compute/apiv1/storage_pool_types_client.go index 7f1ddb9ae..6bf07ce21 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/storage_pool_types_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/storage_pool_types_client.go @@ -233,7 +233,7 @@ func (c *storagePoolTypesRESTClient) AggregatedList(ctx context.Context, req *co req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = 
proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -398,7 +398,7 @@ func (c *storagePoolTypesRESTClient) List(ctx context.Context, req *computepb.Li req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/storage_pools_client.go b/vendor/cloud.google.com/go/compute/apiv1/storage_pools_client.go index 83dc8f036..f5264a299 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/storage_pools_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/storage_pools_client.go @@ -338,7 +338,7 @@ func (c *storagePoolsRESTClient) AggregatedList(ctx context.Context, req *comput req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -712,7 +712,7 @@ func (c *storagePoolsRESTClient) List(ctx context.Context, req *computepb.ListSt req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -809,7 +809,7 @@ func (c *storagePoolsRESTClient) ListDisks(ctx context.Context, req *computepb.L req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/subnetworks_client.go b/vendor/cloud.google.com/go/compute/apiv1/subnetworks_client.go index 8c5b91b5c..e731366df 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/subnetworks_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/subnetworks_client.go @@ -358,7 +358,7 @@ func (c *subnetworksRESTClient) AggregatedList(ctx context.Context, req *compute req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -809,7 +809,7 @@ func (c *subnetworksRESTClient) List(ctx context.Context, req *computepb.ListSub req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -906,7 +906,7 @@ func (c *subnetworksRESTClient) ListUsable(ctx context.Context, req *computepb.L req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/target_grpc_proxies_client.go b/vendor/cloud.google.com/go/compute/apiv1/target_grpc_proxies_client.go index 8925ff4e8..73ed04666 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/target_grpc_proxies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/target_grpc_proxies_client.go @@ 
-460,7 +460,7 @@ func (c *targetGrpcProxiesRESTClient) List(ctx context.Context, req *computepb.L req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/target_http_proxies_client.go b/vendor/cloud.google.com/go/compute/apiv1/target_http_proxies_client.go index e15744b7e..6e6509b36 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/target_http_proxies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/target_http_proxies_client.go @@ -290,7 +290,7 @@ func (c *targetHttpProxiesRESTClient) AggregatedList(ctx context.Context, req *c req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -600,7 +600,7 @@ func (c *targetHttpProxiesRESTClient) List(ctx context.Context, req *computepb.L req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/target_https_proxies_client.go b/vendor/cloud.google.com/go/compute/apiv1/target_https_proxies_client.go index 842ce70ce..aca1278fb 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/target_https_proxies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/target_https_proxies_client.go @@ -330,7 +330,7 @@ func (c *targetHttpsProxiesRESTClient) AggregatedList(ctx context.Context, req * req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -640,7 +640,7 @@ func (c *targetHttpsProxiesRESTClient) List(ctx context.Context, req *computepb. 
req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/target_instances_client.go b/vendor/cloud.google.com/go/compute/apiv1/target_instances_client.go index cbcb2fc50..03b36b673 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/target_instances_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/target_instances_client.go @@ -280,7 +280,7 @@ func (c *targetInstancesRESTClient) AggregatedList(ctx context.Context, req *com req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -592,7 +592,7 @@ func (c *targetInstancesRESTClient) List(ctx context.Context, req *computepb.Lis req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/target_pools_client.go b/vendor/cloud.google.com/go/compute/apiv1/target_pools_client.go index c249c6339..a7eb080f7 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/target_pools_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/target_pools_client.go @@ -494,7 +494,7 @@ func (c *targetPoolsRESTClient) AggregatedList(ctx context.Context, req *compute req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -868,7 +868,7 @@ func (c *targetPoolsRESTClient) List(ctx context.Context, req *computepb.ListTar req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/target_ssl_proxies_client.go b/vendor/cloud.google.com/go/compute/apiv1/target_ssl_proxies_client.go index b7102d2e3..1b16d37e9 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/target_ssl_proxies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/target_ssl_proxies_client.go @@ -500,7 +500,7 @@ func (c *targetSslProxiesRESTClient) List(ctx context.Context, req *computepb.Li req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/target_tcp_proxies_client.go b/vendor/cloud.google.com/go/compute/apiv1/target_tcp_proxies_client.go index 5e1e2c492..ee5657270 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/target_tcp_proxies_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/target_tcp_proxies_client.go @@ -290,7 +290,7 @@ func (c *targetTcpProxiesRESTClient) AggregatedList(ctx context.Context, req *co req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = 
proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -600,7 +600,7 @@ func (c *targetTcpProxiesRESTClient) List(ctx context.Context, req *computepb.Li req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/target_vpn_gateways_client.go b/vendor/cloud.google.com/go/compute/apiv1/target_vpn_gateways_client.go index 86151a83b..94dd39c30 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/target_vpn_gateways_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/target_vpn_gateways_client.go @@ -280,7 +280,7 @@ func (c *targetVpnGatewaysRESTClient) AggregatedList(ctx context.Context, req *c req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -592,7 +592,7 @@ func (c *targetVpnGatewaysRESTClient) List(ctx context.Context, req *computepb.L req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/url_maps_client.go b/vendor/cloud.google.com/go/compute/apiv1/url_maps_client.go index 5f6aebda9..7883daeae 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/url_maps_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/url_maps_client.go @@ -310,7 +310,7 @@ func (c *urlMapsRESTClient) AggregatedList(ctx context.Context, req *computepb.A req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -696,7 +696,7 @@ func (c *urlMapsRESTClient) List(ctx context.Context, req *computepb.ListUrlMaps req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/vpn_gateways_client.go b/vendor/cloud.google.com/go/compute/apiv1/vpn_gateways_client.go index 726690450..629d5c950 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/vpn_gateways_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/vpn_gateways_client.go @@ -309,7 +309,7 @@ func (c *vpnGatewaysRESTClient) AggregatedList(ctx context.Context, req *compute req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -676,7 +676,7 @@ func (c *vpnGatewaysRESTClient) List(ctx context.Context, req *computepb.ListVpn req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = 
proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/vpn_tunnels_client.go b/vendor/cloud.google.com/go/compute/apiv1/vpn_tunnels_client.go index f25369d1f..033dc01f6 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/vpn_tunnels_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/vpn_tunnels_client.go @@ -280,7 +280,7 @@ func (c *vpnTunnelsRESTClient) AggregatedList(ctx context.Context, req *computep req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } @@ -592,7 +592,7 @@ func (c *vpnTunnelsRESTClient) List(ctx context.Context, req *computepb.ListVpnT req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/zone_operations_client.go b/vendor/cloud.google.com/go/compute/apiv1/zone_operations_client.go index 126f1f614..598b56d63 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/zone_operations_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/zone_operations_client.go @@ -343,7 +343,7 @@ func (c *zoneOperationsRESTClient) List(ctx context.Context, req *computepb.List req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/apiv1/zones_client.go b/vendor/cloud.google.com/go/compute/apiv1/zones_client.go index e07b69345..0e760371a 100644 --- a/vendor/cloud.google.com/go/compute/apiv1/zones_client.go +++ b/vendor/cloud.google.com/go/compute/apiv1/zones_client.go @@ -268,7 +268,7 @@ func (c *zonesRESTClient) List(ctx context.Context, req *computepb.ListZonesRequ req.PageToken = proto.String(pageToken) } if pageSize > math.MaxInt32 { - req.MaxResults = proto.Uint32(math.MaxInt32) + req.MaxResults = proto.Uint32(uint32(math.MaxInt32)) } else if pageSize != 0 { req.MaxResults = proto.Uint32(uint32(pageSize)) } diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go index 7fdea210d..50f34dc83 100644 --- a/vendor/cloud.google.com/go/compute/internal/version.go +++ b/vendor/cloud.google.com/go/compute/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. 
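Reviewer note: every compute pager hunk above is the same one-line change. The untyped constant math.MaxInt32 already fits in uint32, so the explicit uint32(...) conversion does not alter behavior; it appears intended only to make the narrowing explicit in the generated clamp code (e.g. for stricter integer-conversion linting). A minimal, self-contained sketch of the clamp logic these pagers share — clampPageSize is a hypothetical helper distilled from the hunks, not an exported API:

```go
package main

import (
	"fmt"
	"math"

	"google.golang.org/protobuf/proto"
)

// clampPageSize mirrors the pager logic repeated in every client above:
// a caller-supplied page size (an int) is clamped to the int32 range before
// being stored in the optional uint32 MaxResults field. The uint32(...)
// around math.MaxInt32 is a no-op for the constant; it only spells out the
// narrowing conversion.
func clampPageSize(pageSize int) *uint32 {
	if pageSize > math.MaxInt32 { // only reachable where int is 64-bit
		return proto.Uint32(uint32(math.MaxInt32))
	} else if pageSize != 0 {
		return proto.Uint32(uint32(pageSize))
	}
	return nil // leave MaxResults unset so the server default applies
}

func main() {
	for _, ps := range []int{0, 500, math.MaxInt32} {
		if r := clampPageSize(ps); r != nil {
			fmt.Println(ps, "->", *r)
		} else {
			fmt.Println(ps, "-> server default")
		}
	}
}
```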
-const Version = "1.27.5" +const Version = "1.28.0" diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json index a3e99df29..54acae7cd 100644 --- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -346,7 +346,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/backupdr/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/baremetalsolution/apiv2": { @@ -526,7 +526,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/migration/apiv2", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/migration/apiv2alpha": { @@ -589,6 +589,26 @@ "release_level": "stable", "library_type": "GAPIC_MANUAL" }, + "cloud.google.com/go/bigtable/admin/apiv2": { + "api_shortname": "bigtableadmin", + "distribution_name": "cloud.google.com/go/bigtable/admin/apiv2", + "description": "Cloud Bigtable Admin API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigtable/latest/admin/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigtable/apiv2": { + "api_shortname": "bigtable", + "distribution_name": "cloud.google.com/go/bigtable/apiv2", + "description": "Cloud Bigtable API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigtable/latest/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/billing/apiv1": { "api_shortname": "cloudbilling", "distribution_name": "cloud.google.com/go/billing/apiv1", @@ -696,7 +716,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudcontrolspartner/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/cloudcontrolspartner/apiv1beta": { @@ -736,7 +756,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudquotas/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/cloudtasks/apiv2": { @@ -1166,7 +1186,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/edgenetwork/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/errorreporting": { @@ -1959,6 +1979,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/privilegedaccessmanager/apiv1": { + "api_shortname": "privilegedaccessmanager", + "distribution_name": "cloud.google.com/go/privilegedaccessmanager/apiv1", + "description": "Privileged Access Manager API", + "language": "go", + "client_library_type": "generated", + "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/privilegedaccessmanager/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/profiler": { "api_shortname": "cloudprofiler", "distribution_name": "cloud.google.com/go/profiler", @@ -2226,7 +2256,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securesourcemanager/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/security/privateca/apiv1": { @@ -2316,7 +2346,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycentermanagement/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/securityposture/apiv1": { @@ -2606,7 +2636,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest/control/apiv2", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/storage/internal/apiv2": { @@ -2686,7 +2716,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/telcoautomation/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/texttospeech/apiv1": { diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md index 8bf0e5b78..33c88305c 100644 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -70,3 +70,5 @@ benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') - [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) - [FreeCache](https://github.com/coocood/freecache) - [FastCache](https://github.com/VictoriaMetrics/fastcache) +- [Ristretto](https://github.com/dgraph-io/ristretto) +- [Badger](https://github.com/dgraph-io/badger) diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go index a9e0d45c9..78bddf1ce 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -19,10 +19,13 @@ const ( // Store the primes in an array as well. // // The consts are used when possible in Go code to avoid MOVs but we need a -// contiguous array of the assembly code. +// contiguous array for the assembly code. var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. +// +// Note that a zero-valued Digest is not ready to receive writes. +// Call Reset or create a Digest using New before calling other methods. type Digest struct { v1 uint64 v2 uint64 @@ -33,19 +36,31 @@ type Digest struct { n int // how much of mem is used } -// New creates a new Digest that computes the 64-bit xxHash algorithm. +// New creates a new Digest with a zero seed. func New() *Digest { + return NewWithSeed(0) +} + +// NewWithSeed creates a new Digest with the given seed. +func NewWithSeed(seed uint64) *Digest { var d Digest - d.Reset() + d.ResetWithSeed(seed) return &d } // Reset clears the Digest's state so that it can be reused. +// It uses a seed value of zero. 
func (d *Digest) Reset() { - d.v1 = primes[0] + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -primes[0] + d.ResetWithSeed(0) +} + +// ResetWithSeed clears the Digest's state so that it can be reused. +// It uses the given seed to initialize the state. +func (d *Digest) ResetWithSeed(seed uint64) { + d.v1 = seed + prime1 + prime2 + d.v2 = seed + prime2 + d.v3 = seed + d.v4 = seed - prime1 d.total = 0 d.n = 0 } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go index 9216e0a40..78f95f256 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -6,7 +6,7 @@ package xxhash -// Sum64 computes the 64-bit xxHash digest of b. +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. // //go:noescape func Sum64(b []byte) uint64 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go index 26df13bba..118e49e81 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -3,7 +3,7 @@ package xxhash -// Sum64 computes the 64-bit xxHash digest of b. +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. func Sum64(b []byte) uint64 { // A simpler version would be // d := New() diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go index e86f1b5fd..05f5e7dfe 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -5,7 +5,7 @@ package xxhash -// Sum64String computes the 64-bit xxHash digest of s. +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. func Sum64String(s string) uint64 { return Sum64([]byte(s)) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 1c1638fd8..cf9d42aed 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -33,7 +33,7 @@ import ( // // See https://github.com/golang/go/issues/42739 for discussion. -// Sum64String computes the 64-bit xxHash digest of s. +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. // It may be faster than Sum64([]byte(s)) by avoiding a copy. func Sum64String(s string) uint64 { b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})) diff --git a/vendor/github.com/containers/image/v5/copy/compression.go b/vendor/github.com/containers/image/v5/copy/compression.go index 318450a3a..fb5e1b174 100644 --- a/vendor/github.com/containers/image/v5/copy/compression.go +++ b/vendor/github.com/containers/image/v5/copy/compression.go @@ -11,6 +11,7 @@ import ( "github.com/containers/image/v5/pkg/compression" compressiontypes "github.com/containers/image/v5/pkg/compression/types" "github.com/containers/image/v5/types" + chunkedToc "github.com/containers/storage/pkg/chunked/toc" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" ) @@ -34,10 +35,10 @@ var ( // bpDetectCompressionStepData contains data that the copy pipeline needs about the “detect compression” step. 
type bpDetectCompressionStepData struct { - isCompressed bool - format compressiontypes.Algorithm // Valid if isCompressed - decompressor compressiontypes.DecompressorFunc // Valid if isCompressed - srcCompressorName string // Compressor name to possibly record in the blob info cache for the source blob. + isCompressed bool + format compressiontypes.Algorithm // Valid if isCompressed + decompressor compressiontypes.DecompressorFunc // Valid if isCompressed + srcCompressorBaseVariantName string // Compressor name to possibly record in the blob info cache for the source blob. } // blobPipelineDetectCompressionStep updates *stream to detect its current compression format. @@ -51,15 +52,25 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI } stream.reader = reader + if decompressor != nil && format.Name() == compressiontypes.ZstdAlgorithmName { + tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations) + if err != nil { + return bpDetectCompressionStepData{}, err + } + if tocDigest != nil { + format = compression.ZstdChunked + } + + } res := bpDetectCompressionStepData{ isCompressed: decompressor != nil, format: format, decompressor: decompressor, } if res.isCompressed { - res.srcCompressorName = format.Name() + res.srcCompressorBaseVariantName = format.BaseVariantName() } else { - res.srcCompressorName = internalblobinfocache.Uncompressed + res.srcCompressorBaseVariantName = internalblobinfocache.Uncompressed } if expectedBaseFormat, known := expectedBaseCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.BaseVariantName() != expectedBaseFormat.Name() { @@ -70,13 +81,14 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI // bpCompressionStepData contains data that the copy pipeline needs about the compression step. type bpCompressionStepData struct { - operation bpcOperation // What we are actually doing - uploadedOperation types.LayerCompression // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do) - uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits. - uploadedAnnotations map[string]string // Compression-related annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed. - srcCompressorName string // Compressor name to record in the blob info cache for the source blob. - uploadedCompressorName string // Compressor name to record in the blob info cache for the uploaded blob. - closers []io.Closer // Objects to close after the upload is done, if any. + operation bpcOperation // What we are actually doing + uploadedOperation types.LayerCompression // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do) + uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits. + uploadedAnnotations map[string]string // Compression-related annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed. + srcCompressorBaseVariantName string // Compressor base variant name to record in the blob info cache for the source blob. + uploadedCompressorBaseVariantName string // Compressor base variant name to record in the blob info cache for the uploaded blob. + uploadedCompressorSpecificVariantName string // Compressor specific variant name to record in the blob info cache for the uploaded blob. 
+ closers []io.Closer // Objects to close after the upload is done, if any. } type bpcOperation int @@ -128,11 +140,12 @@ func (ic *imageCopier) bpcPreserveEncrypted(stream *sourceStream, _ bpDetectComp // We can’t do anything with an encrypted blob unless decrypted. logrus.Debugf("Using original blob without modification for encrypted blob") return &bpCompressionStepData{ - operation: bpcOpPreserveOpaque, - uploadedOperation: types.PreserveOriginal, - uploadedAlgorithm: nil, - srcCompressorName: internalblobinfocache.UnknownCompression, - uploadedCompressorName: internalblobinfocache.UnknownCompression, + operation: bpcOpPreserveOpaque, + uploadedOperation: types.PreserveOriginal, + uploadedAlgorithm: nil, + srcCompressorBaseVariantName: internalblobinfocache.UnknownCompression, + uploadedCompressorBaseVariantName: internalblobinfocache.UnknownCompression, + uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression, }, nil } return nil, nil @@ -156,14 +169,19 @@ func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bp Digest: "", Size: -1, } + specificVariantName := uploadedAlgorithm.Name() + if specificVariantName == uploadedAlgorithm.BaseVariantName() { + specificVariantName = internalblobinfocache.UnknownCompression + } return &bpCompressionStepData{ - operation: bpcOpCompressUncompressed, - uploadedOperation: types.Compress, - uploadedAlgorithm: uploadedAlgorithm, - uploadedAnnotations: annotations, - srcCompressorName: detected.srcCompressorName, - uploadedCompressorName: uploadedAlgorithm.Name(), - closers: []io.Closer{reader}, + operation: bpcOpCompressUncompressed, + uploadedOperation: types.Compress, + uploadedAlgorithm: uploadedAlgorithm, + uploadedAnnotations: annotations, + srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + uploadedCompressorBaseVariantName: uploadedAlgorithm.BaseVariantName(), + uploadedCompressorSpecificVariantName: specificVariantName, + closers: []io.Closer{reader}, }, nil } return nil, nil @@ -196,15 +214,20 @@ func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bp Digest: "", Size: -1, } + specificVariantName := ic.compressionFormat.Name() + if specificVariantName == ic.compressionFormat.BaseVariantName() { + specificVariantName = internalblobinfocache.UnknownCompression + } succeeded = true return &bpCompressionStepData{ - operation: bpcOpRecompressCompressed, - uploadedOperation: types.PreserveOriginal, - uploadedAlgorithm: ic.compressionFormat, - uploadedAnnotations: annotations, - srcCompressorName: detected.srcCompressorName, - uploadedCompressorName: ic.compressionFormat.Name(), - closers: []io.Closer{decompressed, recompressed}, + operation: bpcOpRecompressCompressed, + uploadedOperation: types.PreserveOriginal, + uploadedAlgorithm: ic.compressionFormat, + uploadedAnnotations: annotations, + srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + uploadedCompressorBaseVariantName: ic.compressionFormat.BaseVariantName(), + uploadedCompressorSpecificVariantName: specificVariantName, + closers: []io.Closer{decompressed, recompressed}, }, nil } return nil, nil @@ -225,12 +248,13 @@ func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bp Size: -1, } return &bpCompressionStepData{ - operation: bpcOpDecompressCompressed, - uploadedOperation: types.Decompress, - uploadedAlgorithm: nil, - srcCompressorName: detected.srcCompressorName, - uploadedCompressorName: internalblobinfocache.Uncompressed, - closers: []io.Closer{s}, + 
operation: bpcOpDecompressCompressed, + uploadedOperation: types.Decompress, + uploadedAlgorithm: nil, + srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + uploadedCompressorBaseVariantName: internalblobinfocache.Uncompressed, + uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression, + closers: []io.Closer{s}, }, nil } return nil, nil @@ -268,11 +292,15 @@ func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCom algorithm = nil } return &bpCompressionStepData{ - operation: bpcOp, - uploadedOperation: uploadedOp, - uploadedAlgorithm: algorithm, - srcCompressorName: detected.srcCompressorName, - uploadedCompressorName: detected.srcCompressorName, + operation: bpcOp, + uploadedOperation: uploadedOp, + uploadedAlgorithm: algorithm, + srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + // We only record the base variant of the format on upload; we didn’t do anything with + // the TOC, we don’t know whether it matches the blob digest, so we don’t want to trigger + // reuse of any kind between the blob digest and the TOC digest. + uploadedCompressorBaseVariantName: detected.srcCompressorBaseVariantName, + uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression, } } @@ -308,6 +336,15 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf // No useful information case bpcOpCompressUncompressed: c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest) + if d.uploadedAnnotations != nil { + tocDigest, err := chunkedToc.GetTOCDigest(d.uploadedAnnotations) + if err != nil { + return fmt.Errorf("parsing just-created compression annotations: %w", err) + } + if tocDigest != nil { + c.blobInfoCache.RecordTOCUncompressedPair(*tocDigest, srcInfo.Digest) + } + } case bpcOpDecompressCompressed: c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest) case bpcOpRecompressCompressed, bpcOpPreserveCompressed: @@ -323,29 +360,27 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf return fmt.Errorf("Internal error: Unexpected d.operation value %#v", d.operation) } } - if d.srcCompressorName == "" || d.uploadedCompressorName == "" { - return fmt.Errorf("internal error: missing compressor names (src: %q, uploaded: %q)", - d.srcCompressorName, d.uploadedCompressorName) + if d.srcCompressorBaseVariantName == "" || d.uploadedCompressorBaseVariantName == "" || d.uploadedCompressorSpecificVariantName == "" { + return fmt.Errorf("internal error: missing compressor names (src base: %q, uploaded base: %q, uploaded specific: %q)", + d.srcCompressorBaseVariantName, d.uploadedCompressorBaseVariantName, d.uploadedCompressorSpecificVariantName) } - if d.uploadedCompressorName != internalblobinfocache.UnknownCompression { - if d.uploadedCompressorName != compressiontypes.ZstdChunkedAlgorithmName { - // HACK: Don’t record zstd:chunked algorithms. - // There is already a similar hack in internal/imagedestination/impl/helpers.CandidateMatchesTryReusingBlobOptions, - // and that one prevents reusing zstd:chunked blobs, so recording the algorithm here would be mostly harmless. 
- // - // We skip that here anyway to work around the inability of blobPipelineDetectCompressionStep to differentiate - // between zstd and zstd:chunked; so we could, in varying situations over time, call RecordDigestCompressorName - // with the same digest and both ZstdAlgorithmName and ZstdChunkedAlgorithmName , which causes warnings about - // inconsistent data to be logged. - c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, d.uploadedCompressorName) - } + if d.uploadedCompressorBaseVariantName != internalblobinfocache.UnknownCompression { + c.blobInfoCache.RecordDigestCompressorData(uploadedInfo.Digest, internalblobinfocache.DigestCompressorData{ + BaseVariantCompressor: d.uploadedCompressorBaseVariantName, + SpecificVariantCompressor: d.uploadedCompressorSpecificVariantName, + SpecificVariantAnnotations: d.uploadedAnnotations, + }) } if srcInfo.Digest != "" && srcInfo.Digest != uploadedInfo.Digest && - d.srcCompressorName != internalblobinfocache.UnknownCompression { - if d.srcCompressorName != compressiontypes.ZstdChunkedAlgorithmName { - // HACK: Don’t record zstd:chunked algorithms, see above. - c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName) - } + d.srcCompressorBaseVariantName != internalblobinfocache.UnknownCompression { + // If the source is already using some TOC-dependent variant, we either copied the + // blob as is, or perhaps decompressed it; either way we don’t trust the TOC digest, + // so record neither the variant name, nor the TOC digest. + c.blobInfoCache.RecordDigestCompressorData(srcInfo.Digest, internalblobinfocache.DigestCompressorData{ + BaseVariantCompressor: d.srcCompressorBaseVariantName, + SpecificVariantCompressor: internalblobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, + }) } return nil } diff --git a/vendor/github.com/containers/image/v5/copy/encryption.go b/vendor/github.com/containers/image/v5/copy/encryption.go index 4259d355b..b5a88a914 100644 --- a/vendor/github.com/containers/image/v5/copy/encryption.go +++ b/vendor/github.com/containers/image/v5/copy/encryption.go @@ -48,7 +48,7 @@ func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo Annotations: stream.info.Annotations, } // DecryptLayer supposedly returns a digest of the decrypted stream. - // In pratice, that value is never set in the current implementation. + // In practice, that value is never set in the current implementation. // And we shouldn’t use it anyway, because it is not trusted: encryption can be made to a public key, // i.e. it doesn’t authenticate the origin of the metadata in any way. reader, _, err := ocicrypt.DecryptLayer(ic.c.options.OciDecryptConfig, stream.reader, desc, false) diff --git a/vendor/github.com/containers/image/v5/copy/progress_bars.go b/vendor/github.com/containers/image/v5/copy/progress_bars.go index 053650b8d..08128ce8d 100644 --- a/vendor/github.com/containers/image/v5/copy/progress_bars.go +++ b/vendor/github.com/containers/image/v5/copy/progress_bars.go @@ -121,7 +121,7 @@ func (c *copier) printCopyInfo(kind string, info types.BlobInfo) { } } -// mark100PercentComplete marks the progres bars as 100% complete; +// mark100PercentComplete marks the progress bars as 100% complete; // it may do so by possibly advancing the current state if it is below the known total. 
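
The recordValidatedDigestData changes above only ever link a TOC digest to an uncompressed digest for a blob this process compressed itself (the bpcOpCompressUncompressed branch). A minimal stand-alone sketch of that rule, using plain strings; the hard-coded annotation key is an assumption here, since the vendored code goes through chunkedToc.GetTOCDigest instead:

package main

import "fmt"

// Assumed zstd:chunked TOC-digest annotation key; the vendored code reads it
// via chunkedToc.GetTOCDigest rather than hard-coding it like this.
const tocAnnotationKey = "io.github.containers.zstd-chunked.manifest-checksum"

// recordTOCPair mirrors the bpcOpCompressUncompressed branch: only for a blob
// we compressed ourselves is the TOC digest trusted enough to link to the
// known uncompressed digest.
func recordTOCPair(tocToUncompressed, uploadedAnnotations map[string]string, uncompressedDigest string) {
	tocDigest, ok := uploadedAnnotations[tocAnnotationKey]
	if !ok {
		return // not a chunked blob; nothing to record
	}
	if prev, dup := tocToUncompressed[tocDigest]; dup && prev != uncompressedDigest {
		fmt.Printf("warning: TOC %s previously mapped to %s, now %s\n", tocDigest, prev, uncompressedDigest)
	}
	tocToUncompressed[tocDigest] = uncompressedDigest
}

func main() {
	cache := map[string]string{}
	recordTOCPair(cache, map[string]string{tocAnnotationKey: "sha256:toc"}, "sha256:uncompressed")
	fmt.Println(cache["sha256:toc"]) // sha256:uncompressed
}
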
func (bar *progressBar) mark100PercentComplete() { if bar.originalSize > 0 { diff --git a/vendor/github.com/containers/image/v5/copy/single.go b/vendor/github.com/containers/image/v5/copy/single.go index 17cc8c833..ba414a22d 100644 --- a/vendor/github.com/containers/image/v5/copy/single.go +++ b/vendor/github.com/containers/image/v5/copy/single.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "io" + "maps" "reflect" "slices" "strings" @@ -149,6 +150,28 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar ic.compressionFormat = c.options.DestinationCtx.CompressionFormat ic.compressionLevel = c.options.DestinationCtx.CompressionLevel } + // HACK: Don’t combine zstd:chunked and encryption. + // zstd:chunked can only usefully be consumed using range requests of parts of the layer, which would require the encryption + // to support decrypting arbitrary subsets of the stream. That’s plausible but not supported using the encryption API we have. + // Also, the chunked metadata is exposed in annotations unencrypted, which reveals the TOC digest = layer identity without + // encryption. (That can be determined from the unencrypted config anyway, but, still...) + // + // Ideally this should query a well-defined property of the compression algorithm (and $somehow determine the right fallback) instead of + // hard-coding zstd:chunked / zstd. + if ic.c.options.OciEncryptLayers != nil { + format := ic.compressionFormat + if format == nil { + format = defaultCompressionFormat + } + if format.Name() == compressiontypes.ZstdChunkedAlgorithmName { + if ic.requireCompressionFormatMatch { + return copySingleImageResult{}, errors.New("explicitly requested to combine zstd:chunked with encryption, which is not beneficial; use plain zstd instead") + } + logrus.Warnf("Compression using zstd:chunked is not beneficial for encrypted layers, using plain zstd instead") + ic.compressionFormat = &compression.Zstd + } + } + // Decide whether we can substitute blobs with semantic equivalents: // - Don’t do that if we can’t modify the manifest at all // - Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it. @@ -192,7 +215,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar shouldUpdateSigs := len(sigs) > 0 || len(c.signers) != 0 // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible noPendingManifestUpdates := ic.noPendingManifestUpdates() - logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t, compression match required for resuing blobs=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates, opts.requireCompressionFormatMatch) + logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t, compression match required for reusing blobs=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates, opts.requireCompressionFormatMatch) if !shouldUpdateSigs && !destRequiresOciEncryption && noPendingManifestUpdates && !ic.requireCompressionFormatMatch { matchedResult, err := ic.compareImageDestinationManifestEqual(ctx, targetInstance) if err != nil { @@ -866,21 +889,33 @@ func updatedBlobInfoFromReuse(inputInfo types.BlobInfo, reusedBlob private.Reuse // Handling of compression, encryption, and the related MIME types and the like are all the responsibility // of the generic code in this package. 
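
The zstd:chunked/encryption interaction handled earlier in single.go boils down to a small decision function. A simplified sketch with plain strings standing in for compressiontypes.Algorithm values:

package main

import (
	"errors"
	"fmt"
)

// effectiveCompression mirrors the fallback logic: when layers will be
// encrypted, zstd:chunked degrades to plain zstd, because chunked reuse
// depends on range reads of the plaintext. An explicit, unsatisfiable
// request (match required) is an error instead of a silent downgrade.
func effectiveCompression(requested string, encrypting, matchRequired bool) (string, error) {
	if !encrypting || requested != "zstd:chunked" {
		return requested, nil
	}
	if matchRequired {
		return "", errors.New("zstd:chunked combined with encryption is not beneficial; use plain zstd instead")
	}
	return "zstd", nil // the warn-and-degrade path
}

func main() {
	format, _ := effectiveCompression("zstd:chunked", true, false)
	fmt.Println(format) // zstd
}
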
res := types.BlobInfo{ - Digest: reusedBlob.Digest, - Size: reusedBlob.Size, - URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior. - Annotations: inputInfo.Annotations, // FIXME: This should remove zstd:chunked annotations (but those annotations being left with incorrect values should not break pulls) - MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation. + Digest: reusedBlob.Digest, + Size: reusedBlob.Size, + URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior. + // FIXME: This should remove zstd:chunked annotations IF the original was chunked and the new one isn’t + // (but those annotations being left with incorrect values should not break pulls). + Annotations: maps.Clone(inputInfo.Annotations), + MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation. CompressionOperation: reusedBlob.CompressionOperation, CompressionAlgorithm: reusedBlob.CompressionAlgorithm, CryptoOperation: inputInfo.CryptoOperation, // Expected to be unset anyway. } // The transport is only expected to fill CompressionOperation and CompressionAlgorithm - // if the blob was substituted; otherwise, fill it in based + // if the blob was substituted; otherwise, it is optional, and if not set, fill it in based // on what we know from the srcInfos we were given. if reusedBlob.Digest == inputInfo.Digest { - res.CompressionOperation = inputInfo.CompressionOperation - res.CompressionAlgorithm = inputInfo.CompressionAlgorithm + if res.CompressionOperation == types.PreserveOriginal { + res.CompressionOperation = inputInfo.CompressionOperation + } + if res.CompressionAlgorithm == nil { + res.CompressionAlgorithm = inputInfo.CompressionAlgorithm + } + } + if len(reusedBlob.CompressionAnnotations) != 0 { + if res.Annotations == nil { + res.Annotations = map[string]string{} + } + maps.Copy(res.Annotations, reusedBlob.CompressionAnnotations) } return res } diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index 7f7a74bd3..ed3d4a2c0 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -332,6 +332,7 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest") } + originalCandidateKnownToBeMissing := false if impl.OriginalCandidateMatchesTryReusingBlobOptions(options) { // First, check whether the blob happens to already exist at the destination. haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache) @@ -341,9 +342,17 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, if haveBlob { return true, reusedInfo, nil } + originalCandidateKnownToBeMissing = true } else { logrus.Debugf("Ignoring exact blob match, compression %s does not match required %s or MIME types %#v", optionalCompressionName(options.OriginalCompression), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats) + // We can get here with a blob detected to be zstd when the user wants a zstd:chunked. 
+ // In that case we keep originalCandidateKnownToBeMissing = false, so that if we find
+ // a BIC entry for this blob, we do use that entry and return a zstd:chunked entry
+ // with the BIC’s annotations.
+ // This is not quite correct, it only works if the BIC also contains an acceptable _location_.
+ // Ideally, we could look up just the compression algorithm/annotations for info.digest,
+ // and use it even if no location candidate exists and the original candidate is present.
}

// Then try reusing blobs from other locations.
@@ -387,7 +396,8 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
// for it in the current repo.
candidateRepo = reference.TrimNamed(d.ref.ref)
}
- if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
+ if originalCandidateKnownToBeMissing &&
+ candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
logrus.Debug("... Already tried the primary destination")
continue
}
@@ -427,10 +437,12 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
return true, private.ReusedBlob{
- Digest: candidate.Digest,
- Size: size,
- CompressionOperation: candidate.CompressionOperation,
- CompressionAlgorithm: candidate.CompressionAlgorithm}, nil
+ Digest: candidate.Digest,
+ Size: size,
+ CompressionOperation: candidate.CompressionOperation,
+ CompressionAlgorithm: candidate.CompressionAlgorithm,
+ CompressionAnnotations: candidate.CompressionAnnotations,
+ }, nil
}

return false, private.ReusedBlob{}, nil
diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go
index 893aa959d..f31ee3124 100644
--- a/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go
+++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go
@@ -27,7 +27,14 @@ func (bic *v1OnlyBlobInfoCache) Open() {
func (bic *v1OnlyBlobInfoCache) Close() {
}

-func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
+func (bic *v1OnlyBlobInfoCache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
+ return ""
+}
+
+func (bic *v1OnlyBlobInfoCache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) {
+}
+
+func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorData(anyDigest digest.Digest, data DigestCompressorData) {
}

func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options CandidateLocations2Options) []BICReplacementCandidate2 {
diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go
index c9e4aaa48..acf82ee63 100644
--- a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go
+++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go
@@ -26,19 +26,40 @@ type BlobInfoCache2 interface {
// Close destroys state created by Open().
Close()
- // RecordDigestCompressorName records a compressor for the blob with the specified digest,
- // or Uncompressed or UnknownCompression.
- // WARNING: Only call this with LOCALLY VERIFIED data; don’t record a compressor for a
- // digest just because some remote author claims so (e.g.
because a manifest says so); + // UncompressedDigestForTOC returns an uncompressed digest corresponding to anyDigest. + // Returns "" if the uncompressed digest is unknown. + UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest + // RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed. + // WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. + // because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. + // (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) + RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) + + // RecordDigestCompressorData records data for the blob with the specified digest. + // WARNING: Only call this with LOCALLY VERIFIED data: + // - don’t record a compressor for a digest just because some remote author claims so + // (e.g. because a manifest says so); + // - don’t record the non-base variant or annotations if we are not _sure_ that the base variant + // and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them + // in a manifest) // otherwise the cache could be poisoned and cause us to make incorrect edits to type // information in a manifest. - RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) + RecordDigestCompressorData(anyDigest digest.Digest, data DigestCompressorData) // CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) // that could possibly be reused within the specified (transport scope) (if they still // exist, which is not guaranteed). CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options CandidateLocations2Options) []BICReplacementCandidate2 } +// DigestCompressorData is information known about how a blob is compressed. +// (This is worded generically, but basically targeted at the zstd / zstd:chunked situation.) +type DigestCompressorData struct { + BaseVariantCompressor string // A compressor’s base variant name, or Uncompressed or UnknownCompression. + // The following fields are only valid if the base variant is neither Uncompressed nor UnknownCompression: + SpecificVariantCompressor string // A non-base variant compressor (or UnknownCompression if the true format is just the base variant) + SpecificVariantAnnotations map[string]string // Annotations required to benefit from the base variant. +} + // CandidateLocations2Options are used in CandidateLocations2. type CandidateLocations2Options struct { // If !CanSubstitute, the returned candidates will match the submitted digest exactly; if @@ -51,9 +72,10 @@ type CandidateLocations2Options struct { // BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2. 
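
To make the base/specific split concrete, here are three representative DigestCompressorData shapes, written against a local stand-in type because the real one is module-internal; the constant values, annotation key, and digest are illustrative:

package main

import "fmt"

// Stand-in mirroring the DigestCompressorData fields above; the real type and
// the Uncompressed/UnknownCompression constants live in the module-internal
// blobinfocache package, so external code cannot import them directly.
type digestCompressorData struct {
	baseVariantCompressor      string
	specificVariantCompressor  string
	specificVariantAnnotations map[string]string
}

const (
	uncompressed       = "uncompressed" // assumed value of blobinfocache.Uncompressed
	unknownCompression = "unknown"      // assumed value of blobinfocache.UnknownCompression
)

func main() {
	// A plain gzip blob: the base variant is the whole story.
	gzipBlob := digestCompressorData{
		baseVariantCompressor:     "gzip",
		specificVariantCompressor: unknownCompression,
	}
	// A zstd:chunked blob whose TOC annotations were produced, and thus
	// verified, locally; the key and digest below are placeholders.
	chunkedBlob := digestCompressorData{
		baseVariantCompressor:     "zstd",
		specificVariantCompressor: "zstd:chunked",
		specificVariantAnnotations: map[string]string{
			"io.github.containers.zstd-chunked.manifest-checksum": "sha256:0000",
		},
	}
	// An uncompressed blob: the variant fields are not meaningful.
	uncompressedBlob := digestCompressorData{baseVariantCompressor: uncompressed}
	fmt.Println(gzipBlob, chunkedBlob, uncompressedBlob)
}
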
type BICReplacementCandidate2 struct { - Digest digest.Digest - CompressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed - CompressionAlgorithm *compressiontypes.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed - UnknownLocation bool // is true when `Location` for this blob is not set - Location types.BICLocationReference // not set if UnknownLocation is set to `true` + Digest digest.Digest + CompressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed + CompressionAlgorithm *compressiontypes.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed + CompressionAnnotations map[string]string // If necessary, annotations necessary to use CompressionAlgorithm + UnknownLocation bool // is true when `Location` for this blob is not set + Location types.BICLocationReference // not set if UnknownLocation is set to `true` } diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go index cdd3c5e5d..f5a38541a 100644 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go +++ b/vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go @@ -76,6 +76,9 @@ func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.Blob Size: blob.Size, CompressionOperation: blob.CompressionOperation, CompressionAlgorithm: blob.CompressionAlgorithm, + // CompressionAnnotations could be set to blob.Annotations, but that may contain unrelated + // annotations, and we didn’t use the blob.Annotations field previously, so we’ll + // continue not using it. }, nil } diff --git a/vendor/github.com/containers/image/v5/internal/manifest/manifest.go b/vendor/github.com/containers/image/v5/internal/manifest/manifest.go index ee0ddc772..3fb52104a 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/manifest.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/manifest.go @@ -205,11 +205,6 @@ type ReuseConditions struct { // (which can be nil to represent uncompressed or unknown) matches reuseConditions. func CandidateCompressionMatchesReuseConditions(c ReuseConditions, candidateCompression *compressiontypes.Algorithm) bool { if c.RequiredCompression != nil { - if c.RequiredCompression.Name() == compressiontypes.ZstdChunkedAlgorithmName { - // HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs. - // The caller must re-compress to build those annotations. - return false - } if candidateCompression == nil || (c.RequiredCompression.Name() != candidateCompression.Name() && c.RequiredCompression.Name() != candidateCompression.BaseVariantName()) { return false diff --git a/vendor/github.com/containers/image/v5/internal/private/private.go b/vendor/github.com/containers/image/v5/internal/private/private.go index 63fb9326d..d81ea6703 100644 --- a/vendor/github.com/containers/image/v5/internal/private/private.go +++ b/vendor/github.com/containers/image/v5/internal/private/private.go @@ -134,9 +134,14 @@ type ReusedBlob struct { Size int64 // Must be provided // The following compression fields should be set when the reuse substitutes // a differently-compressed blob. + // They may be set also to change from a base variant to a specific variant of an algorithm. 
CompressionOperation types.LayerCompression // Compress/Decompress, matching the reused blob; PreserveOriginal if N/A CompressionAlgorithm *compression.Algorithm // Algorithm if compressed, nil if decompressed or N/A + // Annotations that should be added, for CompressionAlgorithm. Note that they might need to be + // added even if the digest doesn’t change (if we found the annotations in a cache). + CompressionAnnotations map[string]string + MatchedByTOCDigest bool // Whether the layer was reused/matched by TOC digest. Used only for UI purposes. } diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go index 818166834..7e53f4f54 100644 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go @@ -202,7 +202,7 @@ func (m *Schema2) ConfigInfo() types.BlobInfo { // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. func (m *Schema2) LayerInfos() []LayerInfo { - blobs := []LayerInfo{} + blobs := make([]LayerInfo, 0, len(m.LayersDescriptors)) for _, layer := range m.LayersDescriptors { blobs = append(blobs, LayerInfo{ BlobInfo: BlobInfoFromSchema2Descriptor(layer), diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go index 497cf476e..f714574ee 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci.go +++ b/vendor/github.com/containers/image/v5/manifest/oci.go @@ -95,7 +95,7 @@ func (m *OCI1) ConfigInfo() types.BlobInfo { // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. func (m *OCI1) LayerInfos() []LayerInfo { - blobs := []LayerInfo{} + blobs := make([]LayerInfo, 0, len(m.Layers)) for _, layer := range m.Layers { blobs = append(blobs, LayerInfo{ BlobInfo: BlobInfoFromOCI1Descriptor(layer), diff --git a/vendor/github.com/containers/image/v5/ostree/ostree_src.go b/vendor/github.com/containers/image/v5/ostree/ostree_src.go index 85a89f253..0b597ce26 100644 --- a/vendor/github.com/containers/image/v5/ostree/ostree_src.go +++ b/vendor/github.com/containers/image/v5/ostree/ostree_src.go @@ -151,9 +151,9 @@ func openRepo(path string) (*C.struct_OstreeRepo, error) { var cerr *C.GError cpath := C.CString(path) defer C.free(unsafe.Pointer(cpath)) - pathc := C.g_file_new_for_path(cpath) - defer C.g_object_unref(C.gpointer(pathc)) - repo := C.ostree_repo_new(pathc) + file := C.g_file_new_for_path(cpath) + defer C.g_object_unref(C.gpointer(file)) + repo := C.ostree_repo_new(file) r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr))) if !r { C.g_object_unref(C.gpointer(repo)) diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go index 9cd9c8f7d..40ed09bed 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go @@ -25,57 +25,133 @@ const replacementAttempts = 5 // This is a heuristic/guess, and could well use a different value. 
const replacementUnknownLocationAttempts = 2 -// CandidateCompression returns (true, compressionOp, compressionAlgo) if a blob -// with compressionName (which can be Uncompressed or UnknownCompression) is acceptable for a CandidateLocations* call with v2Options. +// CandidateTemplate is a subset of BICReplacementCandidate2 with data related to a specific digest, +// which can be later combined with information about a location. +type CandidateTemplate struct { + digest digest.Digest + compressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed + compressionAlgorithm *compression.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed + compressionAnnotations map[string]string // If necessary, annotations necessary to use compressionAlgorithm +} + +// CandidateTemplateWithCompression returns a CandidateTemplate if a blob with data is acceptable +// for a CandidateLocations* call with v2Options. // // v2Options can be set to nil if the call is CandidateLocations (i.e. compression is not required to be known); // if not nil, the call is assumed to be CandidateLocations2. -// -// The (compressionOp, compressionAlgo) values are suitable for BICReplacementCandidate2 -func CandidateCompression(v2Options *blobinfocache.CandidateLocations2Options, digest digest.Digest, compressorName string) (bool, types.LayerCompression, *compression.Algorithm) { +func CandidateTemplateWithCompression(v2Options *blobinfocache.CandidateLocations2Options, digest digest.Digest, data blobinfocache.DigestCompressorData) *CandidateTemplate { if v2Options == nil { - return true, types.PreserveOriginal, nil // Anything goes. The (compressionOp, compressionAlgo) values are not used. + return &CandidateTemplate{ // Anything goes. The compressionOperation, compressionAlgorithm and compressionAnnotations values are not used. + digest: digest, + } } - var op types.LayerCompression - var algo *compression.Algorithm - switch compressorName { + requiredCompression := "nil" + if v2Options.RequiredCompression != nil { + requiredCompression = v2Options.RequiredCompression.Name() + } + switch data.BaseVariantCompressor { case blobinfocache.Uncompressed: - op = types.Decompress - algo = nil + if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{ + PossibleManifestFormats: v2Options.PossibleManifestFormats, + RequiredCompression: v2Options.RequiredCompression, + }, nil) { + logrus.Debugf("Ignoring BlobInfoCache record of digest %q, uncompressed format does not match required %s or MIME types %#v", + digest.String(), requiredCompression, v2Options.PossibleManifestFormats) + return nil + } + return &CandidateTemplate{ + digest: digest, + compressionOperation: types.Decompress, + compressionAlgorithm: nil, + compressionAnnotations: nil, + } case blobinfocache.UnknownCompression: logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unknown compression", digest.String()) - return false, types.PreserveOriginal, nil // Not allowed with CandidateLocations2 + return nil // Not allowed with CandidateLocations2 default: - op = types.Compress - algo_, err := compression.AlgorithmByName(compressorName) + // See if we can use the specific variant, first. 
+ if data.SpecificVariantCompressor != blobinfocache.UnknownCompression { + algo, err := compression.AlgorithmByName(data.SpecificVariantCompressor) + if err != nil { + logrus.Debugf("Not considering unrecognized specific compression variant %q for BlobInfoCache record of digest %q: %v", + data.SpecificVariantCompressor, digest.String(), err) + } else { + if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{ + PossibleManifestFormats: v2Options.PossibleManifestFormats, + RequiredCompression: v2Options.RequiredCompression, + }, &algo) { + logrus.Debugf("Ignoring specific compression variant %q for BlobInfoCache record of digest %q, it does not match required %s or MIME types %#v", + data.SpecificVariantCompressor, digest.String(), requiredCompression, v2Options.PossibleManifestFormats) + } else { + return &CandidateTemplate{ + digest: digest, + compressionOperation: types.Compress, + compressionAlgorithm: &algo, + compressionAnnotations: data.SpecificVariantAnnotations, + } + } + } + } + + // Try the base variant. + algo, err := compression.AlgorithmByName(data.BaseVariantCompressor) if err != nil { logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unrecognized compression %q: %v", - digest.String(), compressorName, err) - return false, types.PreserveOriginal, nil // The BICReplacementCandidate2.CompressionAlgorithm field is required + digest.String(), data.BaseVariantCompressor, err) + return nil // The BICReplacementCandidate2.CompressionAlgorithm field is required } - algo = &algo_ - } - if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{ - PossibleManifestFormats: v2Options.PossibleManifestFormats, - RequiredCompression: v2Options.RequiredCompression, - }, algo) { - requiredCompresssion := "nil" - if v2Options.RequiredCompression != nil { - requiredCompresssion = v2Options.RequiredCompression.Name() + if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{ + PossibleManifestFormats: v2Options.PossibleManifestFormats, + RequiredCompression: v2Options.RequiredCompression, + }, &algo) { + logrus.Debugf("Ignoring BlobInfoCache record of digest %q, compression %q does not match required %s or MIME types %#v", + digest.String(), data.BaseVariantCompressor, requiredCompression, v2Options.PossibleManifestFormats) + return nil + } + return &CandidateTemplate{ + digest: digest, + compressionOperation: types.Compress, + compressionAlgorithm: &algo, + compressionAnnotations: nil, } - logrus.Debugf("Ignoring BlobInfoCache record of digest %q, compression %q does not match required %s or MIME types %#v", - digest.String(), compressorName, requiredCompresssion, v2Options.PossibleManifestFormats) - return false, types.PreserveOriginal, nil } - - return true, op, algo } // CandidateWithTime is the input to types.BICReplacementCandidate prioritization. 
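
The selection order in CandidateTemplateWithCompression, condensed: prefer the recorded specific variant when it parses and passes the reuse conditions, otherwise fall back to the base variant. A sketch with strings and a caller-supplied predicate standing in for AlgorithmByName plus CandidateCompressionMatchesReuseConditions:

package prioritysketch

// pickVariant condenses the decision above. It returns "" when the digest
// cannot be offered as a candidate at all; error handling for unrecognized
// algorithm names is folded into the acceptable predicate here.
func pickVariant(base, specific string, acceptable func(string) bool) string {
	if specific != "unknown" && acceptable(specific) {
		return specific // e.g. "zstd:chunked", offered together with its annotations
	}
	if acceptable(base) {
		return base // e.g. plain "zstd", offered without annotations
	}
	return "" // neither variant matches the required compression / manifest formats
}
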
type CandidateWithTime struct { - Candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate - LastSeen time.Time // Time the candidate was last known to exist (either read or written) (not set for Candidate.UnknownLocation) + candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate + lastSeen time.Time // Time the candidate was last known to exist (either read or written) (not set for Candidate.UnknownLocation) +} + +// CandidateWithLocation returns a complete CandidateWithTime combining (template from CandidateTemplateWithCompression, location, lastSeen) +func (template CandidateTemplate) CandidateWithLocation(location types.BICLocationReference, lastSeen time.Time) CandidateWithTime { + return CandidateWithTime{ + candidate: blobinfocache.BICReplacementCandidate2{ + Digest: template.digest, + CompressionOperation: template.compressionOperation, + CompressionAlgorithm: template.compressionAlgorithm, + CompressionAnnotations: template.compressionAnnotations, + UnknownLocation: false, + Location: location, + }, + lastSeen: lastSeen, + } +} + +// CandidateWithUnknownLocation returns a complete CandidateWithTime for a template from CandidateTemplateWithCompression and an unknown location. +func (template CandidateTemplate) CandidateWithUnknownLocation() CandidateWithTime { + return CandidateWithTime{ + candidate: blobinfocache.BICReplacementCandidate2{ + Digest: template.digest, + CompressionOperation: template.compressionOperation, + CompressionAlgorithm: template.compressionAlgorithm, + CompressionAnnotations: template.compressionAnnotations, + UnknownLocation: true, + Location: types.BICLocationReference{Opaque: ""}, + }, + lastSeen: time.Time{}, + } } // candidateSortState is a closure for a comparison used by slices.SortFunc on candidates to prioritize, @@ -91,35 +167,35 @@ func (css *candidateSortState) compare(xi, xj CandidateWithTime) int { // Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order) // First, deal with the primaryDigest/uncompressedDigest cases: - if xi.Candidate.Digest != xj.Candidate.Digest { + if xi.candidate.Digest != xj.candidate.Digest { // - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter - if xi.Candidate.Digest == css.primaryDigest { + if xi.candidate.Digest == css.primaryDigest { return -1 } - if xj.Candidate.Digest == css.primaryDigest { + if xj.candidate.Digest == css.primaryDigest { return 1 } if css.uncompressedDigest != "" { - if xi.Candidate.Digest == css.uncompressedDigest { + if xi.candidate.Digest == css.uncompressedDigest { return 1 } - if xj.Candidate.Digest == css.uncompressedDigest { + if xj.candidate.Digest == css.uncompressedDigest { return -1 } } } else { // xi.Candidate.Digest == xj.Candidate.Digest // The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time - if xi.Candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.Candidate.Digest == css.uncompressedDigest) { - return -xi.LastSeen.Compare(xj.LastSeen) + if xi.candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.candidate.Digest == css.uncompressedDigest) { + return -xi.lastSeen.Compare(xj.lastSeen) } } // Neither of the digests are primaryDigest/uncompressedDigest: - if cmp := xi.LastSeen.Compare(xj.LastSeen); cmp != 0 { // Order primarily by time + if cmp := xi.lastSeen.Compare(xj.lastSeen); cmp != 0 { // Order 
primarily by time return -cmp } // Fall back to digest, if timestamps end up _exactly_ the same (how?!) - return cmp.Compare(xi.Candidate.Digest, xj.Candidate.Digest) + return cmp.Compare(xi.candidate.Digest, xj.candidate.Digest) } // destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the @@ -138,7 +214,7 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, uncompressedDigest: uncompressedDigest, }).compare) for _, candidate := range cs { - if candidate.Candidate.UnknownLocation { + if candidate.candidate.UnknownLocation { unknownLocationCandidates = append(unknownLocationCandidates, candidate) } else { knownLocationCandidates = append(knownLocationCandidates, candidate) @@ -150,11 +226,11 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, unknownLocationCandidatesUsed := min(noLocationLimit, remainingCapacity, len(unknownLocationCandidates)) res := make([]blobinfocache.BICReplacementCandidate2, knownLocationCandidatesUsed) for i := 0; i < knownLocationCandidatesUsed; i++ { - res[i] = knownLocationCandidates[i].Candidate + res[i] = knownLocationCandidates[i].candidate } // If candidates with unknown location are found, lets add them to final list for i := 0; i < unknownLocationCandidatesUsed; i++ { - res = append(res, unknownLocationCandidates[i].Candidate) + res = append(res, unknownLocationCandidates[i].candidate) } return res } diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go index 32aaa4d6f..9d4125d66 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go @@ -24,10 +24,11 @@ type locationKey struct { type cache struct { mutex sync.Mutex // The following fields can only be accessed with mutex held. - uncompressedDigests map[digest.Digest]digest.Digest - digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest] // stores a set of digests for each uncompressed digest - knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference - compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Uncompressed (not blobinfocache.UnknownCompression), for each digest + uncompressedDigests map[digest.Digest]digest.Digest + uncompressedDigestsByTOC map[digest.Digest]digest.Digest + digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest] // stores a set of digests for each uncompressed digest + knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference + compressors map[digest.Digest]blobinfocache.DigestCompressorData // stores compression data for each digest; BaseVariantCompressor != UnknownCompression } // New returns a BlobInfoCache implementation which is in-memory only. 
@@ -44,10 +45,11 @@ func New() types.BlobInfoCache { func new2() *cache { return &cache{ - uncompressedDigests: map[digest.Digest]digest.Digest{}, - digestsByUncompressed: map[digest.Digest]*set.Set[digest.Digest]{}, - knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{}, - compressors: map[digest.Digest]string{}, + uncompressedDigests: map[digest.Digest]digest.Digest{}, + uncompressedDigestsByTOC: map[digest.Digest]digest.Digest{}, + digestsByUncompressed: map[digest.Digest]*set.Set[digest.Digest]{}, + knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{}, + compressors: map[digest.Digest]blobinfocache.DigestCompressorData{}, } } @@ -104,6 +106,30 @@ func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompre anyDigestSet.Add(anyDigest) } +// UncompressedDigestForTOC returns an uncompressed digest corresponding to anyDigest. +// Returns "" if the uncompressed digest is unknown. +func (mem *cache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest { + mem.mutex.Lock() + defer mem.mutex.Unlock() + if d, ok := mem.uncompressedDigestsByTOC[tocDigest]; ok { + return d + } + return "" +} + +// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed. +// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. +// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. +// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) +func (mem *cache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) { + mem.mutex.Lock() + defer mem.mutex.Unlock() + if previous, ok := mem.uncompressedDigestsByTOC[tocDigest]; ok && previous != uncompressed { + logrus.Warnf("Uncompressed digest for blob with TOC %q previously recorded as %q, now %q", tocDigest, previous, uncompressed) + } + mem.uncompressedDigestsByTOC[tocDigest] = uncompressed +} + // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, // and can be reused given the opaque location data. func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { @@ -118,19 +144,40 @@ func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope type locationScope[location] = time.Now() // Possibly overwriting an older entry. } -// RecordDigestCompressorName records that the blob with the specified digest is either compressed with the specified -// algorithm, or uncompressed, or that we no longer know. -func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compressorName string) { +// RecordDigestCompressorData records data for the blob with the specified digest. +// WARNING: Only call this with LOCALLY VERIFIED data: +// - don’t record a compressor for a digest just because some remote author claims so +// (e.g. because a manifest says so); +// - don’t record the non-base variant or annotations if we are not _sure_ that the base variant +// and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them +// in a manifest) +// +// otherwise the cache could be poisoned and cause us to make incorrect edits to type +// information in a manifest. 
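
The merge rules of the in-memory implementation, mirrored as a stand-alone sketch: conflicting base or specific variants are logged, an unknown base erases the entry, and a previously recorded specific variant survives a later base-only record:

package memcachesketch

import "log"

const unknownCompression = "unknown" // stands in for blobinfocache.UnknownCompression

type compressorData struct {
	base        string
	specific    string
	annotations map[string]string
}

// record mirrors RecordDigestCompressorData below, minus locking and the
// real types.
func record(cache map[string]compressorData, digest string, data compressorData) {
	if prev, ok := cache[digest]; ok {
		if prev.base != data.base {
			log.Printf("base compressor for %s previously %s, now %s", digest, prev.base, data.base)
		} else if prev.specific != unknownCompression && data.specific != unknownCompression && prev.specific != data.specific {
			log.Printf("specific compressor for %s previously %s, now %s", digest, prev.specific, data.specific)
		}
		// Keep earlier specific-variant data when the new record lacks it.
		if data.base != unknownCompression && data.specific == unknownCompression && prev.specific != unknownCompression {
			data.specific = prev.specific
			data.annotations = prev.annotations
		}
	}
	if data.base == unknownCompression {
		delete(cache, digest)
		return
	}
	cache[digest] = data
}
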
+func (mem *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobinfocache.DigestCompressorData) { mem.mutex.Lock() defer mem.mutex.Unlock() - if previous, ok := mem.compressors[blobDigest]; ok && previous != compressorName { - logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", blobDigest, previous, compressorName) + if previous, ok := mem.compressors[anyDigest]; ok { + if previous.BaseVariantCompressor != data.BaseVariantCompressor { + logrus.Warnf("Base compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous.BaseVariantCompressor, data.BaseVariantCompressor) + } else if previous.SpecificVariantCompressor != blobinfocache.UnknownCompression && data.SpecificVariantCompressor != blobinfocache.UnknownCompression && + previous.SpecificVariantCompressor != data.SpecificVariantCompressor { + logrus.Warnf("Specific compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous.SpecificVariantCompressor, data.SpecificVariantCompressor) + } + // We don’t check SpecificVariantAnnotations for equality, it’s possible that their generation is not deterministic. + + // Preserve specific variant information if the incoming data does not have it. + if data.BaseVariantCompressor != blobinfocache.UnknownCompression && data.SpecificVariantCompressor == blobinfocache.UnknownCompression && + previous.SpecificVariantCompressor != blobinfocache.UnknownCompression { + data.SpecificVariantCompressor = previous.SpecificVariantCompressor + data.SpecificVariantAnnotations = previous.SpecificVariantAnnotations + } } - if compressorName == blobinfocache.UnknownCompression { - delete(mem.compressors, blobDigest) + if data.BaseVariantCompressor == blobinfocache.UnknownCompression { + delete(mem.compressors, anyDigest) return } - mem.compressors[blobDigest] = compressorName + mem.compressors[anyDigest] = data } // appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in memory @@ -140,38 +187,25 @@ func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compresso // with unknown compression. 
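
The per-digest candidate assembly is the same shape in both cache backends: a template ruled out by compression contributes nothing; otherwise there is one candidate per known location, with a single location-less candidate as the v2-only fallback. A sketch with simplified types:

package candidatesketch

import "time"

// Simplified stand-in for the prioritize package’s candidate type.
type candidate struct {
	digest          string
	location        string
	unknownLocation bool
	lastSeen        time.Time
}

// assemble mirrors the appendReplacementCandidates flow below; ruledOut
// corresponds to CandidateTemplateWithCompression returning nil.
func assemble(digest string, ruledOut bool, locations map[string]time.Time, v2 bool) []candidate {
	if ruledOut {
		return nil
	}
	var out []candidate
	for l, t := range locations {
		out = append(out, candidate{digest: digest, location: l, lastSeen: t})
	}
	if len(out) == 0 && v2 {
		out = append(out, candidate{digest: digest, unknownLocation: true})
	}
	return out
}
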
func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Options *blobinfocache.CandidateLocations2Options) []prioritize.CandidateWithTime { - compressorName := blobinfocache.UnknownCompression - if v, ok := mem.compressors[digest]; ok { - compressorName = v + compressionData := blobinfocache.DigestCompressorData{ + BaseVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, } - ok, compressionOp, compressionAlgo := prioritize.CandidateCompression(v2Options, digest, compressorName) - if !ok { + if v, ok := mem.compressors[digest]; ok { + compressionData = v + } + template := prioritize.CandidateTemplateWithCompression(v2Options, digest, compressionData) + if template == nil { return candidates } locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present if len(locations) > 0 { for l, t := range locations { - candidates = append(candidates, prioritize.CandidateWithTime{ - Candidate: blobinfocache.BICReplacementCandidate2{ - Digest: digest, - CompressionOperation: compressionOp, - CompressionAlgorithm: compressionAlgo, - Location: l, - }, - LastSeen: t, - }) + candidates = append(candidates, template.CandidateWithLocation(l, t)) } } else if v2Options != nil { - candidates = append(candidates, prioritize.CandidateWithTime{ - Candidate: blobinfocache.BICReplacementCandidate2{ - Digest: digest, - CompressionOperation: compressionOp, - CompressionAlgorithm: compressionAlgo, - UnknownLocation: true, - Location: types.BICLocationReference{Opaque: ""}, - }, - LastSeen: time.Time{}, - }) + candidates = append(candidates, template.CandidateWithUnknownLocation()) } return candidates } diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go index 4b7122f92..9a2219e79 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go @@ -34,6 +34,19 @@ func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest { func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) { } +// UncompressedDigestForTOC returns an uncompressed digest corresponding to anyDigest. +// Returns "" if the uncompressed digest is unknown. +func (noCache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest { + return "" +} + +// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed. +// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. +// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. +// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) +func (noCache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) { +} + // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, // and can be reused given the opaque location data. 
func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go index a5be85a65..1a7931023 100644 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go +++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go @@ -3,6 +3,7 @@ package sqlite import ( "database/sql" + "encoding/json" "errors" "fmt" "sync" @@ -295,6 +296,24 @@ func ensureDBHasCurrentSchema(db *sql.DB) error { `PRIMARY KEY (transport, scope, digest, location) )`, }, + { + "DigestTOCUncompressedPairs", + `CREATE TABLE IF NOT EXISTS DigestTOCUncompressedPairs(` + + // index implied by PRIMARY KEY + `tocDigest TEXT PRIMARY KEY NOT NULL,` + + `uncompressedDigest TEXT NOT NULL + )`, + }, + { + "DigestSpecificVariantCompressors", // If changing the schema incompatibly, merge this with DigestCompressors. + `CREATE TABLE IF NOT EXISTS DigestSpecificVariantCompressors(` + + // index implied by PRIMARY KEY + `digest TEXT PRIMARY KEY NOT NULL,` + + // The compressor is not `UnknownCompression`. + `specificVariantCompressor TEXT NOT NULL, + specificVariantAnnotations BLOB NOT NULL + )`, + }, } _, err := dbTransaction(db, func(tx *sql.Tx) (void, error) { @@ -385,6 +404,57 @@ func (sqc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompre }) // FIXME? Log error (but throttle the log volume on repeated accesses)? } +// UncompressedDigestForTOC returns an uncompressed digest corresponding to anyDigest. +// Returns "" if the uncompressed digest is unknown. +func (sqc *cache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest { + res, err := transaction(sqc, func(tx *sql.Tx) (digest.Digest, error) { + uncompressedString, found, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestTOCUncompressedPairs WHERE tocDigest = ?", tocDigest.String()) + if err != nil { + return "", err + } + if found { + d, err := digest.Parse(uncompressedString) + if err != nil { + return "", err + } + return d, nil + + } + return "", nil + }) + if err != nil { + return "" // FIXME? Log err (but throttle the log volume on repeated accesses)? + } + return res +} + +// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed. +// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. +// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. +// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) 
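
The two new tables can be exercised in isolation; the DDL below is taken from the schema above, while everything else (driver choice, digests) is illustrative:

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		panic(err)
	}
	defer db.Close()
	for _, ddl := range []string{
		`CREATE TABLE IF NOT EXISTS DigestTOCUncompressedPairs(tocDigest TEXT PRIMARY KEY NOT NULL, uncompressedDigest TEXT NOT NULL)`,
		`CREATE TABLE IF NOT EXISTS DigestSpecificVariantCompressors(digest TEXT PRIMARY KEY NOT NULL, specificVariantCompressor TEXT NOT NULL, specificVariantAnnotations BLOB NOT NULL)`,
	} {
		if _, err := db.Exec(ddl); err != nil {
			panic(err)
		}
	}
	// INSERT OR REPLACE keeps one row per TOC digest, as in RecordTOCUncompressedPair.
	if _, err := db.Exec(`INSERT OR REPLACE INTO DigestTOCUncompressedPairs(tocDigest, uncompressedDigest) VALUES (?, ?)`,
		"sha256:toc", "sha256:uncompressed"); err != nil {
		panic(err)
	}
	var u string
	if err := db.QueryRow(`SELECT uncompressedDigest FROM DigestTOCUncompressedPairs WHERE tocDigest = ?`,
		"sha256:toc").Scan(&u); err != nil {
		panic(err)
	}
	fmt.Println(u) // sha256:uncompressed
}
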
+func (sqc *cache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) { + _, _ = transaction(sqc, func(tx *sql.Tx) (void, error) { + previousString, gotPrevious, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestTOCUncompressedPairs WHERE tocDigest = ?", tocDigest.String()) + if err != nil { + return void{}, fmt.Errorf("looking for uncompressed digest for blob with TOC %q", tocDigest) + } + if gotPrevious { + previous, err := digest.Parse(previousString) + if err != nil { + return void{}, err + } + if previous != uncompressed { + logrus.Warnf("Uncompressed digest for blob with TOC %q previously recorded as %q, now %q", tocDigest, previous, uncompressed) + } + } + if _, err := tx.Exec("INSERT OR REPLACE INTO DigestTOCUncompressedPairs(tocDigest, uncompressedDigest) VALUES (?, ?)", + tocDigest.String(), uncompressed.String()); err != nil { + return void{}, fmt.Errorf("recording uncompressed digest %q for blob with TOC %q: %w", uncompressed, tocDigest, err) + } + return void{}, nil + }) // FIXME? Log error (but throttle the log volume on repeated accesses)? +} + // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, // and can be reused given the opaque location data. func (sqc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, location types.BICLocationReference) { @@ -398,29 +468,58 @@ func (sqc *cache) RecordKnownLocation(transport types.ImageTransport, scope type }) // FIXME? Log error (but throttle the log volume on repeated accesses)? } -// RecordDigestCompressorName records a compressor for the blob with the specified digest, -// or Uncompressed or UnknownCompression. -// WARNING: Only call this with LOCALLY VERIFIED data; don’t record a compressor for a -// digest just because some remote author claims so (e.g. because a manifest says so); +// RecordDigestCompressorData records data for the blob with the specified digest. +// WARNING: Only call this with LOCALLY VERIFIED data: +// - don’t record a compressor for a digest just because some remote author claims so +// (e.g. because a manifest says so); +// - don’t record the non-base variant or annotations if we are not _sure_ that the base variant +// and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them +// in a manifest) +// // otherwise the cache could be poisoned and cause us to make incorrect edits to type // information in a manifest. 
-func (sqc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) { +func (sqc *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobinfocache.DigestCompressorData) { _, _ = transaction(sqc, func(tx *sql.Tx) (void, error) { previous, gotPrevious, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", anyDigest.String()) if err != nil { - return void{}, fmt.Errorf("looking for compressor of for %q", anyDigest) + return void{}, fmt.Errorf("looking for compressor of %q", anyDigest) } - if gotPrevious && previous != compressorName { - logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous, compressorName) + warned := false + if gotPrevious && previous != data.BaseVariantCompressor { + logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous, data.BaseVariantCompressor) + warned = true } - if compressorName == blobinfocache.UnknownCompression { + if data.BaseVariantCompressor == blobinfocache.UnknownCompression { if _, err := tx.Exec("DELETE FROM DigestCompressors WHERE digest = ?", anyDigest.String()); err != nil { return void{}, fmt.Errorf("deleting compressor for digest %q: %w", anyDigest, err) } + if _, err := tx.Exec("DELETE FROM DigestSpecificVariantCompressors WHERE digest = ?", anyDigest.String()); err != nil { + return void{}, fmt.Errorf("deleting specific variant compressor for digest %q: %w", anyDigest, err) + } } else { if _, err := tx.Exec("INSERT OR REPLACE INTO DigestCompressors(digest, compressor) VALUES (?, ?)", - anyDigest.String(), compressorName); err != nil { - return void{}, fmt.Errorf("recording compressor %q for %q: %w", compressorName, anyDigest, err) + anyDigest.String(), data.BaseVariantCompressor); err != nil { + return void{}, fmt.Errorf("recording compressor %q for %q: %w", data.BaseVariantCompressor, anyDigest, err) + } + } + + if data.SpecificVariantCompressor != blobinfocache.UnknownCompression { + if !warned { // Don’t warn twice about the same digest + prevSVC, found, err := querySingleValue[string](tx, "SELECT specificVariantCompressor FROM DigestSpecificVariantCompressors WHERE digest = ?", anyDigest.String()) + if err != nil { + return void{}, fmt.Errorf("looking for specific variant compressor of %q", anyDigest) + } + if found && data.SpecificVariantCompressor != prevSVC { + logrus.Warnf("Specific compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, prevSVC, data.SpecificVariantCompressor) + } + } + annotations, err := json.Marshal(data.SpecificVariantAnnotations) + if err != nil { + return void{}, err + } + if _, err := tx.Exec("INSERT OR REPLACE INTO DigestSpecificVariantCompressors(digest, specificVariantCompressor, specificVariantAnnotations) VALUES (?, ?, ?)", + anyDigest.String(), data.SpecificVariantCompressor, annotations); err != nil { + return void{}, fmt.Errorf("recording specific variant compressor %q/%q for %q: %w", data.SpecificVariantCompressor, annotations, anyDigest, err) } } return void{}, nil @@ -433,18 +532,33 @@ func (sqc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressor // with unknown compression. 
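
The read side pairs DigestCompressors with the optional specific-variant row via a LEFT JOIN, as used in appendReplacementCandidates below; NULL columns surface as an invalid sql.NullString and a nil []byte. A sketch that, unlike the real code, passes sql.ErrNoRows through to the caller:

package sqlitesketch

import "database/sql"

// readCompressionData isolates the joined lookup: the LEFT JOIN keeps blobs
// that only have a base compressor recorded, yielding NULLs for the
// specific-variant columns.
func readCompressionData(tx *sql.Tx, digest string) (base string, specific sql.NullString, annotations []byte, err error) {
	err = tx.QueryRow(
		`SELECT compressor, specificVariantCompressor, specificVariantAnnotations `+
			`FROM DigestCompressors LEFT JOIN DigestSpecificVariantCompressors USING (digest) `+
			`WHERE digest = ?`, digest,
	).Scan(&base, &specific, &annotations)
	return
}
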
func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Options *blobinfocache.CandidateLocations2Options) ([]prioritize.CandidateWithTime, error) { - compressorName := blobinfocache.UnknownCompression + compressionData := blobinfocache.DigestCompressorData{ + BaseVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantCompressor: blobinfocache.UnknownCompression, + SpecificVariantAnnotations: nil, + } if v2Options != nil { - compressor, found, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", digest.String()) - if err != nil { - return nil, fmt.Errorf("scanning compressorName: %w", err) - } - if found { - compressorName = compressor + var baseVariantCompressor string + var specificVariantCompressor sql.NullString + var annotationBytes []byte + switch err := tx.QueryRow("SELECT compressor, specificVariantCompressor, specificVariantAnnotations "+ + "FROM DigestCompressors LEFT JOIN DigestSpecificVariantCompressors USING (digest) WHERE digest = ?", digest.String()). + Scan(&baseVariantCompressor, &specificVariantCompressor, &annotationBytes); { + case errors.Is(err, sql.ErrNoRows): // Do nothing + case err != nil: + return nil, fmt.Errorf("scanning compressor data: %w", err) + default: + compressionData.BaseVariantCompressor = baseVariantCompressor + if specificVariantCompressor.Valid && annotationBytes != nil { + compressionData.SpecificVariantCompressor = specificVariantCompressor.String + if err := json.Unmarshal(annotationBytes, &compressionData.SpecificVariantAnnotations); err != nil { + return nil, err + } + } } } - ok, compressionOp, compressionAlgo := prioritize.CandidateCompression(v2Options, digest, compressorName) - if !ok { + template := prioritize.CandidateTemplateWithCompression(v2Options, digest, compressionData) + if template == nil { return candidates, nil } @@ -463,15 +577,7 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW if err := rows.Scan(&location, &time); err != nil { return nil, fmt.Errorf("scanning candidate: %w", err) } - candidates = append(candidates, prioritize.CandidateWithTime{ - Candidate: blobinfocache.BICReplacementCandidate2{ - Digest: digest, - CompressionOperation: compressionOp, - CompressionAlgorithm: compressionAlgo, - Location: types.BICLocationReference{Opaque: location}, - }, - LastSeen: time, - }) + candidates = append(candidates, template.CandidateWithLocation(types.BICLocationReference{Opaque: location}, time)) rowAdded = true } if err := rows.Err(); err != nil { @@ -479,16 +585,7 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW } if !rowAdded && v2Options != nil { - candidates = append(candidates, prioritize.CandidateWithTime{ - Candidate: blobinfocache.BICReplacementCandidate2{ - Digest: digest, - CompressionOperation: compressionOp, - CompressionAlgorithm: compressionAlgo, - UnknownLocation: true, - Location: types.BICLocationReference{Opaque: ""}, - }, - LastSeen: time.Time{}, - }) + candidates = append(candidates, template.CandidateWithUnknownLocation()) } return candidates, nil } @@ -516,40 +613,41 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types if err != nil { return nil, err } - - // FIXME? We could integrate this with appendReplacementCandidates into a single join instead of N+1 queries. 
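// The QueryRow/Scan idiom above, in isolation: a LEFT JOIN produces NULLs for
// right-hand columns when no row matches, so those are scanned into sql.NullString
// and a nillable []byte, and sql.ErrNoRows means “digest unknown”, not a failure.
// This helper is hypothetical; the query text mirrors the one above.
package sketch

import (
	"database/sql"
	"errors"
	"fmt"
)

func lookupCompressorData(tx *sql.Tx, digest string) (base string, specific sql.NullString, annotations []byte, err error) {
	switch err := tx.QueryRow("SELECT compressor, specificVariantCompressor, specificVariantAnnotations "+
		"FROM DigestCompressors LEFT JOIN DigestSpecificVariantCompressors USING (digest) WHERE digest = ?",
		digest).Scan(&base, &specific, &annotations); {
	case errors.Is(err, sql.ErrNoRows): // Unknown digest: fall through and return zero values.
	case err != nil:
		return "", sql.NullString{}, nil, fmt.Errorf("scanning compressor data: %w", err)
	}
	return base, specific, annotations, nil
}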
- // (In the extreme, we could turn _everything_ this function does into a single query. - // And going even further, even DestructivelyPrioritizeReplacementCandidates could be turned into SQL.) - // For now, we prioritize simplicity, and sharing both code and implementation structure with the other cache implementations. - rows, err := tx.Query("SELECT anyDigest FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", uncompressedDigest.String()) - if err != nil { - return nil, fmt.Errorf("querying for other digests: %w", err) - } - defer rows.Close() - for rows.Next() { - var otherDigestString string - if err := rows.Scan(&otherDigestString); err != nil { - return nil, fmt.Errorf("scanning other digest: %w", err) - } - otherDigest, err := digest.Parse(otherDigestString) + if uncompressedDigest != "" { + // FIXME? We could integrate this with appendReplacementCandidates into a single join instead of N+1 queries. + // (In the extreme, we could turn _everything_ this function does into a single query. + // And going even further, even DestructivelyPrioritizeReplacementCandidates could be turned into SQL.) + // For now, we prioritize simplicity, and sharing both code and implementation structure with the other cache implementations. + rows, err := tx.Query("SELECT anyDigest FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", uncompressedDigest.String()) if err != nil { - return nil, err + return nil, fmt.Errorf("querying for other digests: %w", err) } - if otherDigest != primaryDigest && otherDigest != uncompressedDigest { - res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Options) + defer rows.Close() + for rows.Next() { + var otherDigestString string + if err := rows.Scan(&otherDigestString); err != nil { + return nil, fmt.Errorf("scanning other digest: %w", err) + } + otherDigest, err := digest.Parse(otherDigestString) if err != nil { return nil, err } + if otherDigest != primaryDigest && otherDigest != uncompressedDigest { + res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Options) + if err != nil { + return nil, err + } + } + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("iterating through other digests: %w", err) } - } - if err := rows.Err(); err != nil { - return nil, fmt.Errorf("iterating through other digests: %w", err) - } - if uncompressedDigest != primaryDigest { - res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Options) - if err != nil { - return nil, err + if uncompressedDigest != primaryDigest { + res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Options) + if err != nil { + return nil, err + } } } } diff --git a/vendor/github.com/containers/image/v5/signature/fulcio_cert.go b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go index 4e9986422..31dfdd342 100644 --- a/vendor/github.com/containers/image/v5/signature/fulcio_cert.go +++ b/vendor/github.com/containers/image/v5/signature/fulcio_cert.go @@ -195,10 +195,10 @@ func (f *fulcioTrustRoot) verifyFulcioCertificateAtTime(relevantTime time.Time, return untrustedCertificate.PublicKey, nil } -func verifyRekorFulcio(rekorPublicKey *ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte, +func verifyRekorFulcio(rekorPublicKeys []*ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte, untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string, 
untrustedPayloadBytes []byte) (crypto.PublicKey, error) { - rekorSETTime, err := internal.VerifyRekorSET(rekorPublicKey, untrustedRekorSET, untrustedCertificateBytes, + rekorSETTime, err := internal.VerifyRekorSET(rekorPublicKeys, untrustedRekorSET, untrustedCertificateBytes, untrustedBase64Signature, untrustedPayloadBytes) if err != nil { return nil, err diff --git a/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go index e79c91cf9..604c21c08 100644 --- a/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go +++ b/vendor/github.com/containers/image/v5/signature/internal/rekor_set.go @@ -113,7 +113,7 @@ func (p UntrustedRekorPayload) MarshalJSON() ([]byte, error) { // VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by publicKey and matches the rest of the data. // Returns bundle upload time on success. -func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) { +func VerifyRekorSET(publicKeys []*ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) { // FIXME: Should the publicKey parameter hard-code ecdsa? // == Parse SET bytes @@ -130,7 +130,14 @@ func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unver return time.Time{}, NewInvalidSignatureError(fmt.Sprintf("canonicalizing Rekor SET JSON: %v", err)) } untrustedSETPayloadHash := sha256.Sum256(untrustedSETPayloadCanonicalBytes) - if !ecdsa.VerifyASN1(publicKey, untrustedSETPayloadHash[:], untrustedSET.UntrustedSignedEntryTimestamp) { + publicKeymatched := false + for _, pk := range publicKeys { + if ecdsa.VerifyASN1(pk, untrustedSETPayloadHash[:], untrustedSET.UntrustedSignedEntryTimestamp) { + publicKeymatched = true + break + } + } + if !publicKeymatched { return time.Time{}, NewInvalidSignatureError("cryptographic signature verification of Rekor SET failed") } diff --git a/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go index a2609c954..8630b08e3 100644 --- a/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go +++ b/vendor/github.com/containers/image/v5/signature/internal/sigstore_payload.go @@ -7,6 +7,7 @@ import ( "encoding/json" "errors" "fmt" + "strings" "time" "github.com/containers/image/v5/version" @@ -171,24 +172,62 @@ type SigstorePayloadAcceptanceRules struct { ValidateSignedDockerManifestDigest func(digest.Digest) error } -// VerifySigstorePayload verifies unverifiedBase64Signature of unverifiedPayload was correctly created by publicKey, and that its principal components +// verifySigstorePayloadBlobSignature verifies unverifiedSignature of unverifiedPayload was correctly created +// by any of the public keys in publicKeys. +// +// This is an internal implementation detail of VerifySigstorePayload and should have no other callers. +// It is INSUFFICIENT alone to consider the signature acceptable. 
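// The core of the VerifyRekorSET change above: the SET is accepted if any one of the
// trusted Rekor public keys verifies its SignedEntryTimestamp. A stdlib-only sketch;
// the payload parameter stands in for the canonicalized SET JSON.
package sketch

import (
	"crypto/ecdsa"
	"crypto/sha256"
)

func anyKeyVerifies(keys []*ecdsa.PublicKey, payload, asn1Signature []byte) bool {
	hash := sha256.Sum256(payload)
	for _, pk := range keys {
		if ecdsa.VerifyASN1(pk, hash[:], asn1Signature) {
			return true // First matching key wins; the rest don’t need to be tried.
		}
	}
	return false
}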
+func verifySigstorePayloadBlobSignature(publicKeys []crypto.PublicKey, unverifiedPayload, unverifiedSignature []byte) error { + if len(publicKeys) == 0 { + return errors.New("Need at least one public key to verify the sigstore payload, but got 0") + } + + verifiers := make([]sigstoreSignature.Verifier, 0, len(publicKeys)) + for _, key := range publicKeys { + // Failing to load a verifier indicates that something is really, really + // invalid about the public key; prefer to fail even if the signature might be + // valid with other keys, so that users fix their fallback keys before they need them. + // For that reason, we even initialize all verifiers before trying to validate the signature + // with any key. + verifier, err := sigstoreSignature.LoadVerifier(key, sigstoreHarcodedHashAlgorithm) + if err != nil { + return err + } + verifiers = append(verifiers, verifier) + } + + var failures []string + for _, verifier := range verifiers { + // github.com/sigstore/cosign/pkg/cosign.verifyOCISignature uses signatureoptions.WithContext(), + // which seems to be not used by anything. So we don’t bother. + err := verifier.VerifySignature(bytes.NewReader(unverifiedSignature), bytes.NewReader(unverifiedPayload)) + if err == nil { + return nil + } + + failures = append(failures, err.Error()) + } + + if len(failures) == 0 { + // Coverage: We have checked there is at least one public key, any success causes an early return, + // and any failure adds an entry to failures => there must be at least one error. + return fmt.Errorf("Internal error: signature verification failed but no errors have been recorded") + } + return NewInvalidSignatureError("cryptographic signature verification failed: " + strings.Join(failures, ", ")) +} + +// VerifySigstorePayload verifies unverifiedBase64Signature of unverifiedPayload was correctly created by any of the public keys in publicKeys, and that its principal components // match expected values, both as specified by rules, and returns it. // We return an *UntrustedSigstorePayload, although nothing actually uses it, // just to double-check against stupid typos. -func VerifySigstorePayload(publicKey crypto.PublicKey, unverifiedPayload []byte, unverifiedBase64Signature string, rules SigstorePayloadAcceptanceRules) (*UntrustedSigstorePayload, error) { - verifier, err := sigstoreSignature.LoadVerifier(publicKey, sigstoreHarcodedHashAlgorithm) - if err != nil { - return nil, fmt.Errorf("creating verifier: %w", err) - } - +func VerifySigstorePayload(publicKeys []crypto.PublicKey, unverifiedPayload []byte, unverifiedBase64Signature string, rules SigstorePayloadAcceptanceRules) (*UntrustedSigstorePayload, error) { unverifiedSignature, err := base64.StdEncoding.DecodeString(unverifiedBase64Signature) if err != nil { return nil, NewInvalidSignatureError(fmt.Sprintf("base64 decoding: %v", err)) } - // github.com/sigstore/cosign/pkg/cosign.verifyOCISignature uses signatureoptions.WithContext(), - // which seems to be not used by anything. So we don’t bother. 
- if err := verifier.VerifySignature(bytes.NewReader(unverifiedSignature), bytes.NewReader(unverifiedPayload)); err != nil { - return nil, NewInvalidSignatureError(fmt.Sprintf("cryptographic signature verification failed: %v", err)) + + if err := verifySigstorePayloadBlobSignature(publicKeys, unverifiedPayload, unverifiedSignature); err != nil { + return nil, err } var unmatchedPayload UntrustedSigstorePayload diff --git a/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go b/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go index beb5d0673..965901e18 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go +++ b/vendor/github.com/containers/image/v5/signature/policy_config_sigstore.go @@ -2,7 +2,6 @@ package signature import ( "encoding/json" - "errors" "fmt" "github.com/containers/image/v5/signature/internal" @@ -15,29 +14,57 @@ type PRSigstoreSignedOption func(*prSigstoreSigned) error func PRSigstoreSignedWithKeyPath(keyPath string) PRSigstoreSignedOption { return func(pr *prSigstoreSigned) error { if pr.KeyPath != "" { - return errors.New(`"keyPath" already specified`) + return InvalidPolicyFormatError(`"keyPath" already specified`) } pr.KeyPath = keyPath return nil } } +// PRSigstoreSignedWithKeyPaths specifies a value for the "keyPaths" field when calling NewPRSigstoreSigned. +func PRSigstoreSignedWithKeyPaths(keyPaths []string) PRSigstoreSignedOption { + return func(pr *prSigstoreSigned) error { + if pr.KeyPaths != nil { + return InvalidPolicyFormatError(`"keyPaths" already specified`) + } + if len(keyPaths) == 0 { + return InvalidPolicyFormatError(`"keyPaths" contains no entries`) + } + pr.KeyPaths = keyPaths + return nil + } +} + // PRSigstoreSignedWithKeyData specifies a value for the "keyData" field when calling NewPRSigstoreSigned. func PRSigstoreSignedWithKeyData(keyData []byte) PRSigstoreSignedOption { return func(pr *prSigstoreSigned) error { if pr.KeyData != nil { - return errors.New(`"keyData" already specified`) + return InvalidPolicyFormatError(`"keyData" already specified`) } pr.KeyData = keyData return nil } } +// PRSigstoreSignedWithKeyDatas specifies a value for the "keyDatas" field when calling NewPRSigstoreSigned. +func PRSigstoreSignedWithKeyDatas(keyDatas [][]byte) PRSigstoreSignedOption { + return func(pr *prSigstoreSigned) error { + if pr.KeyDatas != nil { + return InvalidPolicyFormatError(`"keyDatas" already specified`) + } + if len(keyDatas) == 0 { + return InvalidPolicyFormatError(`"keyDatas" contains no entries`) + } + pr.KeyDatas = keyDatas + return nil + } +} + // PRSigstoreSignedWithFulcio specifies a value for the "fulcio" field when calling NewPRSigstoreSigned. 
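// These PRSigstoreSignedWith… helpers follow the functional-options pattern; here it
// is in miniature with illustrative types (config/option/newConfig are not c/image
// names): each option rejects duplicate assignment, and the constructor applies the
// options in order and surfaces the first error.
package sketch

import "errors"

type config struct{ keyPath string }

type option func(*config) error

func withKeyPath(p string) option {
	return func(c *config) error {
		if c.keyPath != "" {
			return errors.New(`"keyPath" already specified`)
		}
		c.keyPath = p
		return nil
	}
}

func newConfig(opts ...option) (*config, error) {
	c := &config{}
	for _, o := range opts {
		if err := o(c); err != nil {
			return nil, err
		}
	}
	return c, nil
}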
func PRSigstoreSignedWithFulcio(fulcio PRSigstoreSignedFulcio) PRSigstoreSignedOption { return func(pr *prSigstoreSigned) error { if pr.Fulcio != nil { - return errors.New(`"fulcio" already specified`) + return InvalidPolicyFormatError(`"fulcio" already specified`) } pr.Fulcio = fulcio return nil @@ -48,29 +75,57 @@ func PRSigstoreSignedWithFulcio(fulcio PRSigstoreSignedFulcio) PRSigstoreSignedO func PRSigstoreSignedWithRekorPublicKeyPath(rekorPublicKeyPath string) PRSigstoreSignedOption { return func(pr *prSigstoreSigned) error { if pr.RekorPublicKeyPath != "" { - return errors.New(`"rekorPublicKeyPath" already specified`) + return InvalidPolicyFormatError(`"rekorPublicKeyPath" already specified`) } pr.RekorPublicKeyPath = rekorPublicKeyPath return nil } } +// PRSigstoreSignedWithRekorPublicKeyPaths specifies a value for the "rekorPublicKeyPaths" field when calling NewPRSigstoreSigned. +func PRSigstoreSignedWithRekorPublicKeyPaths(rekorPublickeyPaths []string) PRSigstoreSignedOption { + return func(pr *prSigstoreSigned) error { + if pr.RekorPublicKeyPaths != nil { + return InvalidPolicyFormatError(`"rekorPublickeyPaths" already specified`) + } + if len(rekorPublickeyPaths) == 0 { + return InvalidPolicyFormatError(`"rekorPublickeyPaths" contains no entries`) + } + pr.RekorPublicKeyPaths = rekorPublickeyPaths + return nil + } +} + // PRSigstoreSignedWithRekorPublicKeyData specifies a value for the "rekorPublicKeyData" field when calling NewPRSigstoreSigned. func PRSigstoreSignedWithRekorPublicKeyData(rekorPublicKeyData []byte) PRSigstoreSignedOption { return func(pr *prSigstoreSigned) error { if pr.RekorPublicKeyData != nil { - return errors.New(`"rekorPublicKeyData" already specified`) + return InvalidPolicyFormatError(`"rekorPublicKeyData" already specified`) } pr.RekorPublicKeyData = rekorPublicKeyData return nil } } +// PRSigstoreSignedWithRekorPublicKeyDatas specifies a value for the "rekorPublickeyDatas" field when calling NewPRSigstoreSigned. +func PRSigstoreSignedWithRekorPublicKeyDatas(rekorPublickeyDatas [][]byte) PRSigstoreSignedOption { + return func(pr *prSigstoreSigned) error { + if pr.RekorPublicKeyDatas != nil { + return InvalidPolicyFormatError(`"rekorPublickeyDatas" already specified`) + } + if len(rekorPublickeyDatas) == 0 { + return InvalidPolicyFormatError(`"rekorPublickeyDatas" contains no entries`) + } + pr.RekorPublicKeyDatas = rekorPublickeyDatas + return nil + } +} + // PRSigstoreSignedWithSignedIdentity specifies a value for the "signedIdentity" field when calling NewPRSigstoreSigned.
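// newPRSigstoreSigned (just below) enforces its “exactly one of …” and “at most one
// of …” rules by incrementing a counter per configured source; the shape of that
// check, with a hypothetical helper:
package sketch

func countSet(flags ...bool) int {
	n := 0
	for _, f := range flags {
		if f {
			n++
		}
	}
	return n
}

// Mirroring the validation below:
//   key sources:   countSet(keyPath != "", keyPaths != nil, keyData != nil, keyDatas != nil, fulcio != nil) must be exactly 1
//   Rekor sources: countSet(rekorPath != "", rekorPaths != nil, rekorData != nil, rekorDatas != nil) must be at most 1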
func PRSigstoreSignedWithSignedIdentity(signedIdentity PolicyReferenceMatch) PRSigstoreSignedOption { return func(pr *prSigstoreSigned) error { if pr.SignedIdentity != nil { - return errors.New(`"signedIdentity" already specified`) + return InvalidPolicyFormatError(`"signedIdentity" already specified`) } pr.SignedIdentity = signedIdentity return nil @@ -92,21 +147,40 @@ func newPRSigstoreSigned(options ...PRSigstoreSignedOption) (*prSigstoreSigned, if res.KeyPath != "" { keySources++ } + if res.KeyPaths != nil { + keySources++ + } if res.KeyData != nil { keySources++ } + if res.KeyDatas != nil { + keySources++ + } if res.Fulcio != nil { keySources++ } if keySources != 1 { - return nil, InvalidPolicyFormatError("exactly one of keyPath, keyData and fulcio must be specified") + return nil, InvalidPolicyFormatError("exactly one of keyPath, keyPaths, keyData, keyDatas and fulcio must be specified") } - if res.RekorPublicKeyPath != "" && res.RekorPublicKeyData != nil { - return nil, InvalidPolicyFormatError("rekorPublickeyType and rekorPublickeyData cannot be used simultaneously") + rekorSources := 0 + if res.RekorPublicKeyPath != "" { + rekorSources++ } - if res.Fulcio != nil && res.RekorPublicKeyPath == "" && res.RekorPublicKeyData == nil { - return nil, InvalidPolicyFormatError("At least one of RekorPublickeyPath and RekorPublickeyData must be specified if fulcio is used") + if res.RekorPublicKeyPaths != nil { + rekorSources++ + } + if res.RekorPublicKeyData != nil { + rekorSources++ + } + if res.RekorPublicKeyDatas != nil { + rekorSources++ + } + if rekorSources > 1 { + return nil, InvalidPolicyFormatError("at most one of rekorPublickeyPath, rekorPublicKeyPaths, rekorPublickeyData and rekorPublicKeyDatas can be used simultaneously") + } + if res.Fulcio != nil && rekorSources == 0 { + return nil, InvalidPolicyFormatError("At least one of rekorPublickeyPath, rekorPublicKeyPaths, rekorPublickeyData and rekorPublicKeyDatas must be specified if fulcio is used") } if res.SignedIdentity == nil { @@ -144,7 +218,8 @@ var _ json.Unmarshaler = (*prSigstoreSigned)(nil) func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error { *pr = prSigstoreSigned{} var tmp prSigstoreSigned - var gotKeyPath, gotKeyData, gotFulcio, gotRekorPublicKeyPath, gotRekorPublicKeyData bool + var gotKeyPath, gotKeyPaths, gotKeyData, gotKeyDatas, gotFulcio bool + var gotRekorPublicKeyPath, gotRekorPublicKeyPaths, gotRekorPublicKeyData, gotRekorPublicKeyDatas bool var fulcio prSigstoreSignedFulcio var signedIdentity json.RawMessage if err := internal.ParanoidUnmarshalJSONObject(data, func(key string) any { @@ -154,18 +229,30 @@ func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error { case "keyPath": gotKeyPath = true return &tmp.KeyPath + case "keyPaths": + gotKeyPaths = true + return &tmp.KeyPaths case "keyData": gotKeyData = true return &tmp.KeyData + case "keyDatas": + gotKeyDatas = true + return &tmp.KeyDatas case "fulcio": gotFulcio = true return &fulcio case "rekorPublicKeyPath": gotRekorPublicKeyPath = true return &tmp.RekorPublicKeyPath + case "rekorPublicKeyPaths": + gotRekorPublicKeyPaths = true + return &tmp.RekorPublicKeyPaths case "rekorPublicKeyData": gotRekorPublicKeyData = true return &tmp.RekorPublicKeyData + case "rekorPublicKeyDatas": + gotRekorPublicKeyDatas = true + return &tmp.RekorPublicKeyDatas case "signedIdentity": return &signedIdentity default: @@ -192,18 +279,30 @@ func (pr *prSigstoreSigned) UnmarshalJSON(data []byte) error { if gotKeyPath { opts = append(opts, 
PRSigstoreSignedWithKeyPath(tmp.KeyPath)) } + if gotKeyPaths { + opts = append(opts, PRSigstoreSignedWithKeyPaths(tmp.KeyPaths)) + } if gotKeyData { opts = append(opts, PRSigstoreSignedWithKeyData(tmp.KeyData)) } + if gotKeyDatas { + opts = append(opts, PRSigstoreSignedWithKeyDatas(tmp.KeyDatas)) + } if gotFulcio { opts = append(opts, PRSigstoreSignedWithFulcio(&fulcio)) } if gotRekorPublicKeyPath { opts = append(opts, PRSigstoreSignedWithRekorPublicKeyPath(tmp.RekorPublicKeyPath)) } + if gotRekorPublicKeyPaths { + opts = append(opts, PRSigstoreSignedWithRekorPublicKeyPaths(tmp.RekorPublicKeyPaths)) + } if gotRekorPublicKeyData { opts = append(opts, PRSigstoreSignedWithRekorPublicKeyData(tmp.RekorPublicKeyData)) } + if gotRekorPublicKeyDatas { + opts = append(opts, PRSigstoreSignedWithRekorPublicKeyDatas(tmp.RekorPublicKeyDatas)) + } opts = append(opts, PRSigstoreSignedWithSignedIdentity(tmp.SignedIdentity)) res, err := newPRSigstoreSigned(opts...) @@ -221,7 +320,7 @@ type PRSigstoreSignedFulcioOption func(*prSigstoreSignedFulcio) error func PRSigstoreSignedFulcioWithCAPath(caPath string) PRSigstoreSignedFulcioOption { return func(f *prSigstoreSignedFulcio) error { if f.CAPath != "" { - return errors.New(`"caPath" already specified`) + return InvalidPolicyFormatError(`"caPath" already specified`) } f.CAPath = caPath return nil @@ -232,7 +331,7 @@ func PRSigstoreSignedFulcioWithCAPath(caPath string) PRSigstoreSignedFulcioOptio func PRSigstoreSignedFulcioWithCAData(caData []byte) PRSigstoreSignedFulcioOption { return func(f *prSigstoreSignedFulcio) error { if f.CAData != nil { - return errors.New(`"caData" already specified`) + return InvalidPolicyFormatError(`"caData" already specified`) } f.CAData = caData return nil @@ -243,7 +342,7 @@ func PRSigstoreSignedFulcioWithCAData(caData []byte) PRSigstoreSignedFulcioOptio func PRSigstoreSignedFulcioWithOIDCIssuer(oidcIssuer string) PRSigstoreSignedFulcioOption { return func(f *prSigstoreSignedFulcio) error { if f.OIDCIssuer != "" { - return errors.New(`"oidcIssuer" already specified`) + return InvalidPolicyFormatError(`"oidcIssuer" already specified`) } f.OIDCIssuer = oidcIssuer return nil @@ -254,7 +353,7 @@ func PRSigstoreSignedFulcioWithOIDCIssuer(oidcIssuer string) PRSigstoreSignedFul func PRSigstoreSignedFulcioWithSubjectEmail(subjectEmail string) PRSigstoreSignedFulcioOption { return func(f *prSigstoreSignedFulcio) error { if f.SubjectEmail != "" { - return errors.New(`"subjectEmail" already specified`) + return InvalidPolicyFormatError(`"subjectEmail" already specified`) } f.SubjectEmail = subjectEmail return nil diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go index 896ca5a60..e5c932918 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_signedby.go @@ -6,7 +6,6 @@ import ( "context" "errors" "fmt" - "os" "slices" "github.com/containers/image/v5/internal/multierr" @@ -27,33 +26,18 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(ctx context.Context, image priva } // FIXME: move this to per-context initialization - var data [][]byte - keySources := 0 - if pr.KeyPath != "" { - keySources++ - d, err := os.ReadFile(pr.KeyPath) - if err != nil { - return sarRejected, nil, err - } - data = [][]byte{d} + const notOneSourceErrorText = `Internal inconsistency: not exactly one of "keyPath", "keyPaths" and "keyData" specified` + data, err := 
loadBytesFromConfigSources(configBytesSources{ + inconsistencyErrorMessage: notOneSourceErrorText, + path: pr.KeyPath, + paths: pr.KeyPaths, + data: pr.KeyData, + }) + if err != nil { + return sarRejected, nil, err } - if pr.KeyPaths != nil { - keySources++ - data = [][]byte{} - for _, path := range pr.KeyPaths { - d, err := os.ReadFile(path) - if err != nil { - return sarRejected, nil, err - } - data = append(data, d) - } - } - if pr.KeyData != nil { - keySources++ - data = [][]byte{pr.KeyData} - } - if keySources != 1 { - return sarRejected, nil, errors.New(`Internal inconsistency: not exactly one of "keyPath", "keyPaths" and "keyData" specified`) + if data == nil { + return sarRejected, nil, errors.New(notOneSourceErrorText) } // FIXME: move this to per-context initialization diff --git a/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go b/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go index 485165077..9c553771c 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go +++ b/vendor/github.com/containers/image/v5/signature/policy_eval_sigstore.go @@ -10,6 +10,7 @@ import ( "errors" "fmt" "os" + "strings" "github.com/containers/image/v5/internal/multierr" "github.com/containers/image/v5/internal/private" @@ -20,37 +21,69 @@ import ( "github.com/sigstore/sigstore/pkg/cryptoutils" ) -// loadBytesFromDataOrPath ensures there is at most one of ${prefix}Data and ${prefix}Path set, +// configBytesSources contains configuration fields which may result in one or more []byte values +type configBytesSources struct { + inconsistencyErrorMessage string // Error to return if more than one source is set + path string // …Path: a path to a file containing the data, or "" + paths []string // …Paths: paths to files containing the data, or nil + data []byte // …Data: a single instance of the raw data, or nil + datas [][]byte // …Datas: the raw data, or nil // codespell:ignore datas +} + +// loadBytesFromConfigSources ensures at most one of the sources in src is set, // and returns the referenced data, or nil if neither is set. -func loadBytesFromDataOrPath(prefix string, data []byte, path string) ([]byte, error) { - switch { - case data != nil && path != "": - return nil, fmt.Errorf(`Internal inconsistency: both "%sPath" and "%sData" specified`, prefix, prefix) - case path != "": - d, err := os.ReadFile(path) +func loadBytesFromConfigSources(src configBytesSources) ([][]byte, error) { + sources := 0 + var data [][]byte // = nil + if src.path != "" { + sources++ + d, err := os.ReadFile(src.path) if err != nil { return nil, err } - return d, nil - case data != nil: - return data, nil - default: // Nothing - return nil, nil + data = [][]byte{d} } + if src.paths != nil { + sources++ + data = [][]byte{} + for _, path := range src.paths { + d, err := os.ReadFile(path) + if err != nil { + return nil, err + } + data = append(data, d) + } + } + if src.data != nil { + sources++ + data = [][]byte{src.data} + } + if src.datas != nil { // codespell:ignore datas + sources++ + data = src.datas // codespell:ignore datas + } + if sources > 1 { + return nil, errors.New(src.inconsistencyErrorMessage) + } + return data, nil } // prepareTrustRoot creates a fulcioTrustRoot from the input data. // (This also prevents external implementations of this interface, ensuring that prSigstoreSignedFulcio is the only one.)
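// A compact illustration of loadBytesFromConfigSources’ contract as defined above,
// as it would appear inside the same package: zero sources yield nil, one source
// yields its blob(s), two or more yield the supplied inconsistency error. The
// wrapper function is hypothetical; the parameters stand in for the policy fields.
package signature

func exampleKeySources(keyPath string, keyPaths []string, keyData []byte, keyDatas [][]byte) ([][]byte, error) {
	return loadBytesFromConfigSources(configBytesSources{
		inconsistencyErrorMessage: `more than one of "keyPath", "keyPaths", "keyData", "keyDatas" specified`,
		path:  keyPath,
		paths: keyPaths,
		data:  keyData,
		datas: keyDatas, // codespell:ignore datas
	})
}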
func (f *prSigstoreSignedFulcio) prepareTrustRoot() (*fulcioTrustRoot, error) { - caCertBytes, err := loadBytesFromDataOrPath("fulcioCA", f.CAData, f.CAPath) + caCertPEMs, err := loadBytesFromConfigSources(configBytesSources{ + inconsistencyErrorMessage: `Internal inconsistency: both "caPath" and "caData" specified`, + path: f.CAPath, + data: f.CAData, + }) if err != nil { return nil, err } - if caCertBytes == nil { - return nil, errors.New(`Internal inconsistency: Fulcio specified with neither "caPath" nor "caData"`) + if len(caCertPEMs) != 1 { + return nil, errors.New(`Internal inconsistency: Fulcio specified with not exactly one of "caPath" and "caData"`) } certs := x509.NewCertPool() - if ok := certs.AppendCertsFromPEM(caCertBytes); !ok { + if ok := certs.AppendCertsFromPEM(caCertPEMs[0]); !ok { return nil, errors.New("error loading Fulcio CA certificates") } fulcio := fulcioTrustRoot{ @@ -66,24 +99,35 @@ func (f *prSigstoreSignedFulcio) prepareTrustRoot() (*fulcioTrustRoot, error) { // sigstoreSignedTrustRoot contains an already parsed version of the prSigstoreSigned policy type sigstoreSignedTrustRoot struct { - publicKey crypto.PublicKey - fulcio *fulcioTrustRoot - rekorPublicKey *ecdsa.PublicKey + publicKeys []crypto.PublicKey + fulcio *fulcioTrustRoot + rekorPublicKeys []*ecdsa.PublicKey } func (pr *prSigstoreSigned) prepareTrustRoot() (*sigstoreSignedTrustRoot, error) { res := sigstoreSignedTrustRoot{} - publicKeyPEM, err := loadBytesFromDataOrPath("key", pr.KeyData, pr.KeyPath) + publicKeyPEMs, err := loadBytesFromConfigSources(configBytesSources{ + inconsistencyErrorMessage: `Internal inconsistency: more than one of "keyPath", "keyPaths", "keyData", "keyDatas" specified`, + path: pr.KeyPath, + paths: pr.KeyPaths, + data: pr.KeyData, + datas: pr.KeyDatas, // codespell:ignore datas + }) if err != nil { return nil, err } - if publicKeyPEM != nil { - pk, err := cryptoutils.UnmarshalPEMToPublicKey(publicKeyPEM) - if err != nil { - return nil, fmt.Errorf("parsing public key: %w", err) + if publicKeyPEMs != nil { + for index, keyData := range publicKeyPEMs { + pk, err := cryptoutils.UnmarshalPEMToPublicKey(keyData) + if err != nil { + return nil, fmt.Errorf("parsing public key %d: %w", index+1, err) + } + res.publicKeys = append(res.publicKeys, pk) + } + if len(res.publicKeys) == 0 { + return nil, errors.New(`Internal inconsistency: "keyPath", "keyPaths", "keyData" and "keyDatas" produced no public keys`) } - res.publicKey = pk } if pr.Fulcio != nil { @@ -94,21 +138,32 @@ func (pr *prSigstoreSigned) prepareTrustRoot() (*sigstoreSignedTrustRoot, error) res.fulcio = f } - rekorPublicKeyPEM, err := loadBytesFromDataOrPath("rekorPublicKey", pr.RekorPublicKeyData, pr.RekorPublicKeyPath) + rekorPublicKeyPEMs, err := loadBytesFromConfigSources(configBytesSources{ + inconsistencyErrorMessage: `Internal inconsistency: both "rekorPublicKeyPath" and "rekorPublicKeyData" specified`, + path: pr.RekorPublicKeyPath, + paths: pr.RekorPublicKeyPaths, + data: pr.RekorPublicKeyData, + datas: pr.RekorPublicKeyDatas, // codespell:ignore datas + }) if err != nil { return nil, err } - if rekorPublicKeyPEM != nil { - pk, err := cryptoutils.UnmarshalPEMToPublicKey(rekorPublicKeyPEM) - if err != nil { - return nil, fmt.Errorf("parsing Rekor public key: %w", err) - } - pkECDSA, ok := pk.(*ecdsa.PublicKey) - if !ok { - return nil, fmt.Errorf("Rekor public key is not using ECDSA") + if rekorPublicKeyPEMs != nil { + for index, pem := range rekorPublicKeyPEMs { + pk, err := cryptoutils.UnmarshalPEMToPublicKey(pem)
+ if err != nil { + return nil, fmt.Errorf("parsing Rekor public key %d: %w", index+1, err) + } + pkECDSA, ok := pk.(*ecdsa.PublicKey) + if !ok { + return nil, fmt.Errorf("Rekor public key %d is not using ECDSA", index+1) + } + res.rekorPublicKeys = append(res.rekorPublicKeys, pkECDSA) + } + if len(res.rekorPublicKeys) == 0 { + return nil, errors.New(`Internal inconsistency: "rekorPublicKeyPath", "rekorPublicKeyPaths", "rekorPublicKeyData" and "rekorPublicKeyDatas" produced no public keys`) } - res.rekorPublicKey = pkECDSA } return &res, nil @@ -134,37 +189,51 @@ func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image priva } untrustedPayload := sig.UntrustedPayload() - var publicKey crypto.PublicKey + var publicKeys []crypto.PublicKey switch { - case trustRoot.publicKey != nil && trustRoot.fulcio != nil: // newPRSigstoreSigned rejects such combinations. + case trustRoot.publicKeys != nil && trustRoot.fulcio != nil: // newPRSigstoreSigned rejects such combinations. return sarRejected, errors.New("Internal inconsistency: Both a public key and Fulcio CA specified") - case trustRoot.publicKey == nil && trustRoot.fulcio == nil: // newPRSigstoreSigned rejects such combinations. + case trustRoot.publicKeys == nil && trustRoot.fulcio == nil: // newPRSigstoreSigned rejects such combinations. return sarRejected, errors.New("Internal inconsistency: Neither a public key nor a Fulcio CA specified") - case trustRoot.publicKey != nil: - if trustRoot.rekorPublicKey != nil { + case trustRoot.publicKeys != nil: + if trustRoot.rekorPublicKeys != nil { untrustedSET, ok := untrustedAnnotations[signature.SigstoreSETAnnotationKey] if !ok { // For user convenience; passing an empty []byte to VerifyRekorSet should work. return sarRejected, fmt.Errorf("missing %s annotation", signature.SigstoreSETAnnotationKey) } - // We could use publicKeyPEM directly, but let’s re-marshal to avoid inconsistencies. - // FIXME: We could just generate DER instead of the full PEM text - recreatedPublicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(trustRoot.publicKey) - if err != nil { - // Coverage: The key was loaded from a PEM format, so it’s unclear how this could fail. - // (PEM is not essential, MarshalPublicKeyToPEM can only fail if marshaling to ASN1.DER fails.) - return sarRejected, fmt.Errorf("re-marshaling public key to PEM: %w", err) + var rekorFailures []string + for _, candidatePublicKey := range trustRoot.publicKeys { + // We could use publicKeyPEM directly, but let’s re-marshal to avoid inconsistencies. + // FIXME: We could just generate DER instead of the full PEM text + recreatedPublicKeyPEM, err := cryptoutils.MarshalPublicKeyToPEM(candidatePublicKey) + if err != nil { + // Coverage: The key was loaded from a PEM format, so it’s unclear how this could fail. + // (PEM is not essential, MarshalPublicKeyToPEM can only fail if marshaling to ASN1.DER fails.) + return sarRejected, fmt.Errorf("re-marshaling public key to PEM: %w", err) + } + // We don’t care about the Rekor timestamp, just about log presence. + _, err = internal.VerifyRekorSET(trustRoot.rekorPublicKeys, []byte(untrustedSET), recreatedPublicKeyPEM, untrustedBase64Signature, untrustedPayload) + if err == nil { + publicKeys = append(publicKeys, candidatePublicKey) + break // The SET can only accept one public key entry, so if we found one, the rest either doesn’t match or is a duplicate + } + rekorFailures = append(rekorFailures, err.Error()) } - // We don’t care about the Rekor timestamp, just about log presence. 
- if _, err := internal.VerifyRekorSET(trustRoot.rekorPublicKey, []byte(untrustedSET), recreatedPublicKeyPEM, untrustedBase64Signature, untrustedPayload); err != nil { - return sarRejected, err + if len(publicKeys) == 0 { + if len(rekorFailures) == 0 { + // Coverage: We have ensured that len(trustRoot.publicKeys) != 0, when nothing succeeds, there must be at least one failure. + return sarRejected, errors.New(`Internal inconsistency: Rekor SET did not match any key but we have no failures.`) + } + return sarRejected, internal.NewInvalidSignatureError(fmt.Sprintf("No public key verified against the RekorSET: %s", strings.Join(rekorFailures, ", "))) } + } else { + publicKeys = trustRoot.publicKeys } - publicKey = trustRoot.publicKey case trustRoot.fulcio != nil: - if trustRoot.rekorPublicKey == nil { // newPRSigstoreSigned rejects such combinations. + if trustRoot.rekorPublicKeys == nil { // newPRSigstoreSigned rejects such combinations. return sarRejected, errors.New("Internal inconsistency: Fulcio CA specified without a Rekor public key") } untrustedSET, ok := untrustedAnnotations[signature.SigstoreSETAnnotationKey] @@ -179,19 +248,20 @@ func (pr *prSigstoreSigned) isSignatureAccepted(ctx context.Context, image priva if untrustedIntermediateChain, ok := untrustedAnnotations[signature.SigstoreIntermediateCertificateChainAnnotationKey]; ok { untrustedIntermediateChainBytes = []byte(untrustedIntermediateChain) } - pk, err := verifyRekorFulcio(trustRoot.rekorPublicKey, trustRoot.fulcio, + pk, err := verifyRekorFulcio(trustRoot.rekorPublicKeys, trustRoot.fulcio, []byte(untrustedSET), []byte(untrustedCert), untrustedIntermediateChainBytes, untrustedBase64Signature, untrustedPayload) if err != nil { return sarRejected, err } - publicKey = pk + publicKeys = []crypto.PublicKey{pk} } - if publicKey == nil { - // Coverage: This should never happen, we have already excluded the possibility in the switch above. + if len(publicKeys) == 0 { + // Coverage: This should never happen, we ensured that trustRoot.publicKeys is non-empty if set, + // and we have already excluded the possibility in the switch above. return sarRejected, fmt.Errorf("Internal inconsistency: publicKey not set before verifying sigstore payload") } - signature, err := internal.VerifySigstorePayload(publicKey, untrustedPayload, untrustedBase64Signature, internal.SigstorePayloadAcceptanceRules{ + signature, err := internal.VerifySigstorePayload(publicKeys, untrustedPayload, untrustedBase64Signature, internal.SigstorePayloadAcceptanceRules{ ValidateSignedDockerReference: func(ref string) error { if !pr.SignedIdentity.matchesDockerReference(image, ref) { return PolicyRequirementError(fmt.Sprintf("Signature for identity %q is not accepted", ref)) diff --git a/vendor/github.com/containers/image/v5/signature/policy_types.go b/vendor/github.com/containers/image/v5/signature/policy_types.go index 96e91a0a9..32aa1c0ad 100644 --- a/vendor/github.com/containers/image/v5/signature/policy_types.go +++ b/vendor/github.com/containers/image/v5/signature/policy_types.go @@ -74,7 +74,7 @@ type prSignedBy struct { // KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified. KeyPath string `json:"keyPath,omitempty"` - // KeyPaths if a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths and KeyData must be specified. + // KeyPaths is a set of pathnames to local files containing the trusted key(s). 
Exactly one of KeyPath, KeyPaths and KeyData must be specified. KeyPaths []string `json:"keyPaths,omitempty"` // KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath, KeyPaths and KeyData must be specified. KeyData []byte `json:"keyData,omitempty"` @@ -111,24 +111,35 @@ type prSignedBaseLayer struct { type prSigstoreSigned struct { prCommon - // KeyPath is a pathname to a local file containing the trusted key. Exactly one of KeyPath, KeyData, Fulcio must be specified. + // KeyPath is a pathname to a local file containing the trusted key. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified. KeyPath string `json:"keyPath,omitempty"` - // KeyData contains the trusted key, base64-encoded. Exactly one of KeyPath, KeyData, Fulcio must be specified. + // KeyPaths is a set of pathnames to local files containing the trusted key(s). Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified. + KeyPaths []string `json:"keyPaths,omitempty"` + // KeyData contains the trusted key, base64-encoded. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified. KeyData []byte `json:"keyData,omitempty"` - // FIXME: Multiple public keys? + // KeyDatas is a set of trusted keys, base64-encoded. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified. + KeyDatas [][]byte `json:"keyDatas,omitempty"` - // Fulcio specifies which Fulcio-generated certificates are accepted. Exactly one of KeyPath, KeyData, Fulcio must be specified. + // Fulcio specifies which Fulcio-generated certificates are accepted. Exactly one of KeyPath, KeyPaths, KeyData, KeyDatas and Fulcio must be specified. // If Fulcio is specified, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well. Fulcio PRSigstoreSignedFulcio `json:"fulcio,omitempty"` // RekorPublicKeyPath is a pathname to local file containing a public key of a Rekor server which must record acceptable signatures. - // If Fulcio is used, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well; otherwise it is optional - // (and Rekor inclusion is not required if a Rekor public key is not specified). + // If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well; + // otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified). RekorPublicKeyPath string `json:"rekorPublicKeyPath,omitempty"` + // RekorPublicKeyPaths is a set of pathnames to local files, each containing a public key of a Rekor server. One of the keys must record acceptable signatures. + // If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well; + // otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified). + RekorPublicKeyPaths []string `json:"rekorPublicKeyPaths,omitempty"` // RekorPublicKeyData contains a base64-encoded public key of a Rekor server which must record acceptable signatures. - // If Fulcio is used, one of RekorPublicKeyPath or RekorPublicKeyData must be specified as well; otherwise it is optional - // (and Rekor inclusion is not required if a Rekor public key is not specified).
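// What the new multi-key fields look like on the wire, per the json tags above: a
// hypothetical policy.json requirement trusting two signing keys and two Rekor log
// keys (all paths illustrative).
package sketch

const examplePolicyRequirement = `{
	"type": "sigstoreSigned",
	"keyPaths": ["/etc/pki/containers/key1.pub", "/etc/pki/containers/key2.pub"],
	"rekorPublicKeyPaths": ["/etc/pki/containers/rekor1.pub", "/etc/pki/containers/rekor2.pub"],
	"signedIdentity": {"type": "matchRepoDigestOrExact"}
}`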
+ // If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well; + // otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified). RekorPublicKeyData []byte `json:"rekorPublicKeyData,omitempty"` + // RekorPublicKeyDatas each contain a base64-encoded public key of a Rekor server. One of the keys must record acceptable signatures. + // If Fulcio is used, one of RekorPublicKeyPath, RekorPublicKeyPaths, RekorPublicKeyData and RekorPublicKeyDatas must be specified as well; + // otherwise it is optional (and Rekor inclusion is not required if a Rekor public key is not specified). + RekorPublicKeyDatas [][]byte `json:"rekorPublicKeyDatas,omitempty"` // SignedIdentity specifies what image identity the signature must be claiming about the image. // Defaults to "matchRepoDigestOrExact" if not specified. diff --git a/vendor/github.com/containers/image/v5/storage/storage_dest.go b/vendor/github.com/containers/image/v5/storage/storage_dest.go index 92dfebd33..842a3ab06 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_dest.go +++ b/vendor/github.com/containers/image/v5/storage/storage_dest.go @@ -84,18 +84,36 @@ type storageImageDestinationLockProtected struct { currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed) indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image - // In general, a layer is identified either by (compressed) digest, or by TOC digest. + // Externally, a layer is identified either by (compressed) digest, or by TOC digest + // (and we assume the TOC digest also uniquely identifies the contents, i.e. there aren’t two + // different formats/ways to parse a single TOC); internally, we use uncompressed digest (“DiffID”) or a TOC digest. + // We may or may not know the relationships between these three values. + // // When creating a layer, the c/storage layer metadata and image IDs must _only_ be based on trusted values // we have computed ourselves. (Layer reuse can then look up against such trusted values, but it might not - // recompute those values for incomding layers — the point of the reuse is that we don’t need to consume the incoming layer.) - - // Layer identification: For a layer, at least one of indexToTOCDigest and blobDiffIDs must be available before commitLayer is called. - // The presence of an indexToTOCDigest is what decides how the layer is identified, i.e. which fields must be trusted. - blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs - indexToTOCDigest map[int]digest.Digest // Mapping from layer index to a TOC Digest, IFF the layer was created/found/reused by TOC digest + // recompute those values for incoming layers — the point of the reuse is that we don’t need to consume the incoming layer.) + // + // Layer identification: For a layer, at least one of (indexToDiffID, indexToTOCDigest, blobDiffIDs) must be available + // before commitLayer is called. + // The layer is identified by the first of the three fields which exists, in that order (and the value must be trusted). + // + // WARNING: All values in indexToDiffID, indexToTOCDigest, and blobDiffIDs are _individually_ trusted, but blobDiffIDs is more subtle. + // The values in indexTo* are all consistent, because the code writing them processed them all at once, and consistently. 
+ // But it is possible for a layer’s indexToDiffID and indexToTOCDigest to be based on a TOC, without setting blobDiffIDs + // for the compressed digest of that index, and for blobDiffIDs[compressedDigest] to be set _separately_ while processing some + // other layer entry. In particular it is possible for indexToDiffID[index] and blobDiffIDs[compressedDigestAtIndex] to refer + // to mismatching contents. + // Users of these fields should use trustedLayerIdentityDataLocked, which centralizes the validity logic, + // instead of interpreting these fields, especially blobDiffIDs, directly. + // + // Ideally we wouldn’t have blobDiffIDs, and we would just keep records by index, but the public API does not require the caller + // to provide layer indices; and configs don’t have layer indices. blobDiffIDs needs to exist for those cases. + indexToDiffID map[int]digest.Digest // Mapping from layer index to DiffID + indexToTOCDigest map[int]digest.Digest // Mapping from layer index to a TOC Digest + blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs. CAREFUL: See the WARNING above. // Layer data: Before commitLayer is called, either at least one of (diffOutputs, indexToAdditionalLayer, filenames) - // should be available; or indexToTOCDigest/blobDiffIDs should be enough to locate an existing c/storage layer. + // should be available; or indexToDiffID/indexToTOCDigest/blobDiffIDs should be enough to locate an existing c/storage layer. // They are looked up in the order they are mentioned above. diffOutputs map[int]*graphdriver.DriverWithDifferOutput // Mapping from layer index to a partially-pulled layer intermediate data indexToAdditionalLayer map[int]storage.AdditionalLayer // Mapping from layer index to their corresponding additional layer @@ -145,9 +163,12 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (* }, indexToStorageID: make(map[int]string), lockProtected: storageImageDestinationLockProtected{ - indexToAddedLayerInfo: make(map[int]addedLayerInfo), - blobDiffIDs: make(map[digest.Digest]digest.Digest), - indexToTOCDigest: make(map[int]digest.Digest), + indexToAddedLayerInfo: make(map[int]addedLayerInfo), + + indexToDiffID: make(map[int]digest.Digest), + indexToTOCDigest: make(map[int]digest.Digest), + blobDiffIDs: make(map[digest.Digest]digest.Digest), + diffOutputs: make(map[int]*graphdriver.DriverWithDifferOutput), indexToAdditionalLayer: make(map[int]storage.AdditionalLayer), filenames: make(map[digest.Digest]string), @@ -323,20 +344,30 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces s.lock.Lock() if out.UncompressedDigest != "" { + s.lockProtected.indexToDiffID[options.LayerIndex] = out.UncompressedDigest + if out.TOCDigest != "" { + options.Cache.RecordTOCUncompressedPair(out.TOCDigest, out.UncompressedDigest) + } + // Don’t set indexToTOCDigest on this path: + // - Using UncompressedDigest allows image reuse with non-partially-pulled layers, so we want to set indexToDiffID. + // - If UncompressedDigest has been computed, that means the layer was read completely, and the TOC has been created from scratch. + // That TOC is quite unlikely to match any other TOC value. + + // The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is // responsible for ensuring blobDigest has been validated.
if out.CompressedDigest != blobDigest { return private.UploadedBlob{}, fmt.Errorf("internal error: ApplyDiffWithDiffer returned CompressedDigest %q not matching expected %q", out.CompressedDigest, blobDigest) } - s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest + // So, also record information about blobDigest, which might benefit reuse. // We trust ApplyDiffWithDiffer to validate or create both values correctly. + s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest) } else { - // Don’t identify layers by TOC if UncompressedDigest is available. - // - Using UncompressedDigest allows image reuse with non-partially-pulled layers - // - If UncompressedDigest has been computed, that means the layer was read completely, and the TOC has been created from scratch. - // That TOC is quite unlikely to match with any other TOC value. + // Use diffID for layer identity if it is known. + if uncompressedDigest := options.Cache.UncompressedDigestForTOC(out.TOCDigest); uncompressedDigest != "" { + s.lockProtected.indexToDiffID[options.LayerIndex] = uncompressedDigest + } s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest } s.lockProtected.diffOutputs[options.LayerIndex] = out @@ -465,49 +496,40 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err) } - if len(layers) > 0 { - if size != -1 { - s.lockProtected.blobDiffIDs[blobDigest] = uncompressedDigest - return true, private.ReusedBlob{ - Digest: blobDigest, - Size: size, - }, nil - } - if !options.CanSubstitute { - return false, private.ReusedBlob{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blob with digest %s", blobDigest) - } - s.lockProtected.blobDiffIDs[uncompressedDigest] = uncompressedDigest - return true, private.ReusedBlob{ - Digest: uncompressedDigest, - Size: layers[0].UncompressedSize, - }, nil + if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found { + s.lockProtected.blobDiffIDs[blobDigest] = uncompressedDigest + return true, reused, nil } } } if options.TOCDigest != "" && options.LayerIndex != nil { + // Check if we know which UncompressedDigest the TOC digest resolves to, and we have a match for that. + // Prefer this over LayersByTOCDigest because we can identify the layer using UncompressedDigest, maximizing reuse. + uncompressedDigest := options.Cache.UncompressedDigestForTOC(options.TOCDigest) + if uncompressedDigest != "" { + layers, err = s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest) + if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { + return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err) + } + if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found { + s.lockProtected.indexToDiffID[*options.LayerIndex] = uncompressedDigest + reused.MatchedByTOCDigest = true + return true, reused, nil + } + } // Check if we have a chunked layer in storage with the same TOC digest.
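// The lookup preference implemented above, reduced to a sketch with function values
// standing in for the real cache and store calls (UncompressedDigestForTOC,
// LayersByUncompressedDigest, LayersByTOCDigest): resolve the TOC to a DiffID first,
// because DiffID-identified layers can be reused even by non-partial pulls.
package sketch

func findReusableLayers(tocDigest string,
	uncompressedForTOC func(string) string,
	byUncompressed, byTOC func(string) []string,
) []string {
	if d := uncompressedForTOC(tocDigest); d != "" {
		if layers := byUncompressed(d); len(layers) > 0 {
			return layers // Identified by DiffID: maximal reuse.
		}
	}
	return byTOC(tocDigest) // Fallback: match a chunked layer by its TOC digest.
}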
layers, err := s.imageRef.transport.store.LayersByTOCDigest(options.TOCDigest) - if err != nil && !errors.Is(err, storage.ErrLayerUnknown) { return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with TOC digest %q: %w`, options.TOCDigest, err) } - if len(layers) > 0 { - if size != -1 { - s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest - return true, private.ReusedBlob{ - Digest: blobDigest, - Size: size, - MatchedByTOCDigest: true, - }, nil - } else if options.CanSubstitute && layers[0].UncompressedDigest != "" { - s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest - return true, private.ReusedBlob{ - Digest: layers[0].UncompressedDigest, - Size: layers[0].UncompressedSize, - MatchedByTOCDigest: true, - }, nil + if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found { + if uncompressedDigest != "" { + s.lockProtected.indexToDiffID[*options.LayerIndex] = uncompressedDigest } + s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest + reused.MatchedByTOCDigest = true + return true, reused, nil } } @@ -515,49 +537,137 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige return false, private.ReusedBlob{}, nil } +// reusedBlobFromLayerLookup returns (true, ReusedBlob) if layers contain a usable match; or (false, ...) if not. +// The caller is still responsible for setting the layer identification fields, to allow the layer to be found again. +func reusedBlobFromLayerLookup(layers []storage.Layer, blobDigest digest.Digest, blobSize int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob) { + if len(layers) > 0 { + if blobSize != -1 { + return true, private.ReusedBlob{ + Digest: blobDigest, + Size: blobSize, + } + } else if options.CanSubstitute && layers[0].UncompressedDigest != "" { + return true, private.ReusedBlob{ + Digest: layers[0].UncompressedDigest, + Size: layers[0].UncompressedSize, + CompressionOperation: types.Decompress, + CompressionAlgorithm: nil, + } + } + } + return false, private.ReusedBlob{} +} + +// trustedLayerIdentityData is a _consistent_ set of information known about a single layer. +type trustedLayerIdentityData struct { + layerIdentifiedByTOC bool // true if we decided the layer should be identified by tocDigest, false if by diffID + + diffID digest.Digest // A digest of the uncompressed full contents of the layer, or "" if unknown; must be set if !layerIdentifiedByTOC + tocDigest digest.Digest // A digest of the TOC digest, or "" if unknown; must be set if layerIdentifiedByTOC + blobDigest digest.Digest // A digest of the (possibly-compressed) layer as presented, or "" if unknown/untrusted. +} + +// trustedLayerIdentityDataLocked returns a _consistent_ set of information for a layer with (layerIndex, blobDigest). +// blobDigest is the (possibly-compressed) layer digest referenced in the manifest. +// It returns (trusted, true) if the layer was found, or (_, false) if insufficient data is available. +// +// The caller must hold s.lock. +func (s *storageImageDestination) trustedLayerIdentityDataLocked(layerIndex int, blobDigest digest.Digest) (trustedLayerIdentityData, bool) { + // The decision about layerIdentifiedByTOC must be _stable_ once the data for layerIndex is set, + // even if s.lockProtected.blobDiffIDs changes later and we can subsequently find an entry that wasn’t originally available. 
+ // + // If we previously didn't have a blobDigest match and decided to use the TOC, but _later_ we happen to find + // a blobDigest match, we might in principle want to reconsider, set layerIdentifiedByTOC to false, and use the file: + // but the layer in question, and possibly child layers, might already have been committed to storage. + // A late-arriving addition to s.lockProtected.blobDiffIDs would mean that we would want to set + // new layer IDs for potentially the whole parent chain = throw away the just-created layers and create them all again. + // + // Such a within-image layer reuse is expected to be pretty rare; instead, ignore the unexpected file match + // and proceed to the originally-planned TOC match. + + res := trustedLayerIdentityData{} + diffID, layerIdentifiedByDiffID := s.lockProtected.indexToDiffID[layerIndex] + if layerIdentifiedByDiffID { + res.layerIdentifiedByTOC = false + res.diffID = diffID + } + if tocDigest, ok := s.lockProtected.indexToTOCDigest[layerIndex]; ok { + res.tocDigest = tocDigest + if !layerIdentifiedByDiffID { + res.layerIdentifiedByTOC = true + } + } + if otherDiffID, ok := s.lockProtected.blobDiffIDs[blobDigest]; ok { + if !layerIdentifiedByDiffID && !res.layerIdentifiedByTOC { + // This is the only data we have, so it is clearly self-consistent. + res.layerIdentifiedByTOC = false + res.diffID = otherDiffID + res.blobDigest = blobDigest + layerIdentifiedByDiffID = true + } else { + // We have set up the layer identity without referring to blobDigest: + // an attacker might have used a manifest with non-matching tocDigest and blobDigest. + // But, if we know a trusted diffID value from other sources, and it matches the one for blobDigest, + // we know blobDigest is fine as well. + if res.diffID != "" && otherDiffID == res.diffID { + res.blobDigest = blobDigest + } + } + } + if !layerIdentifiedByDiffID && !res.layerIdentifiedByTOC { + return trustedLayerIdentityData{}, false // We found nothing at all + } + return res, true +} + // computeID computes a recommended image ID based on information we have so far. If // the manifest is not of a type that we recognize, we return an empty value, indicating // that since we don't have a recommendation, a random ID should be used if one needs // to be allocated. -func (s *storageImageDestination) computeID(m manifest.Manifest) string { +func (s *storageImageDestination) computeID(m manifest.Manifest) (string, error) { // This is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock. + layerInfos := m.LayerInfos() + // Build the diffID list. We need the decompressed sums that we've been calculating to // fill in the DiffIDs. It's expected (but not enforced by us) that the number of // diffIDs corresponds to the number of non-EmptyLayer entries in the history. var diffIDs []digest.Digest - switch m := m.(type) { + switch m.(type) { case *manifest.Schema1: - // Build a list of the diffIDs we've generated for the non-throwaway FS layers, - // in reverse of the order in which they were originally listed. - for i, compat := range m.ExtractedV1Compatibility { - if compat.ThrowAway { + // Build a list of the diffIDs we've generated for the non-throwaway FS layers + for i, li := range layerInfos { + if li.EmptyLayer { continue } - blobSum := m.FSLayers[i].BlobSum - diffID, ok := s.lockProtected.blobDiffIDs[blobSum] - if !ok { - // this can, in principle, legitimately happen when a layer is reused by TOC. 
- logrus.Infof("error looking up diffID for layer %q", blobSum.String()) - return "" + trusted, ok := s.trustedLayerIdentityDataLocked(i, li.Digest) + if !ok { // We have already committed all layers if we get to this point, so the data must have been available. + return "", fmt.Errorf("internal inconsistency: layer (%d, %q) not found", i, li.Digest) } - diffIDs = append([]digest.Digest{diffID}, diffIDs...) + if trusted.diffID == "" { + if trusted.layerIdentifiedByTOC { + logrus.Infof("v2s1 image uses a layer identified by TOC with unknown diffID; choosing a random image ID") + return "", nil + } + return "", fmt.Errorf("internal inconsistency: layer (%d, %q) is not identified by TOC and has no diffID", i, li.Digest) + } + diffIDs = append(diffIDs, trusted.diffID) } case *manifest.Schema2, *manifest.OCI1: // We know the ID calculation doesn't actually use the diffIDs, so we don't need to populate // the diffID list. default: - return "" + return "", nil } // We want to use the same ID for “the same” images, but without risking unwanted sharing / malicious image corruption. // // Traditionally that means the same ~config digest, as computed by m.ImageID; - // but if we pull a layer by TOC, we verify the layer against neither the (compressed) blob digest in the manifest, + // but if we identify a layer by TOC, we verify the layer against neither the (compressed) blob digest in the manifest, // nor against the config’s RootFS.DiffIDs. We don’t really want to do either, to allow partial layer pulls where we never see // most of the data. // - // So, if a layer is pulled by TOC (and we do validate against the TOC), the fact that we used the TOC, and the value of the TOC, + // So, if a layer is identified by TOC (and we do validate against the TOC), the fact that we used the TOC, and the value of the TOC, // must enter into the image ID computation. // But for images where no TOC was used, continue to use IDs computed the traditional way, to maximize image reuse on upgrades, // and to introduce the changed behavior only when partial pulls are used. @@ -566,28 +676,31 @@ func (s *storageImageDestination) computeID(m manifest.Manifest) string { // (skopeo copy --format v2s2 docker://…/zstd-chunked-image containers-storage:… ). So this is not happening only in the OCI case above. ordinaryImageID, err := m.ImageID(diffIDs) if err != nil { - return "" + return "", err } tocIDInput := "" hasLayerPulledByTOC := false - for i := range m.LayerInfos() { - layerValue := "" // An empty string is not a valid digest, so this is unambiguous with the TOC case. - tocDigest, ok := s.lockProtected.indexToTOCDigest[i] // "" if not a TOC - if ok { + for i, li := range layerInfos { + trusted, ok := s.trustedLayerIdentityDataLocked(i, li.Digest) + if !ok { // We have already committed all layers if we get to this point, so the data must have been available. + return "", fmt.Errorf("internal inconsistency: layer (%d, %q) not found", i, li.Digest) + } + layerValue := "" // An empty string is not a valid digest, so this is unambiguous with the TOC case. + if trusted.layerIdentifiedByTOC { hasLayerPulledByTOC = true - layerValue = tocDigest.String() + layerValue = trusted.tocDigest.String() } tocIDInput += layerValue + "|" // "|" can not be present in a TOC digest, so this is an unambiguous separator. } if !hasLayerPulledByTOC { - return ordinaryImageID + return ordinaryImageID, nil } // ordinaryImageID is a digest of a config, which is a JSON value. 
// To avoid the risk of collisions, start the input with @ so that the input is not a valid JSON. tocImageID := digest.FromString("@With TOC:" + tocIDInput).Encoded() logrus.Debugf("Ordinary storage image ID %s; a layer was looked up by TOC, so using image ID %s", ordinaryImageID, tocImageID) - return tocImageID + return tocImageID, nil } // getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig @@ -671,14 +784,14 @@ func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDig s.lock.Lock() defer s.lock.Unlock() - if d, found := s.lockProtected.indexToTOCDigest[layerIndex]; found { - return "@TOC=" + d.Encoded(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous. + trusted, ok := s.trustedLayerIdentityDataLocked(layerIndex, blobDigest) + if !ok { + return "", false } - - if d, found := s.lockProtected.blobDiffIDs[blobDigest]; found { - return d.Encoded(), true // This looks like chain IDs, and it uses the traditional value. + if trusted.layerIdentifiedByTOC { + return "@TOC=" + trusted.tocDigest.Encoded(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous. } - return "", false + return trusted.diffID.Encoded(), true // This looks like chain IDs, and it uses the traditional value. } // commitLayer commits the specified layer with the given index to the storage. @@ -778,6 +891,16 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D diffOutput, ok := s.lockProtected.diffOutputs[index] s.lock.Unlock() if ok { + // If we know a trusted DiffID value (e.g. from a BlobInfoCache), set it in diffOutput. + // That way it will be persisted in storage even if the cache is deleted; also + // we can use the value below to avoid the untrustedUncompressedDigest logic (and notably + // the costly commit delay until a manifest is available). + s.lock.Lock() + if d, ok := s.lockProtected.indexToDiffID[index]; ok { + diffOutput.UncompressedDigest = d + } + s.lock.Unlock() + var untrustedUncompressedDigest digest.Digest if diffOutput.UncompressedDigest == "" { d, err := s.untrustedLayerDiffID(index) @@ -832,47 +955,43 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D // Check if we previously cached a file with that blob's contents. If we didn't, // then we need to read the desired contents from a layer. - var trustedUncompressedDigest, trustedOriginalDigest digest.Digest // For storage.LayerOptions + var filename string + var gotFilename bool s.lock.Lock() - tocDigest := s.lockProtected.indexToTOCDigest[index] // "" if not set - optionalDiffID := s.lockProtected.blobDiffIDs[layerDigest] // "" if not set - filename, gotFilename := s.lockProtected.filenames[layerDigest] + trusted, ok := s.trustedLayerIdentityDataLocked(index, layerDigest) + if ok && trusted.blobDigest != "" { + filename, gotFilename = s.lockProtected.filenames[trusted.blobDigest] + } s.lock.Unlock() - if gotFilename && tocDigest == "" { - // If tocDigest != "", if we now happen to find a layerDigest match, the newLayerID has already been computed as TOC-based, - // and we don't know the relationship of the layerDigest and TOC digest. - // We could recompute newLayerID to be DiffID-based and use the file, but such a within-image layer - // reuse is expected to be pretty rare; instead, ignore the unexpected file match and proceed to the - // originally-planned TOC match. 
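Stepping back to the ID computation above, the derivation condenses to the following sketch; tocBasedImageID is a hypothetical helper, and ordinaryImageID stands for the config-based ID computed by m.ImageID:

package sketch

import "github.com/opencontainers/go-digest"

// tocBasedImageID condenses the scheme above: layers identified by TOC
// contribute their TOC digest, every other layer contributes "", and the
// "@" prefix guarantees the input is never valid JSON (and therefore never
// collides with a config digest).
func tocBasedImageID(ordinaryImageID string, perLayerTOC []digest.Digest) string {
	input := ""
	usedTOC := false
	for _, d := range perLayerTOC {
		layerValue := "" // "" is not a valid digest, so the cases stay unambiguous.
		if d != "" {
			usedTOC = true
			layerValue = d.String()
		}
		input += layerValue + "|" // "|" cannot occur inside a digest.
	}
	if !usedTOC {
		return ordinaryImageID // Preserve traditional IDs for full pulls.
	}
	return digest.FromString("@With TOC:" + input).Encoded()
}

This keeps IDs stable for images pulled the traditional way, while any partial (TOC-verified) pull feeds the TOC values into the ID and so cannot alias an image pulled and verified by diffIDs.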
- - // Because tocDigest == "", optionaldiffID must have been set; and even if it weren’t, PutLayer will recompute the digest from the stream. - trustedUncompressedDigest = optionalDiffID - trustedOriginalDigest = layerDigest // The code setting .filenames[layerDigest] is responsible for the contents matching. + if !ok { // We have already determined newLayerID, so the data must have been available. + return nil, fmt.Errorf("internal inconsistency: layer (%d, %q) not found", index, layerDigest) + } + var trustedOriginalDigest digest.Digest // For storage.LayerOptions + if gotFilename { + // The code setting .filenames[trusted.blobDigest] is responsible for ensuring that the file contents match trusted.blobDigest. + trustedOriginalDigest = trusted.blobDigest } else { // Try to find the layer with contents matching the data we use. var layer *storage.Layer // = nil - if tocDigest != "" { - layers, err2 := s.imageRef.transport.store.LayersByTOCDigest(tocDigest) - if err2 == nil && len(layers) > 0 { + if trusted.diffID != "" { + if layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(trusted.diffID); err2 == nil && len(layers) > 0 { layer = &layers[0] - } else { - return nil, fmt.Errorf("locating layer for TOC digest %q: %w", tocDigest, err2) - } - } else { - // Because tocDigest == "", optionaldiffID must have been set - layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(optionalDiffID) - if err2 == nil && len(layers) > 0 { - layer = &layers[0] - } else { - layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(layerDigest) - if err2 == nil && len(layers) > 0 { - layer = &layers[0] - } - } - if layer == nil { - return nil, fmt.Errorf("locating layer for blob %q: %w", layerDigest, err2) } } + if layer == nil && trusted.tocDigest != "" { + if layers, err2 := s.imageRef.transport.store.LayersByTOCDigest(trusted.tocDigest); err2 == nil && len(layers) > 0 { + layer = &layers[0] + } + } + if layer == nil && trusted.blobDigest != "" { + if layers, err2 := s.imageRef.transport.store.LayersByCompressedDigest(trusted.blobDigest); err2 == nil && len(layers) > 0 { + layer = &layers[0] + } + } + if layer == nil { + return nil, fmt.Errorf("layer for blob %q/%q/%q not found", trusted.blobDigest, trusted.tocDigest, trusted.diffID) + } + // Read the layer's contents. noCompression := archive.Uncompressed diffOptions := &storage.DiffOptions{ @@ -880,7 +999,7 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D } diff, err2 := s.imageRef.transport.store.Diff("", layer.ID, diffOptions) if err2 != nil { - return nil, fmt.Errorf("reading layer %q for blob %q: %w", layer.ID, layerDigest, err2) + return nil, fmt.Errorf("reading layer %q for blob %q/%q/%q: %w", layer.ID, trusted.blobDigest, trusted.tocDigest, trusted.diffID, err2) } // Copy the layer diff to a file. Diff() takes a lock that it holds // until the ReadCloser that it returns is closed, and PutLayer() wants @@ -902,20 +1021,19 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D return nil, fmt.Errorf("storing blob to file %q: %w", filename, err) } - if optionalDiffID == "" && layer.UncompressedDigest != "" { - optionalDiffID = layer.UncompressedDigest + if trusted.diffID == "" && layer.UncompressedDigest != "" { + trusted.diffID = layer.UncompressedDigest // This data might have been unavailable in tryReusingBlobAsPending, and is only known now. } - // The stream we have is uncompressed, this matches contents of the stream. 
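The layer lookup rewritten above now tries identities in decreasing order of trust. Condensed into a self-contained sketch (findExistingLayer is a hypothetical wrapper; the three store methods are the real c/storage calls used in the hunk):

package sketch

import (
	"fmt"

	"github.com/containers/storage"
	"github.com/opencontainers/go-digest"
)

// findExistingLayer applies the fallback order from the hunk above:
// uncompressed digest (diffID) first, then TOC digest, then compressed digest.
func findExistingLayer(store storage.Store, diffID, tocDigest, blobDigest digest.Digest) (*storage.Layer, error) {
	if diffID != "" {
		if layers, err := store.LayersByUncompressedDigest(diffID); err == nil && len(layers) > 0 {
			return &layers[0], nil
		}
	}
	if tocDigest != "" {
		if layers, err := store.LayersByTOCDigest(tocDigest); err == nil && len(layers) > 0 {
			return &layers[0], nil
		}
	}
	if blobDigest != "" {
		if layers, err := store.LayersByCompressedDigest(blobDigest); err == nil && len(layers) > 0 {
			return &layers[0], nil
		}
	}
	return nil, fmt.Errorf("layer for blob %q/%q/%q not found", blobDigest, tocDigest, diffID)
}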
- // If tocDigest != "", trustedUncompressedDigest might still be ""; in that case PutLayer will compute the value from the stream. - trustedUncompressedDigest = optionalDiffID - // FIXME? trustedOriginalDigest could be set to layerDigest IF tocDigest == "" (otherwise layerDigest is untrusted). + // The stream we have is uncompressed, and it matches trusted.diffID (if known). + // + // FIXME? trustedOriginalDigest could be set to trusted.blobDigest if known, to allow more layer reuse. // But for c/storage to reasonably use it (as a CompressedDigest value), we should also ensure the CompressedSize of the created // layer is correct, and the API does not currently make it possible (.CompressedSize is set from the input stream). // // We can legitimately set storage.LayerOptions.OriginalDigest to "", - // but that would just result in PutLayer computing the digest of the input stream == optionalDiffID. + // but that would just result in PutLayer computing the digest of the input stream == trusted.diffID. // So, instead, set .OriginalDigest to the value we know already, to avoid that digest computation. - trustedOriginalDigest = optionalDiffID + trustedOriginalDigest = trusted.diffID // Allow using the already-collected layer contents without extracting the layer again. // @@ -923,11 +1041,11 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D // We don’t have the original compressed data here to trivially set filenames[layerDigest]. // In particular we can’t achieve the correct Layer.CompressedSize value with the current c/storage API. // Within-image layer reuse is probably very rare, for now we prefer to avoid that complexity. - if trustedUncompressedDigest != "" { + if trusted.diffID != "" { s.lock.Lock() - s.lockProtected.blobDiffIDs[trustedUncompressedDigest] = trustedUncompressedDigest - s.lockProtected.filenames[trustedUncompressedDigest] = filename - s.lockProtected.fileSizes[trustedUncompressedDigest] = fileSize + s.lockProtected.blobDiffIDs[trusted.diffID] = trusted.diffID + s.lockProtected.filenames[trusted.diffID] = filename + s.lockProtected.fileSizes[trusted.diffID] = fileSize s.lock.Unlock() } } @@ -940,11 +1058,12 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D // Build the new layer using the diff, regardless of where it came from. // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). layer, _, err := s.imageRef.transport.store.PutLayer(newLayerID, parentLayer, nil, "", false, &storage.LayerOptions{ - OriginalDigest: trustedOriginalDigest, - UncompressedDigest: trustedUncompressedDigest, + OriginalDigest: trustedOriginalDigest, + // This might be "" if trusted.layerIdentifiedByTOC; in that case PutLayer will compute the value from the stream. + UncompressedDigest: trusted.diffID, }, file) if err != nil && !errors.Is(err, storage.ErrDuplicateID) { - return nil, fmt.Errorf("adding layer with blob %q: %w", layerDigest, err) + return nil, fmt.Errorf("adding layer with blob %q/%q/%q: %w", trusted.blobDigest, trusted.tocDigest, trusted.diffID, err) } return layer, nil } @@ -1155,7 +1274,10 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t // Create the image record, pointing to the most-recently added layer. 
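The PutLayer call above follows a small but deliberate pattern: supplying a digest we already trust lets PutLayer skip re-hashing the stream, while an empty UncompressedDigest makes it compute the diffID itself. A hedged standalone sketch (commitDiff is a hypothetical wrapper; the Store API and LayerOptions fields are the ones used in the hunk):

package sketch

import (
	"errors"
	"fmt"
	"io"

	"github.com/containers/storage"
	"github.com/opencontainers/go-digest"
)

// commitDiff sketches the PutLayer bookkeeping above.
func commitDiff(store storage.Store, newLayerID, parent string,
	originalDigest, diffID digest.Digest, diff io.Reader) (*storage.Layer, error) {
	layer, _, err := store.PutLayer(newLayerID, parent, nil, "", false,
		&storage.LayerOptions{
			// A value we already trust; avoids a redundant digest pass.
			OriginalDigest: originalDigest,
			// May be "" for TOC-identified layers; PutLayer then computes it.
			UncompressedDigest: diffID,
		}, diff)
	if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
		return nil, fmt.Errorf("adding layer %q/%q: %w", originalDigest, diffID, err)
	}
	return layer, nil // ErrDuplicateID means the layer already exists; reuse it.
}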
intendedID := s.imageRef.id if intendedID == "" { - intendedID = s.computeID(man) + intendedID, err = s.computeID(man) + if err != nil { + return err + } } oldNames := []string{} img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options) diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index 4d889f809..64e468725 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -8,7 +8,7 @@ const ( // VersionMinor is for functionality in a backwards-compatible manner VersionMinor = 32 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 + VersionPatch = 2 // VersionDev indicates development branch. Releases will be empty string. VersionDev = "" diff --git a/vendor/github.com/golang/glog/glog_file.go b/vendor/github.com/golang/glog/glog_file.go index e7d125c5a..a1551dbc8 100644 --- a/vendor/github.com/golang/glog/glog_file.go +++ b/vendor/github.com/golang/glog/glog_file.go @@ -369,9 +369,6 @@ func (s *fileSink) Flush() error { // flush flushes all logs of severity threshold or greater. func (s *fileSink) flush(threshold logsink.Severity) error { - s.mu.Lock() - defer s.mu.Unlock() - var firstErr error updateErr := func(err error) { if err != nil && firstErr == nil { @@ -379,13 +376,23 @@ func (s *fileSink) flush(threshold logsink.Severity) error { } } - // Flush from fatal down, in case there's trouble flushing. - for sev := logsink.Fatal; sev >= threshold; sev-- { - file := s.file[sev] - if file != nil { - updateErr(file.Flush()) - updateErr(file.Sync()) + // Remember where we flushed, so we can call sync without holding + // the lock. + var files []flushSyncWriter + func() { + s.mu.Lock() + defer s.mu.Unlock() + // Flush from fatal down, in case there's trouble flushing. + for sev := logsink.Fatal; sev >= threshold; sev-- { + if file := s.file[sev]; file != nil { + updateErr(file.Flush()) + files = append(files, file) + } } + }() + + for _, file := range files { + updateErr(file.Sync()) } return firstErr diff --git a/vendor/github.com/googleapis/gax-go/v2/iterator/iterator.go b/vendor/github.com/googleapis/gax-go/v2/iterator/iterator.go new file mode 100644 index 000000000..d4d6019ff --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/iterator/iterator.go @@ -0,0 +1,63 @@ +// Copyright 2024, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +//go:build go1.23 + +// Package iterator contains helper for working with iterators. It is meant for +// internal use only by the Go Client Libraries. +package iterator + +import ( + "iter" + + otherit "google.golang.org/api/iterator" +) + +// RangeAdapter transforms client iterator type into a [iter.Seq2] that can +// be used with Go's range expressions. +// +// This is for internal use only. +func RangeAdapter[T any](next func() (T, error)) iter.Seq2[T, error] { + var err error + return func(yield func(T, error) bool) { + for { + if err != nil { + return + } + var resp T + resp, err = next() + if err == otherit.Done { + return + } + if !yield(resp, err) { + return + } + } + } +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_table.go b/vendor/github.com/mattn/go-runewidth/runewidth_table.go index e5d890c26..ad025ad52 100644 --- a/vendor/github.com/mattn/go-runewidth/runewidth_table.go +++ b/vendor/github.com/mattn/go-runewidth/runewidth_table.go @@ -4,20 +4,21 @@ package runewidth var combining = table{ {0x0300, 0x036F}, {0x0483, 0x0489}, {0x07EB, 0x07F3}, - {0x0C00, 0x0C00}, {0x0C04, 0x0C04}, {0x0D00, 0x0D01}, - {0x135D, 0x135F}, {0x1A7F, 0x1A7F}, {0x1AB0, 0x1AC0}, - {0x1B6B, 0x1B73}, {0x1DC0, 0x1DF9}, {0x1DFB, 0x1DFF}, + {0x0C00, 0x0C00}, {0x0C04, 0x0C04}, {0x0CF3, 0x0CF3}, + {0x0D00, 0x0D01}, {0x135D, 0x135F}, {0x1A7F, 0x1A7F}, + {0x1AB0, 0x1ACE}, {0x1B6B, 0x1B73}, {0x1DC0, 0x1DFF}, {0x20D0, 0x20F0}, {0x2CEF, 0x2CF1}, {0x2DE0, 0x2DFF}, {0x3099, 0x309A}, {0xA66F, 0xA672}, {0xA674, 0xA67D}, {0xA69E, 0xA69F}, {0xA6F0, 0xA6F1}, {0xA8E0, 0xA8F1}, {0xFE20, 0xFE2F}, {0x101FD, 0x101FD}, {0x10376, 0x1037A}, - {0x10EAB, 0x10EAC}, {0x10F46, 0x10F50}, {0x11300, 0x11301}, - {0x1133B, 0x1133C}, {0x11366, 0x1136C}, {0x11370, 0x11374}, - {0x16AF0, 0x16AF4}, {0x1D165, 0x1D169}, {0x1D16D, 0x1D172}, + {0x10EAB, 0x10EAC}, {0x10F46, 0x10F50}, {0x10F82, 0x10F85}, + {0x11300, 0x11301}, {0x1133B, 0x1133C}, {0x11366, 0x1136C}, + {0x11370, 0x11374}, {0x16AF0, 0x16AF4}, {0x1CF00, 0x1CF2D}, + {0x1CF30, 0x1CF46}, {0x1D165, 0x1D169}, {0x1D16D, 0x1D172}, {0x1D17B, 0x1D182}, {0x1D185, 0x1D18B}, {0x1D1AA, 0x1D1AD}, {0x1D242, 0x1D244}, {0x1E000, 0x1E006}, {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, {0x1E026, 0x1E02A}, - {0x1E8D0, 0x1E8D6}, + {0x1E08F, 0x1E08F}, {0x1E8D0, 0x1E8D6}, } var doublewidth = table{ @@ -33,33 +34,34 @@ var doublewidth = table{ {0x2753, 0x2755}, {0x2757, 0x2757}, {0x2795, 0x2797}, {0x27B0, 0x27B0}, {0x27BF, 0x27BF}, {0x2B1B, 0x2B1C}, {0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x2E80, 0x2E99}, - {0x2E9B, 0x2EF3}, {0x2F00, 0x2FD5}, {0x2FF0, 0x2FFB}, - {0x3000, 0x303E}, {0x3041, 0x3096}, {0x3099, 0x30FF}, - {0x3105, 0x312F}, {0x3131, 0x318E}, {0x3190, 0x31E3}, - {0x31F0, 0x321E}, {0x3220, 0x3247}, {0x3250, 0x4DBF}, - {0x4E00, 0xA48C}, {0xA490, 0xA4C6}, {0xA960, 0xA97C}, - {0xAC00, 0xD7A3}, {0xF900, 0xFAFF}, {0xFE10, 0xFE19}, - {0xFE30, 0xFE52}, {0xFE54, 0xFE66}, {0xFE68, 0xFE6B}, - {0xFF01, 0xFF60}, {0xFFE0, 0xFFE6}, {0x16FE0, 0x16FE4}, - 
{0x16FF0, 0x16FF1}, {0x17000, 0x187F7}, {0x18800, 0x18CD5}, - {0x18D00, 0x18D08}, {0x1B000, 0x1B11E}, {0x1B150, 0x1B152}, - {0x1B164, 0x1B167}, {0x1B170, 0x1B2FB}, {0x1F004, 0x1F004}, - {0x1F0CF, 0x1F0CF}, {0x1F18E, 0x1F18E}, {0x1F191, 0x1F19A}, - {0x1F200, 0x1F202}, {0x1F210, 0x1F23B}, {0x1F240, 0x1F248}, - {0x1F250, 0x1F251}, {0x1F260, 0x1F265}, {0x1F300, 0x1F320}, - {0x1F32D, 0x1F335}, {0x1F337, 0x1F37C}, {0x1F37E, 0x1F393}, - {0x1F3A0, 0x1F3CA}, {0x1F3CF, 0x1F3D3}, {0x1F3E0, 0x1F3F0}, - {0x1F3F4, 0x1F3F4}, {0x1F3F8, 0x1F43E}, {0x1F440, 0x1F440}, - {0x1F442, 0x1F4FC}, {0x1F4FF, 0x1F53D}, {0x1F54B, 0x1F54E}, - {0x1F550, 0x1F567}, {0x1F57A, 0x1F57A}, {0x1F595, 0x1F596}, - {0x1F5A4, 0x1F5A4}, {0x1F5FB, 0x1F64F}, {0x1F680, 0x1F6C5}, - {0x1F6CC, 0x1F6CC}, {0x1F6D0, 0x1F6D2}, {0x1F6D5, 0x1F6D7}, - {0x1F6EB, 0x1F6EC}, {0x1F6F4, 0x1F6FC}, {0x1F7E0, 0x1F7EB}, - {0x1F90C, 0x1F93A}, {0x1F93C, 0x1F945}, {0x1F947, 0x1F978}, - {0x1F97A, 0x1F9CB}, {0x1F9CD, 0x1F9FF}, {0x1FA70, 0x1FA74}, - {0x1FA78, 0x1FA7A}, {0x1FA80, 0x1FA86}, {0x1FA90, 0x1FAA8}, - {0x1FAB0, 0x1FAB6}, {0x1FAC0, 0x1FAC2}, {0x1FAD0, 0x1FAD6}, - {0x20000, 0x2FFFD}, {0x30000, 0x3FFFD}, + {0x2E9B, 0x2EF3}, {0x2F00, 0x2FD5}, {0x2FF0, 0x303E}, + {0x3041, 0x3096}, {0x3099, 0x30FF}, {0x3105, 0x312F}, + {0x3131, 0x318E}, {0x3190, 0x31E3}, {0x31EF, 0x321E}, + {0x3220, 0x3247}, {0x3250, 0x4DBF}, {0x4E00, 0xA48C}, + {0xA490, 0xA4C6}, {0xA960, 0xA97C}, {0xAC00, 0xD7A3}, + {0xF900, 0xFAFF}, {0xFE10, 0xFE19}, {0xFE30, 0xFE52}, + {0xFE54, 0xFE66}, {0xFE68, 0xFE6B}, {0xFF01, 0xFF60}, + {0xFFE0, 0xFFE6}, {0x16FE0, 0x16FE4}, {0x16FF0, 0x16FF1}, + {0x17000, 0x187F7}, {0x18800, 0x18CD5}, {0x18D00, 0x18D08}, + {0x1AFF0, 0x1AFF3}, {0x1AFF5, 0x1AFFB}, {0x1AFFD, 0x1AFFE}, + {0x1B000, 0x1B122}, {0x1B132, 0x1B132}, {0x1B150, 0x1B152}, + {0x1B155, 0x1B155}, {0x1B164, 0x1B167}, {0x1B170, 0x1B2FB}, + {0x1F004, 0x1F004}, {0x1F0CF, 0x1F0CF}, {0x1F18E, 0x1F18E}, + {0x1F191, 0x1F19A}, {0x1F200, 0x1F202}, {0x1F210, 0x1F23B}, + {0x1F240, 0x1F248}, {0x1F250, 0x1F251}, {0x1F260, 0x1F265}, + {0x1F300, 0x1F320}, {0x1F32D, 0x1F335}, {0x1F337, 0x1F37C}, + {0x1F37E, 0x1F393}, {0x1F3A0, 0x1F3CA}, {0x1F3CF, 0x1F3D3}, + {0x1F3E0, 0x1F3F0}, {0x1F3F4, 0x1F3F4}, {0x1F3F8, 0x1F43E}, + {0x1F440, 0x1F440}, {0x1F442, 0x1F4FC}, {0x1F4FF, 0x1F53D}, + {0x1F54B, 0x1F54E}, {0x1F550, 0x1F567}, {0x1F57A, 0x1F57A}, + {0x1F595, 0x1F596}, {0x1F5A4, 0x1F5A4}, {0x1F5FB, 0x1F64F}, + {0x1F680, 0x1F6C5}, {0x1F6CC, 0x1F6CC}, {0x1F6D0, 0x1F6D2}, + {0x1F6D5, 0x1F6D7}, {0x1F6DC, 0x1F6DF}, {0x1F6EB, 0x1F6EC}, + {0x1F6F4, 0x1F6FC}, {0x1F7E0, 0x1F7EB}, {0x1F7F0, 0x1F7F0}, + {0x1F90C, 0x1F93A}, {0x1F93C, 0x1F945}, {0x1F947, 0x1F9FF}, + {0x1FA70, 0x1FA7C}, {0x1FA80, 0x1FA88}, {0x1FA90, 0x1FABD}, + {0x1FABF, 0x1FAC5}, {0x1FACE, 0x1FADB}, {0x1FAE0, 0x1FAE8}, + {0x1FAF0, 0x1FAF8}, {0x20000, 0x2FFFD}, {0x30000, 0x3FFFD}, } var ambiguous = table{ @@ -154,43 +156,43 @@ var neutral = table{ {0x0402, 0x040F}, {0x0450, 0x0450}, {0x0452, 0x052F}, {0x0531, 0x0556}, {0x0559, 0x058A}, {0x058D, 0x058F}, {0x0591, 0x05C7}, {0x05D0, 0x05EA}, {0x05EF, 0x05F4}, - {0x0600, 0x061C}, {0x061E, 0x070D}, {0x070F, 0x074A}, - {0x074D, 0x07B1}, {0x07C0, 0x07FA}, {0x07FD, 0x082D}, - {0x0830, 0x083E}, {0x0840, 0x085B}, {0x085E, 0x085E}, - {0x0860, 0x086A}, {0x08A0, 0x08B4}, {0x08B6, 0x08C7}, - {0x08D3, 0x0983}, {0x0985, 0x098C}, {0x098F, 0x0990}, - {0x0993, 0x09A8}, {0x09AA, 0x09B0}, {0x09B2, 0x09B2}, - {0x09B6, 0x09B9}, {0x09BC, 0x09C4}, {0x09C7, 0x09C8}, - {0x09CB, 0x09CE}, {0x09D7, 0x09D7}, {0x09DC, 0x09DD}, - {0x09DF, 0x09E3}, {0x09E6, 
0x09FE}, {0x0A01, 0x0A03}, - {0x0A05, 0x0A0A}, {0x0A0F, 0x0A10}, {0x0A13, 0x0A28}, - {0x0A2A, 0x0A30}, {0x0A32, 0x0A33}, {0x0A35, 0x0A36}, - {0x0A38, 0x0A39}, {0x0A3C, 0x0A3C}, {0x0A3E, 0x0A42}, - {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D}, {0x0A51, 0x0A51}, - {0x0A59, 0x0A5C}, {0x0A5E, 0x0A5E}, {0x0A66, 0x0A76}, - {0x0A81, 0x0A83}, {0x0A85, 0x0A8D}, {0x0A8F, 0x0A91}, - {0x0A93, 0x0AA8}, {0x0AAA, 0x0AB0}, {0x0AB2, 0x0AB3}, - {0x0AB5, 0x0AB9}, {0x0ABC, 0x0AC5}, {0x0AC7, 0x0AC9}, - {0x0ACB, 0x0ACD}, {0x0AD0, 0x0AD0}, {0x0AE0, 0x0AE3}, - {0x0AE6, 0x0AF1}, {0x0AF9, 0x0AFF}, {0x0B01, 0x0B03}, - {0x0B05, 0x0B0C}, {0x0B0F, 0x0B10}, {0x0B13, 0x0B28}, - {0x0B2A, 0x0B30}, {0x0B32, 0x0B33}, {0x0B35, 0x0B39}, - {0x0B3C, 0x0B44}, {0x0B47, 0x0B48}, {0x0B4B, 0x0B4D}, - {0x0B55, 0x0B57}, {0x0B5C, 0x0B5D}, {0x0B5F, 0x0B63}, - {0x0B66, 0x0B77}, {0x0B82, 0x0B83}, {0x0B85, 0x0B8A}, - {0x0B8E, 0x0B90}, {0x0B92, 0x0B95}, {0x0B99, 0x0B9A}, - {0x0B9C, 0x0B9C}, {0x0B9E, 0x0B9F}, {0x0BA3, 0x0BA4}, - {0x0BA8, 0x0BAA}, {0x0BAE, 0x0BB9}, {0x0BBE, 0x0BC2}, - {0x0BC6, 0x0BC8}, {0x0BCA, 0x0BCD}, {0x0BD0, 0x0BD0}, - {0x0BD7, 0x0BD7}, {0x0BE6, 0x0BFA}, {0x0C00, 0x0C0C}, - {0x0C0E, 0x0C10}, {0x0C12, 0x0C28}, {0x0C2A, 0x0C39}, - {0x0C3D, 0x0C44}, {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D}, - {0x0C55, 0x0C56}, {0x0C58, 0x0C5A}, {0x0C60, 0x0C63}, + {0x0600, 0x070D}, {0x070F, 0x074A}, {0x074D, 0x07B1}, + {0x07C0, 0x07FA}, {0x07FD, 0x082D}, {0x0830, 0x083E}, + {0x0840, 0x085B}, {0x085E, 0x085E}, {0x0860, 0x086A}, + {0x0870, 0x088E}, {0x0890, 0x0891}, {0x0898, 0x0983}, + {0x0985, 0x098C}, {0x098F, 0x0990}, {0x0993, 0x09A8}, + {0x09AA, 0x09B0}, {0x09B2, 0x09B2}, {0x09B6, 0x09B9}, + {0x09BC, 0x09C4}, {0x09C7, 0x09C8}, {0x09CB, 0x09CE}, + {0x09D7, 0x09D7}, {0x09DC, 0x09DD}, {0x09DF, 0x09E3}, + {0x09E6, 0x09FE}, {0x0A01, 0x0A03}, {0x0A05, 0x0A0A}, + {0x0A0F, 0x0A10}, {0x0A13, 0x0A28}, {0x0A2A, 0x0A30}, + {0x0A32, 0x0A33}, {0x0A35, 0x0A36}, {0x0A38, 0x0A39}, + {0x0A3C, 0x0A3C}, {0x0A3E, 0x0A42}, {0x0A47, 0x0A48}, + {0x0A4B, 0x0A4D}, {0x0A51, 0x0A51}, {0x0A59, 0x0A5C}, + {0x0A5E, 0x0A5E}, {0x0A66, 0x0A76}, {0x0A81, 0x0A83}, + {0x0A85, 0x0A8D}, {0x0A8F, 0x0A91}, {0x0A93, 0x0AA8}, + {0x0AAA, 0x0AB0}, {0x0AB2, 0x0AB3}, {0x0AB5, 0x0AB9}, + {0x0ABC, 0x0AC5}, {0x0AC7, 0x0AC9}, {0x0ACB, 0x0ACD}, + {0x0AD0, 0x0AD0}, {0x0AE0, 0x0AE3}, {0x0AE6, 0x0AF1}, + {0x0AF9, 0x0AFF}, {0x0B01, 0x0B03}, {0x0B05, 0x0B0C}, + {0x0B0F, 0x0B10}, {0x0B13, 0x0B28}, {0x0B2A, 0x0B30}, + {0x0B32, 0x0B33}, {0x0B35, 0x0B39}, {0x0B3C, 0x0B44}, + {0x0B47, 0x0B48}, {0x0B4B, 0x0B4D}, {0x0B55, 0x0B57}, + {0x0B5C, 0x0B5D}, {0x0B5F, 0x0B63}, {0x0B66, 0x0B77}, + {0x0B82, 0x0B83}, {0x0B85, 0x0B8A}, {0x0B8E, 0x0B90}, + {0x0B92, 0x0B95}, {0x0B99, 0x0B9A}, {0x0B9C, 0x0B9C}, + {0x0B9E, 0x0B9F}, {0x0BA3, 0x0BA4}, {0x0BA8, 0x0BAA}, + {0x0BAE, 0x0BB9}, {0x0BBE, 0x0BC2}, {0x0BC6, 0x0BC8}, + {0x0BCA, 0x0BCD}, {0x0BD0, 0x0BD0}, {0x0BD7, 0x0BD7}, + {0x0BE6, 0x0BFA}, {0x0C00, 0x0C0C}, {0x0C0E, 0x0C10}, + {0x0C12, 0x0C28}, {0x0C2A, 0x0C39}, {0x0C3C, 0x0C44}, + {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D}, {0x0C55, 0x0C56}, + {0x0C58, 0x0C5A}, {0x0C5D, 0x0C5D}, {0x0C60, 0x0C63}, {0x0C66, 0x0C6F}, {0x0C77, 0x0C8C}, {0x0C8E, 0x0C90}, {0x0C92, 0x0CA8}, {0x0CAA, 0x0CB3}, {0x0CB5, 0x0CB9}, {0x0CBC, 0x0CC4}, {0x0CC6, 0x0CC8}, {0x0CCA, 0x0CCD}, - {0x0CD5, 0x0CD6}, {0x0CDE, 0x0CDE}, {0x0CE0, 0x0CE3}, - {0x0CE6, 0x0CEF}, {0x0CF1, 0x0CF2}, {0x0D00, 0x0D0C}, + {0x0CD5, 0x0CD6}, {0x0CDD, 0x0CDE}, {0x0CE0, 0x0CE3}, + {0x0CE6, 0x0CEF}, {0x0CF1, 0x0CF3}, {0x0D00, 0x0D0C}, {0x0D0E, 0x0D10}, {0x0D12, 0x0D44}, {0x0D46, 0x0D48}, 
{0x0D4A, 0x0D4F}, {0x0D54, 0x0D63}, {0x0D66, 0x0D7F}, {0x0D81, 0x0D83}, {0x0D85, 0x0D96}, {0x0D9A, 0x0DB1}, @@ -200,7 +202,7 @@ var neutral = table{ {0x0E01, 0x0E3A}, {0x0E3F, 0x0E5B}, {0x0E81, 0x0E82}, {0x0E84, 0x0E84}, {0x0E86, 0x0E8A}, {0x0E8C, 0x0EA3}, {0x0EA5, 0x0EA5}, {0x0EA7, 0x0EBD}, {0x0EC0, 0x0EC4}, - {0x0EC6, 0x0EC6}, {0x0EC8, 0x0ECD}, {0x0ED0, 0x0ED9}, + {0x0EC6, 0x0EC6}, {0x0EC8, 0x0ECE}, {0x0ED0, 0x0ED9}, {0x0EDC, 0x0EDF}, {0x0F00, 0x0F47}, {0x0F49, 0x0F6C}, {0x0F71, 0x0F97}, {0x0F99, 0x0FBC}, {0x0FBE, 0x0FCC}, {0x0FCE, 0x0FDA}, {0x1000, 0x10C5}, {0x10C7, 0x10C7}, @@ -212,20 +214,19 @@ var neutral = table{ {0x12D8, 0x1310}, {0x1312, 0x1315}, {0x1318, 0x135A}, {0x135D, 0x137C}, {0x1380, 0x1399}, {0x13A0, 0x13F5}, {0x13F8, 0x13FD}, {0x1400, 0x169C}, {0x16A0, 0x16F8}, - {0x1700, 0x170C}, {0x170E, 0x1714}, {0x1720, 0x1736}, - {0x1740, 0x1753}, {0x1760, 0x176C}, {0x176E, 0x1770}, - {0x1772, 0x1773}, {0x1780, 0x17DD}, {0x17E0, 0x17E9}, - {0x17F0, 0x17F9}, {0x1800, 0x180E}, {0x1810, 0x1819}, - {0x1820, 0x1878}, {0x1880, 0x18AA}, {0x18B0, 0x18F5}, - {0x1900, 0x191E}, {0x1920, 0x192B}, {0x1930, 0x193B}, - {0x1940, 0x1940}, {0x1944, 0x196D}, {0x1970, 0x1974}, - {0x1980, 0x19AB}, {0x19B0, 0x19C9}, {0x19D0, 0x19DA}, - {0x19DE, 0x1A1B}, {0x1A1E, 0x1A5E}, {0x1A60, 0x1A7C}, - {0x1A7F, 0x1A89}, {0x1A90, 0x1A99}, {0x1AA0, 0x1AAD}, - {0x1AB0, 0x1AC0}, {0x1B00, 0x1B4B}, {0x1B50, 0x1B7C}, - {0x1B80, 0x1BF3}, {0x1BFC, 0x1C37}, {0x1C3B, 0x1C49}, - {0x1C4D, 0x1C88}, {0x1C90, 0x1CBA}, {0x1CBD, 0x1CC7}, - {0x1CD0, 0x1CFA}, {0x1D00, 0x1DF9}, {0x1DFB, 0x1F15}, + {0x1700, 0x1715}, {0x171F, 0x1736}, {0x1740, 0x1753}, + {0x1760, 0x176C}, {0x176E, 0x1770}, {0x1772, 0x1773}, + {0x1780, 0x17DD}, {0x17E0, 0x17E9}, {0x17F0, 0x17F9}, + {0x1800, 0x1819}, {0x1820, 0x1878}, {0x1880, 0x18AA}, + {0x18B0, 0x18F5}, {0x1900, 0x191E}, {0x1920, 0x192B}, + {0x1930, 0x193B}, {0x1940, 0x1940}, {0x1944, 0x196D}, + {0x1970, 0x1974}, {0x1980, 0x19AB}, {0x19B0, 0x19C9}, + {0x19D0, 0x19DA}, {0x19DE, 0x1A1B}, {0x1A1E, 0x1A5E}, + {0x1A60, 0x1A7C}, {0x1A7F, 0x1A89}, {0x1A90, 0x1A99}, + {0x1AA0, 0x1AAD}, {0x1AB0, 0x1ACE}, {0x1B00, 0x1B4C}, + {0x1B50, 0x1B7E}, {0x1B80, 0x1BF3}, {0x1BFC, 0x1C37}, + {0x1C3B, 0x1C49}, {0x1C4D, 0x1C88}, {0x1C90, 0x1CBA}, + {0x1CBD, 0x1CC7}, {0x1CD0, 0x1CFA}, {0x1D00, 0x1F15}, {0x1F18, 0x1F1D}, {0x1F20, 0x1F45}, {0x1F48, 0x1F4D}, {0x1F50, 0x1F57}, {0x1F59, 0x1F59}, {0x1F5B, 0x1F5B}, {0x1F5D, 0x1F5D}, {0x1F5F, 0x1F7D}, {0x1F80, 0x1FB4}, @@ -237,7 +238,7 @@ var neutral = table{ {0x2036, 0x203A}, {0x203C, 0x203D}, {0x203F, 0x2064}, {0x2066, 0x2071}, {0x2075, 0x207E}, {0x2080, 0x2080}, {0x2085, 0x208E}, {0x2090, 0x209C}, {0x20A0, 0x20A8}, - {0x20AA, 0x20AB}, {0x20AD, 0x20BF}, {0x20D0, 0x20F0}, + {0x20AA, 0x20AB}, {0x20AD, 0x20C0}, {0x20D0, 0x20F0}, {0x2100, 0x2102}, {0x2104, 0x2104}, {0x2106, 0x2108}, {0x210A, 0x2112}, {0x2114, 0x2115}, {0x2117, 0x2120}, {0x2123, 0x2125}, {0x2127, 0x212A}, {0x212C, 0x2152}, @@ -275,15 +276,15 @@ var neutral = table{ {0x2780, 0x2794}, {0x2798, 0x27AF}, {0x27B1, 0x27BE}, {0x27C0, 0x27E5}, {0x27EE, 0x2984}, {0x2987, 0x2B1A}, {0x2B1D, 0x2B4F}, {0x2B51, 0x2B54}, {0x2B5A, 0x2B73}, - {0x2B76, 0x2B95}, {0x2B97, 0x2C2E}, {0x2C30, 0x2C5E}, - {0x2C60, 0x2CF3}, {0x2CF9, 0x2D25}, {0x2D27, 0x2D27}, - {0x2D2D, 0x2D2D}, {0x2D30, 0x2D67}, {0x2D6F, 0x2D70}, - {0x2D7F, 0x2D96}, {0x2DA0, 0x2DA6}, {0x2DA8, 0x2DAE}, - {0x2DB0, 0x2DB6}, {0x2DB8, 0x2DBE}, {0x2DC0, 0x2DC6}, - {0x2DC8, 0x2DCE}, {0x2DD0, 0x2DD6}, {0x2DD8, 0x2DDE}, - {0x2DE0, 0x2E52}, {0x303F, 0x303F}, {0x4DC0, 0x4DFF}, - {0xA4D0, 
0xA62B}, {0xA640, 0xA6F7}, {0xA700, 0xA7BF}, - {0xA7C2, 0xA7CA}, {0xA7F5, 0xA82C}, {0xA830, 0xA839}, + {0x2B76, 0x2B95}, {0x2B97, 0x2CF3}, {0x2CF9, 0x2D25}, + {0x2D27, 0x2D27}, {0x2D2D, 0x2D2D}, {0x2D30, 0x2D67}, + {0x2D6F, 0x2D70}, {0x2D7F, 0x2D96}, {0x2DA0, 0x2DA6}, + {0x2DA8, 0x2DAE}, {0x2DB0, 0x2DB6}, {0x2DB8, 0x2DBE}, + {0x2DC0, 0x2DC6}, {0x2DC8, 0x2DCE}, {0x2DD0, 0x2DD6}, + {0x2DD8, 0x2DDE}, {0x2DE0, 0x2E5D}, {0x303F, 0x303F}, + {0x4DC0, 0x4DFF}, {0xA4D0, 0xA62B}, {0xA640, 0xA6F7}, + {0xA700, 0xA7CA}, {0xA7D0, 0xA7D1}, {0xA7D3, 0xA7D3}, + {0xA7D5, 0xA7D9}, {0xA7F2, 0xA82C}, {0xA830, 0xA839}, {0xA840, 0xA877}, {0xA880, 0xA8C5}, {0xA8CE, 0xA8D9}, {0xA8E0, 0xA953}, {0xA95F, 0xA95F}, {0xA980, 0xA9CD}, {0xA9CF, 0xA9D9}, {0xA9DE, 0xA9FE}, {0xAA00, 0xAA36}, @@ -294,8 +295,8 @@ var neutral = table{ {0xD7B0, 0xD7C6}, {0xD7CB, 0xD7FB}, {0xD800, 0xDFFF}, {0xFB00, 0xFB06}, {0xFB13, 0xFB17}, {0xFB1D, 0xFB36}, {0xFB38, 0xFB3C}, {0xFB3E, 0xFB3E}, {0xFB40, 0xFB41}, - {0xFB43, 0xFB44}, {0xFB46, 0xFBC1}, {0xFBD3, 0xFD3F}, - {0xFD50, 0xFD8F}, {0xFD92, 0xFDC7}, {0xFDF0, 0xFDFD}, + {0xFB43, 0xFB44}, {0xFB46, 0xFBC2}, {0xFBD3, 0xFD8F}, + {0xFD92, 0xFDC7}, {0xFDCF, 0xFDCF}, {0xFDF0, 0xFDFF}, {0xFE20, 0xFE2F}, {0xFE70, 0xFE74}, {0xFE76, 0xFEFC}, {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFC}, {0x10000, 0x1000B}, {0x1000D, 0x10026}, {0x10028, 0x1003A}, {0x1003C, 0x1003D}, @@ -307,44 +308,48 @@ var neutral = table{ {0x10380, 0x1039D}, {0x1039F, 0x103C3}, {0x103C8, 0x103D5}, {0x10400, 0x1049D}, {0x104A0, 0x104A9}, {0x104B0, 0x104D3}, {0x104D8, 0x104FB}, {0x10500, 0x10527}, {0x10530, 0x10563}, - {0x1056F, 0x1056F}, {0x10600, 0x10736}, {0x10740, 0x10755}, - {0x10760, 0x10767}, {0x10800, 0x10805}, {0x10808, 0x10808}, - {0x1080A, 0x10835}, {0x10837, 0x10838}, {0x1083C, 0x1083C}, - {0x1083F, 0x10855}, {0x10857, 0x1089E}, {0x108A7, 0x108AF}, - {0x108E0, 0x108F2}, {0x108F4, 0x108F5}, {0x108FB, 0x1091B}, - {0x1091F, 0x10939}, {0x1093F, 0x1093F}, {0x10980, 0x109B7}, - {0x109BC, 0x109CF}, {0x109D2, 0x10A03}, {0x10A05, 0x10A06}, - {0x10A0C, 0x10A13}, {0x10A15, 0x10A17}, {0x10A19, 0x10A35}, - {0x10A38, 0x10A3A}, {0x10A3F, 0x10A48}, {0x10A50, 0x10A58}, - {0x10A60, 0x10A9F}, {0x10AC0, 0x10AE6}, {0x10AEB, 0x10AF6}, - {0x10B00, 0x10B35}, {0x10B39, 0x10B55}, {0x10B58, 0x10B72}, - {0x10B78, 0x10B91}, {0x10B99, 0x10B9C}, {0x10BA9, 0x10BAF}, - {0x10C00, 0x10C48}, {0x10C80, 0x10CB2}, {0x10CC0, 0x10CF2}, - {0x10CFA, 0x10D27}, {0x10D30, 0x10D39}, {0x10E60, 0x10E7E}, - {0x10E80, 0x10EA9}, {0x10EAB, 0x10EAD}, {0x10EB0, 0x10EB1}, - {0x10F00, 0x10F27}, {0x10F30, 0x10F59}, {0x10FB0, 0x10FCB}, - {0x10FE0, 0x10FF6}, {0x11000, 0x1104D}, {0x11052, 0x1106F}, - {0x1107F, 0x110C1}, {0x110CD, 0x110CD}, {0x110D0, 0x110E8}, - {0x110F0, 0x110F9}, {0x11100, 0x11134}, {0x11136, 0x11147}, - {0x11150, 0x11176}, {0x11180, 0x111DF}, {0x111E1, 0x111F4}, - {0x11200, 0x11211}, {0x11213, 0x1123E}, {0x11280, 0x11286}, - {0x11288, 0x11288}, {0x1128A, 0x1128D}, {0x1128F, 0x1129D}, - {0x1129F, 0x112A9}, {0x112B0, 0x112EA}, {0x112F0, 0x112F9}, - {0x11300, 0x11303}, {0x11305, 0x1130C}, {0x1130F, 0x11310}, - {0x11313, 0x11328}, {0x1132A, 0x11330}, {0x11332, 0x11333}, - {0x11335, 0x11339}, {0x1133B, 0x11344}, {0x11347, 0x11348}, - {0x1134B, 0x1134D}, {0x11350, 0x11350}, {0x11357, 0x11357}, - {0x1135D, 0x11363}, {0x11366, 0x1136C}, {0x11370, 0x11374}, - {0x11400, 0x1145B}, {0x1145D, 0x11461}, {0x11480, 0x114C7}, - {0x114D0, 0x114D9}, {0x11580, 0x115B5}, {0x115B8, 0x115DD}, - {0x11600, 0x11644}, {0x11650, 0x11659}, {0x11660, 0x1166C}, - {0x11680, 0x116B8}, {0x116C0, 0x116C9}, 
{0x11700, 0x1171A}, - {0x1171D, 0x1172B}, {0x11730, 0x1173F}, {0x11800, 0x1183B}, - {0x118A0, 0x118F2}, {0x118FF, 0x11906}, {0x11909, 0x11909}, - {0x1190C, 0x11913}, {0x11915, 0x11916}, {0x11918, 0x11935}, - {0x11937, 0x11938}, {0x1193B, 0x11946}, {0x11950, 0x11959}, - {0x119A0, 0x119A7}, {0x119AA, 0x119D7}, {0x119DA, 0x119E4}, - {0x11A00, 0x11A47}, {0x11A50, 0x11AA2}, {0x11AC0, 0x11AF8}, + {0x1056F, 0x1057A}, {0x1057C, 0x1058A}, {0x1058C, 0x10592}, + {0x10594, 0x10595}, {0x10597, 0x105A1}, {0x105A3, 0x105B1}, + {0x105B3, 0x105B9}, {0x105BB, 0x105BC}, {0x10600, 0x10736}, + {0x10740, 0x10755}, {0x10760, 0x10767}, {0x10780, 0x10785}, + {0x10787, 0x107B0}, {0x107B2, 0x107BA}, {0x10800, 0x10805}, + {0x10808, 0x10808}, {0x1080A, 0x10835}, {0x10837, 0x10838}, + {0x1083C, 0x1083C}, {0x1083F, 0x10855}, {0x10857, 0x1089E}, + {0x108A7, 0x108AF}, {0x108E0, 0x108F2}, {0x108F4, 0x108F5}, + {0x108FB, 0x1091B}, {0x1091F, 0x10939}, {0x1093F, 0x1093F}, + {0x10980, 0x109B7}, {0x109BC, 0x109CF}, {0x109D2, 0x10A03}, + {0x10A05, 0x10A06}, {0x10A0C, 0x10A13}, {0x10A15, 0x10A17}, + {0x10A19, 0x10A35}, {0x10A38, 0x10A3A}, {0x10A3F, 0x10A48}, + {0x10A50, 0x10A58}, {0x10A60, 0x10A9F}, {0x10AC0, 0x10AE6}, + {0x10AEB, 0x10AF6}, {0x10B00, 0x10B35}, {0x10B39, 0x10B55}, + {0x10B58, 0x10B72}, {0x10B78, 0x10B91}, {0x10B99, 0x10B9C}, + {0x10BA9, 0x10BAF}, {0x10C00, 0x10C48}, {0x10C80, 0x10CB2}, + {0x10CC0, 0x10CF2}, {0x10CFA, 0x10D27}, {0x10D30, 0x10D39}, + {0x10E60, 0x10E7E}, {0x10E80, 0x10EA9}, {0x10EAB, 0x10EAD}, + {0x10EB0, 0x10EB1}, {0x10EFD, 0x10F27}, {0x10F30, 0x10F59}, + {0x10F70, 0x10F89}, {0x10FB0, 0x10FCB}, {0x10FE0, 0x10FF6}, + {0x11000, 0x1104D}, {0x11052, 0x11075}, {0x1107F, 0x110C2}, + {0x110CD, 0x110CD}, {0x110D0, 0x110E8}, {0x110F0, 0x110F9}, + {0x11100, 0x11134}, {0x11136, 0x11147}, {0x11150, 0x11176}, + {0x11180, 0x111DF}, {0x111E1, 0x111F4}, {0x11200, 0x11211}, + {0x11213, 0x11241}, {0x11280, 0x11286}, {0x11288, 0x11288}, + {0x1128A, 0x1128D}, {0x1128F, 0x1129D}, {0x1129F, 0x112A9}, + {0x112B0, 0x112EA}, {0x112F0, 0x112F9}, {0x11300, 0x11303}, + {0x11305, 0x1130C}, {0x1130F, 0x11310}, {0x11313, 0x11328}, + {0x1132A, 0x11330}, {0x11332, 0x11333}, {0x11335, 0x11339}, + {0x1133B, 0x11344}, {0x11347, 0x11348}, {0x1134B, 0x1134D}, + {0x11350, 0x11350}, {0x11357, 0x11357}, {0x1135D, 0x11363}, + {0x11366, 0x1136C}, {0x11370, 0x11374}, {0x11400, 0x1145B}, + {0x1145D, 0x11461}, {0x11480, 0x114C7}, {0x114D0, 0x114D9}, + {0x11580, 0x115B5}, {0x115B8, 0x115DD}, {0x11600, 0x11644}, + {0x11650, 0x11659}, {0x11660, 0x1166C}, {0x11680, 0x116B9}, + {0x116C0, 0x116C9}, {0x11700, 0x1171A}, {0x1171D, 0x1172B}, + {0x11730, 0x11746}, {0x11800, 0x1183B}, {0x118A0, 0x118F2}, + {0x118FF, 0x11906}, {0x11909, 0x11909}, {0x1190C, 0x11913}, + {0x11915, 0x11916}, {0x11918, 0x11935}, {0x11937, 0x11938}, + {0x1193B, 0x11946}, {0x11950, 0x11959}, {0x119A0, 0x119A7}, + {0x119AA, 0x119D7}, {0x119DA, 0x119E4}, {0x11A00, 0x11A47}, + {0x11A50, 0x11AA2}, {0x11AB0, 0x11AF8}, {0x11B00, 0x11B09}, {0x11C00, 0x11C08}, {0x11C0A, 0x11C36}, {0x11C38, 0x11C45}, {0x11C50, 0x11C6C}, {0x11C70, 0x11C8F}, {0x11C92, 0x11CA7}, {0x11CA9, 0x11CB6}, {0x11D00, 0x11D06}, {0x11D08, 0x11D09}, @@ -352,30 +357,36 @@ var neutral = table{ {0x11D3F, 0x11D47}, {0x11D50, 0x11D59}, {0x11D60, 0x11D65}, {0x11D67, 0x11D68}, {0x11D6A, 0x11D8E}, {0x11D90, 0x11D91}, {0x11D93, 0x11D98}, {0x11DA0, 0x11DA9}, {0x11EE0, 0x11EF8}, + {0x11F00, 0x11F10}, {0x11F12, 0x11F3A}, {0x11F3E, 0x11F59}, {0x11FB0, 0x11FB0}, {0x11FC0, 0x11FF1}, {0x11FFF, 0x12399}, {0x12400, 0x1246E}, {0x12470, 
0x12474}, {0x12480, 0x12543}, - {0x13000, 0x1342E}, {0x13430, 0x13438}, {0x14400, 0x14646}, + {0x12F90, 0x12FF2}, {0x13000, 0x13455}, {0x14400, 0x14646}, {0x16800, 0x16A38}, {0x16A40, 0x16A5E}, {0x16A60, 0x16A69}, - {0x16A6E, 0x16A6F}, {0x16AD0, 0x16AED}, {0x16AF0, 0x16AF5}, - {0x16B00, 0x16B45}, {0x16B50, 0x16B59}, {0x16B5B, 0x16B61}, - {0x16B63, 0x16B77}, {0x16B7D, 0x16B8F}, {0x16E40, 0x16E9A}, - {0x16F00, 0x16F4A}, {0x16F4F, 0x16F87}, {0x16F8F, 0x16F9F}, - {0x1BC00, 0x1BC6A}, {0x1BC70, 0x1BC7C}, {0x1BC80, 0x1BC88}, - {0x1BC90, 0x1BC99}, {0x1BC9C, 0x1BCA3}, {0x1D000, 0x1D0F5}, - {0x1D100, 0x1D126}, {0x1D129, 0x1D1E8}, {0x1D200, 0x1D245}, - {0x1D2E0, 0x1D2F3}, {0x1D300, 0x1D356}, {0x1D360, 0x1D378}, - {0x1D400, 0x1D454}, {0x1D456, 0x1D49C}, {0x1D49E, 0x1D49F}, - {0x1D4A2, 0x1D4A2}, {0x1D4A5, 0x1D4A6}, {0x1D4A9, 0x1D4AC}, - {0x1D4AE, 0x1D4B9}, {0x1D4BB, 0x1D4BB}, {0x1D4BD, 0x1D4C3}, - {0x1D4C5, 0x1D505}, {0x1D507, 0x1D50A}, {0x1D50D, 0x1D514}, - {0x1D516, 0x1D51C}, {0x1D51E, 0x1D539}, {0x1D53B, 0x1D53E}, - {0x1D540, 0x1D544}, {0x1D546, 0x1D546}, {0x1D54A, 0x1D550}, - {0x1D552, 0x1D6A5}, {0x1D6A8, 0x1D7CB}, {0x1D7CE, 0x1DA8B}, - {0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, {0x1E000, 0x1E006}, - {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, - {0x1E026, 0x1E02A}, {0x1E100, 0x1E12C}, {0x1E130, 0x1E13D}, - {0x1E140, 0x1E149}, {0x1E14E, 0x1E14F}, {0x1E2C0, 0x1E2F9}, - {0x1E2FF, 0x1E2FF}, {0x1E800, 0x1E8C4}, {0x1E8C7, 0x1E8D6}, + {0x16A6E, 0x16ABE}, {0x16AC0, 0x16AC9}, {0x16AD0, 0x16AED}, + {0x16AF0, 0x16AF5}, {0x16B00, 0x16B45}, {0x16B50, 0x16B59}, + {0x16B5B, 0x16B61}, {0x16B63, 0x16B77}, {0x16B7D, 0x16B8F}, + {0x16E40, 0x16E9A}, {0x16F00, 0x16F4A}, {0x16F4F, 0x16F87}, + {0x16F8F, 0x16F9F}, {0x1BC00, 0x1BC6A}, {0x1BC70, 0x1BC7C}, + {0x1BC80, 0x1BC88}, {0x1BC90, 0x1BC99}, {0x1BC9C, 0x1BCA3}, + {0x1CF00, 0x1CF2D}, {0x1CF30, 0x1CF46}, {0x1CF50, 0x1CFC3}, + {0x1D000, 0x1D0F5}, {0x1D100, 0x1D126}, {0x1D129, 0x1D1EA}, + {0x1D200, 0x1D245}, {0x1D2C0, 0x1D2D3}, {0x1D2E0, 0x1D2F3}, + {0x1D300, 0x1D356}, {0x1D360, 0x1D378}, {0x1D400, 0x1D454}, + {0x1D456, 0x1D49C}, {0x1D49E, 0x1D49F}, {0x1D4A2, 0x1D4A2}, + {0x1D4A5, 0x1D4A6}, {0x1D4A9, 0x1D4AC}, {0x1D4AE, 0x1D4B9}, + {0x1D4BB, 0x1D4BB}, {0x1D4BD, 0x1D4C3}, {0x1D4C5, 0x1D505}, + {0x1D507, 0x1D50A}, {0x1D50D, 0x1D514}, {0x1D516, 0x1D51C}, + {0x1D51E, 0x1D539}, {0x1D53B, 0x1D53E}, {0x1D540, 0x1D544}, + {0x1D546, 0x1D546}, {0x1D54A, 0x1D550}, {0x1D552, 0x1D6A5}, + {0x1D6A8, 0x1D7CB}, {0x1D7CE, 0x1DA8B}, {0x1DA9B, 0x1DA9F}, + {0x1DAA1, 0x1DAAF}, {0x1DF00, 0x1DF1E}, {0x1DF25, 0x1DF2A}, + {0x1E000, 0x1E006}, {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, + {0x1E023, 0x1E024}, {0x1E026, 0x1E02A}, {0x1E030, 0x1E06D}, + {0x1E08F, 0x1E08F}, {0x1E100, 0x1E12C}, {0x1E130, 0x1E13D}, + {0x1E140, 0x1E149}, {0x1E14E, 0x1E14F}, {0x1E290, 0x1E2AE}, + {0x1E2C0, 0x1E2F9}, {0x1E2FF, 0x1E2FF}, {0x1E4D0, 0x1E4F9}, + {0x1E7E0, 0x1E7E6}, {0x1E7E8, 0x1E7EB}, {0x1E7ED, 0x1E7EE}, + {0x1E7F0, 0x1E7FE}, {0x1E800, 0x1E8C4}, {0x1E8C7, 0x1E8D6}, {0x1E900, 0x1E94B}, {0x1E950, 0x1E959}, {0x1E95E, 0x1E95F}, {0x1EC71, 0x1ECB4}, {0x1ED01, 0x1ED3D}, {0x1EE00, 0x1EE03}, {0x1EE05, 0x1EE1F}, {0x1EE21, 0x1EE22}, {0x1EE24, 0x1EE24}, @@ -400,8 +411,8 @@ var neutral = table{ {0x1F54F, 0x1F54F}, {0x1F568, 0x1F579}, {0x1F57B, 0x1F594}, {0x1F597, 0x1F5A3}, {0x1F5A5, 0x1F5FA}, {0x1F650, 0x1F67F}, {0x1F6C6, 0x1F6CB}, {0x1F6CD, 0x1F6CF}, {0x1F6D3, 0x1F6D4}, - {0x1F6E0, 0x1F6EA}, {0x1F6F0, 0x1F6F3}, {0x1F700, 0x1F773}, - {0x1F780, 0x1F7D8}, {0x1F800, 0x1F80B}, {0x1F810, 0x1F847}, + {0x1F6E0, 0x1F6EA}, 
{0x1F6F0, 0x1F6F3}, {0x1F700, 0x1F776}, + {0x1F77B, 0x1F7D9}, {0x1F800, 0x1F80B}, {0x1F810, 0x1F847}, {0x1F850, 0x1F859}, {0x1F860, 0x1F887}, {0x1F890, 0x1F8AD}, {0x1F8B0, 0x1F8B1}, {0x1F900, 0x1F90B}, {0x1F93B, 0x1F93B}, {0x1F946, 0x1F946}, {0x1FA00, 0x1FA53}, {0x1FA60, 0x1FA6D}, diff --git a/vendor/github.com/osbuild/images/pkg/blueprint/filesystem_customizations.go b/vendor/github.com/osbuild/images/pkg/blueprint/filesystem_customizations.go index ad6959a71..829f72e95 100644 --- a/vendor/github.com/osbuild/images/pkg/blueprint/filesystem_customizations.go +++ b/vendor/github.com/osbuild/images/pkg/blueprint/filesystem_customizations.go @@ -2,6 +2,7 @@ package blueprint import ( "encoding/json" + "errors" "fmt" "github.com/osbuild/images/internal/common" @@ -73,16 +74,15 @@ func (fsc *FilesystemCustomization) UnmarshalJSON(data []byte) error { // CheckMountpointsPolicy checks if the mountpoints are allowed by the policy func CheckMountpointsPolicy(mountpoints []FilesystemCustomization, mountpointAllowList *pathpolicy.PathPolicies) error { - invalidMountpoints := []string{} + var errs []error for _, m := range mountpoints { - err := mountpointAllowList.Check(m.Mountpoint) - if err != nil { - invalidMountpoints = append(invalidMountpoints, m.Mountpoint) + if err := mountpointAllowList.Check(m.Mountpoint); err != nil { + errs = append(errs, err) } } - if len(invalidMountpoints) > 0 { - return fmt.Errorf("The following custom mountpoints are not supported %+q", invalidMountpoints) + if len(errs) > 0 { + return fmt.Errorf("The following errors occurred while setting up custom mountpoints:\n%w", errors.Join(errs...)) } return nil diff --git a/vendor/github.com/osbuild/images/pkg/disk/btrfs.go b/vendor/github.com/osbuild/images/pkg/disk/btrfs.go index 329ebaca8..d6d82ea9d 100644 --- a/vendor/github.com/osbuild/images/pkg/disk/btrfs.go +++ b/vendor/github.com/osbuild/images/pkg/disk/btrfs.go @@ -177,13 +177,13 @@ func (bs *BtrfsSubvolume) GetFSSpec() FSSpec { } } -func (bs *BtrfsSubvolume) GetFSTabOptions() FSTabOptions { +func (bs *BtrfsSubvolume) GetFSTabOptions() (FSTabOptions, error) { if bs == nil { - return FSTabOptions{} + return FSTabOptions{}, nil } if bs.Name == "" { - panic(fmt.Errorf("internal error: BtrfsSubvolume.GetFSTabOptions() for %+v called without a name", bs)) + return FSTabOptions{}, fmt.Errorf("internal error: BtrfsSubvolume.GetFSTabOptions() for %+v called without a name", bs) } ops := fmt.Sprintf("subvol=%s", bs.Name) if bs.Compress != "" { @@ -196,5 +196,5 @@ func (bs *BtrfsSubvolume) GetFSTabOptions() FSTabOptions { MntOps: ops, Freq: 0, PassNo: 0, - } + }, nil } diff --git a/vendor/github.com/osbuild/images/pkg/disk/disk.go b/vendor/github.com/osbuild/images/pkg/disk/disk.go index 2e64d9841..aa7a288df 100644 --- a/vendor/github.com/osbuild/images/pkg/disk/disk.go +++ b/vendor/github.com/osbuild/images/pkg/disk/disk.go @@ -114,7 +114,7 @@ type Mountable interface { GetFSSpec() FSSpec // GetFSTabOptions returns options for mounting the entity. - GetFSTabOptions() FSTabOptions + GetFSTabOptions() (FSTabOptions, error) } // A MountpointCreator is a container that is able to create new volumes. 
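Two API-hardening changes land in the osbuild/images hunks here: CheckMountpointsPolicy now aggregates every policy violation with errors.Join instead of reporting only the offending paths, and Mountable.GetFSTabOptions returns an error instead of panicking. A minimal sketch of the aggregation pattern, with hypothetical names (checkAll, check):

package sketch

import (
	"errors"
	"fmt"
)

// checkAll mirrors the updated CheckMountpointsPolicy: collect every failure
// and wrap them with errors.Join, so callers see all problems in one pass
// instead of fixing mountpoints one error at a time.
func checkAll(items []string, check func(string) error) error {
	var errs []error
	for _, item := range items {
		if err := check(item); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) > 0 {
		// %w on errors.Join keeps the individual errors unwrappable.
		return fmt.Errorf("the following errors occurred:\n%w", errors.Join(errs...))
	}
	return nil
}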
diff --git a/vendor/github.com/osbuild/images/pkg/disk/filesystem.go b/vendor/github.com/osbuild/images/pkg/disk/filesystem.go index fb17253ba..1d053faab 100644 --- a/vendor/github.com/osbuild/images/pkg/disk/filesystem.go +++ b/vendor/github.com/osbuild/images/pkg/disk/filesystem.go @@ -76,15 +76,15 @@ func (fs *Filesystem) GetFSSpec() FSSpec { } } -func (fs *Filesystem) GetFSTabOptions() FSTabOptions { +func (fs *Filesystem) GetFSTabOptions() (FSTabOptions, error) { if fs == nil { - return FSTabOptions{} + return FSTabOptions{}, nil } return FSTabOptions{ MntOps: fs.FSTabOptions, Freq: fs.FSTabFreq, PassNo: fs.FSTabPassNo, - } + }, nil } func (fs *Filesystem) GenUUID(rng *rand.Rand) { diff --git a/vendor/github.com/osbuild/images/pkg/disk/partition_table.go b/vendor/github.com/osbuild/images/pkg/disk/partition_table.go index a4f406c23..b49e76dc6 100644 --- a/vendor/github.com/osbuild/images/pkg/disk/partition_table.go +++ b/vendor/github.com/osbuild/images/pkg/disk/partition_table.go @@ -773,6 +773,10 @@ func (pt *PartitionTable) ensureBtrfs() error { return fmt.Errorf("root entity is not mountable: %T, this is a violation of entityPath() contract", rootPath[0]) } + opts, err := rootMountable.GetFSTabOptions() + if err != nil { + return err + } btrfs := &Btrfs{ Label: "root", Subvolumes: []BtrfsSubvolume{ @@ -780,7 +784,7 @@ func (pt *PartitionTable) ensureBtrfs() error { Name: "root", Mountpoint: "/", Compress: DefaultBtrfsCompression, - ReadOnly: rootMountable.GetFSTabOptions().ReadOnly(), + ReadOnly: opts.ReadOnly(), }, }, } diff --git a/vendor/github.com/osbuild/images/pkg/distro/fedora/version.go b/vendor/github.com/osbuild/images/pkg/distro/fedora/version.go index c750a799a..3dddeb87c 100644 --- a/vendor/github.com/osbuild/images/pkg/distro/fedora/version.go +++ b/vendor/github.com/osbuild/images/pkg/distro/fedora/version.go @@ -1,4 +1,4 @@ package fedora const VERSION_BRANCHED = "41" -const VERSION_RAWHIDE = "41" +const VERSION_RAWHIDE = "42" diff --git a/vendor/github.com/osbuild/images/pkg/distro/rhel/rhel10/bare_metal.go b/vendor/github.com/osbuild/images/pkg/distro/rhel/rhel10/bare_metal.go index 3a32281e6..81b9a7eea 100644 --- a/vendor/github.com/osbuild/images/pkg/distro/rhel/rhel10/bare_metal.go +++ b/vendor/github.com/osbuild/images/pkg/distro/rhel/rhel10/bare_metal.go @@ -1,6 +1,10 @@ package rhel10 import ( + "fmt" + + "github.com/osbuild/images/pkg/arch" + "github.com/osbuild/images/pkg/distro" "github.com/osbuild/images/pkg/distro/rhel" "github.com/osbuild/images/pkg/rpmmd" ) @@ -24,3 +28,301 @@ func mkTarImgType() *rhel.ImageType { []string{"archive"}, ) } + +func mkImageInstallerImgType() *rhel.ImageType { + it := rhel.NewImageType( + "image-installer", + "installer.iso", + "application/x-iso9660-image", + map[string]rhel.PackageSetFunc{ + rhel.OSPkgsKey: bareMetalPackageSet, + rhel.InstallerPkgsKey: anacondaPackageSet, + }, + rhel.ImageInstallerImage, + []string{"build"}, + []string{"anaconda-tree", "rootfs-image", "efiboot-tree", "os", "bootiso-tree", "bootiso"}, + []string{"bootiso"}, + ) + + it.BootISO = true + it.Bootable = true + it.ISOLabelFn = distroISOLabelFunc + + it.DefaultInstallerConfig = &distro.InstallerConfig{ + AdditionalDracutModules: []string{ + "nvdimm", // non-volatile DIMM firmware (provides nfit, cuse, and nd_e820) + "prefixdevname", + "prefixdevname-tools", + }, + AdditionalDrivers: []string{ + "ipmi_devintf", + "ipmi_msghandler", + }, + } + + return it +} + +// PACKAGE SETS + +func bareMetalPackageSet(t *rhel.ImageType) rpmmd.PackageSet { + 
ps := rpmmd.PackageSet{ + Include: []string{ + "@core", + "chrony", + "cockpit-system", + "cockpit-ws", + "dnf-utils", + "dosfstools", + "firewalld", + "iwl1000-firmware", + "iwl100-firmware", + "iwl105-firmware", + "iwl135-firmware", + "iwl2000-firmware", + "iwl2030-firmware", + "iwl3160-firmware", + "iwl5000-firmware", + "iwl5150-firmware", + "iwl6000g2a-firmware", + "iwl6000g2b-firmware", + "iwl6050-firmware", + "iwl7260-firmware", + "lvm2", + "net-tools", + "nfs-utils", + "oddjob", + "oddjob-mkhomedir", + "policycoreutils", + "psmisc", + "python3-jsonschema", + "qemu-guest-agent", + "redhat-release", + "redhat-release-eula", + "rsync", + "tar", + "tcpdump", + "tuned", + }, + Exclude: []string{ + "dracut-config-rescue", + }, + }.Append(distroBuildPackageSet(t)) + + // Ensure to not pull in subscription-manager on non-RHEL distro + if t.IsRHEL() { + ps = ps.Append(rpmmd.PackageSet{ + Include: []string{ + "subscription-manager-cockpit", + }, + }) + } + + return ps +} + +func installerPackageSet(t *rhel.ImageType) rpmmd.PackageSet { + ps := rpmmd.PackageSet{ + Include: []string{ + "anaconda-dracut", + "curl", + "dracut-config-generic", + "dracut-network", + "hostname", + "iwl100-firmware", + "iwl1000-firmware", + "iwl105-firmware", + "iwl135-firmware", + "iwl2000-firmware", + "iwl2030-firmware", + "iwl3160-firmware", + "iwl5000-firmware", + "iwl5150-firmware", + "iwl6050-firmware", + "iwl7260-firmware", + "kernel", + "less", + "nfs-utils", + "openssh-clients", + "ostree", + "plymouth", + "prefixdevname", + "rng-tools", + "rpcbind", + "selinux-policy-targeted", + "systemd", + "tar", + "xfsprogs", + "xz", + }, + } + + ps = ps.Append(rpmmd.PackageSet{ + // Extra packages that are required by the dracut stage of all installers. + // These are weak deps of other packages in the list above, but lets be + // explicit about what we need and not rely on the weak deps. Relying + // on hard-dependencies for other modules is OK for now. + // + // TODO: add these dynamically based on the modules enabled by each + // pipeline. 
+ Include: []string{ + "mdadm", + "nss-softokn", + }, + }) + + switch t.Arch().Name() { + case arch.ARCH_X86_64.String(): + ps = ps.Append(rpmmd.PackageSet{ + Include: []string{ + "biosdevname", + }, + }) + } + + return ps +} + +func anacondaPackageSet(t *rhel.ImageType) rpmmd.PackageSet { + + // common installer packages + ps := installerPackageSet(t) + + ps = ps.Append(rpmmd.PackageSet{ + Include: []string{ + "alsa-firmware", + "alsa-tools-firmware", + "anaconda", + "anaconda-dracut", + "anaconda-install-img-deps", + "anaconda-widgets", + "audit", + "bind-utils", + "bzip2", + "cryptsetup", + "curl", + "dbus-x11", + "default-fonts-core-sans", + "default-fonts-other-sans", + "dejavu-sans-fonts", + "dejavu-sans-mono-fonts", + "device-mapper-persistent-data", + "dmidecode", + "dnf", + "dracut-config-generic", + "dracut-network", + "efibootmgr", + "ethtool", + "fcoe-utils", + "ftp", + "gdb-gdbserver", + "glibc-all-langpacks", + "gnome-kiosk", + "google-noto-sans-cjk-ttc-fonts", + "grub2-tools", + "grub2-tools-extra", + "grub2-tools-minimal", + "grubby", + "gsettings-desktop-schemas", + "hdparm", + "hexedit", + "hostname", + "initscripts", + "ipmitool", + "iwl1000-firmware", + "iwl100-firmware", + "iwl105-firmware", + "iwl135-firmware", + "iwl2000-firmware", + "iwl2030-firmware", + "iwl3160-firmware", + "iwl5000-firmware", + "iwl5150-firmware", + "iwl6000g2a-firmware", + "iwl6000g2b-firmware", + "iwl6050-firmware", + "iwl7260-firmware", + "jomolhari-fonts", + "kbd", + "kbd-misc", + "kdump-anaconda-addon", + "kernel", + "less", + "libblockdev-lvm-dbus", + "libibverbs", + "librsvg2", + "linux-firmware", + "lldpad", + "lsof", + "madan-fonts", + "mtr", + "mt-st", + "net-tools", + "nfs-utils", + "nmap-ncat", + "nm-connection-editor", + "nss-tools", + "openssh-clients", + "openssh-server", + // the package is not yet available on c10s / el10 + //"oscap-anaconda-addon", + "ostree", + "pciutils", + "perl-interpreter", + "pigz", + "plymouth", + "prefixdevname", + "python3-pyatspi", + "rdma-core", + "redhat-release-eula", + "rng-tools", + "rpcbind", + "rpm-ostree", + "rsync", + "rsyslog", + "selinux-policy-targeted", + "sg3_utils", + "sil-padauk-fonts", + "smartmontools", + "spice-vdagent", + "strace", + "systemd", + "tar", + "udisks2", + "udisks2-iscsi", + "usbutils", + "vim-minimal", + "volume_key", + "wget", + "xfsdump", + "xfsprogs", + "xz", + }, + }) + + ps = ps.Append(anacondaBootPackageSet(t)) + + switch t.Arch().Name() { + case arch.ARCH_X86_64.String(): + ps = ps.Append(rpmmd.PackageSet{ + Include: []string{ + "biosdevname", + "dmidecode", + "grub2-tools-efi", + "memtest86+", + }, + }) + + case arch.ARCH_AARCH64.String(): + ps = ps.Append(rpmmd.PackageSet{ + Include: []string{ + "dmidecode", + }, + }) + + default: + panic(fmt.Sprintf("unsupported arch: %s", t.Arch().Name())) + } + + return ps +} diff --git a/vendor/github.com/osbuild/images/pkg/distro/rhel/rhel10/distro.go b/vendor/github.com/osbuild/images/pkg/distro/rhel/rhel10/distro.go index 7f9cb52bf..1b21cafbe 100644 --- a/vendor/github.com/osbuild/images/pkg/distro/rhel/rhel10/distro.go +++ b/vendor/github.com/osbuild/images/pkg/distro/rhel/rhel10/distro.go @@ -2,6 +2,7 @@ package rhel10 import ( "fmt" + "strings" "github.com/osbuild/images/internal/common" "github.com/osbuild/images/pkg/arch" @@ -36,6 +37,18 @@ var ( } ) +func distroISOLabelFunc(t *rhel.ImageType) string { + const RHEL_ISO_LABEL = "RHEL-%s-%s-0-BaseOS-%s" + const CS_ISO_LABEL = "CentOS-Stream-%s-BaseOS-%s" + + if t.IsRHEL() { + osVer := 
strings.Split(t.Arch().Distro().OsVersion(), ".") + return fmt.Sprintf(RHEL_ISO_LABEL, osVer[0], osVer[1], t.Arch().Name()) + } else { + return fmt.Sprintf(CS_ISO_LABEL, t.Arch().Distro().Releasever(), t.Arch().Name()) + } +} + func defaultDistroImageConfig(d *rhel.Distribution) *distro.ImageConfig { return &distro.ImageConfig{ Timezone: common.ToPtr("America/New_York"), @@ -202,6 +215,48 @@ func newDistro(name string, major, minor int) *rhel.Distribution { aarch64.AddImageTypes(azureAarch64Platform, mkAzureImgType()) } + gceX86Platform := &platform.X86{ + UEFIVendor: rd.Vendor(), + BasePlatform: platform.BasePlatform{ + ImageFormat: platform.FORMAT_GCE, + }, + } + x86_64.AddImageTypes( + gceX86Platform, + mkGCEImageType(), + ) + + x86_64.AddImageTypes( + &platform.X86{ + BasePlatform: platform.BasePlatform{ + FirmwarePackages: []string{ + "microcode_ctl", // ?? + "iwl1000-firmware", + "iwl100-firmware", + "iwl105-firmware", + "iwl135-firmware", + "iwl2000-firmware", + "iwl2030-firmware", + "iwl3160-firmware", + "iwl5000-firmware", + "iwl5150-firmware", + "iwl6050-firmware", + }, + }, + BIOS: true, + UEFIVendor: rd.Vendor(), + }, + mkImageInstallerImgType(), + ) + + aarch64.AddImageTypes( + &platform.Aarch64{ + BasePlatform: platform.BasePlatform{}, + UEFIVendor: rd.Vendor(), + }, + mkImageInstallerImgType(), + ) + rd.AddArches(x86_64, aarch64, ppc64le, s390x) return rd } diff --git a/vendor/github.com/osbuild/images/pkg/distro/rhel/rhel10/gce.go b/vendor/github.com/osbuild/images/pkg/distro/rhel/rhel10/gce.go new file mode 100644 index 000000000..e1a0cf811 --- /dev/null +++ b/vendor/github.com/osbuild/images/pkg/distro/rhel/rhel10/gce.go @@ -0,0 +1,226 @@ +package rhel10 + +import ( + "github.com/osbuild/images/internal/common" + "github.com/osbuild/images/pkg/distro" + "github.com/osbuild/images/pkg/distro/rhel" + "github.com/osbuild/images/pkg/osbuild" + "github.com/osbuild/images/pkg/rpmmd" +) + +const gceKernelOptions = "biosdevname=0 scsi_mod.use_blk_mq=Y console=ttyS0,38400n8d" + +func mkGCEImageType() *rhel.ImageType { + it := rhel.NewImageType( + "gce", + "image.tar.gz", + "application/gzip", + map[string]rhel.PackageSetFunc{ + rhel.OSPkgsKey: gcePackageSet, + }, + rhel.DiskImage, + []string{"build"}, + []string{"os", "image", "archive"}, + []string{"archive"}, + ) + + it.DefaultImageConfig = baseGCEImageConfig() + it.KernelOptions = gceKernelOptions + it.DefaultSize = 20 * common.GibiByte + it.Bootable = true + // TODO: the base partition table still contains the BIOS boot partition, but the image is UEFI-only + it.BasePartitionTables = defaultBasePartitionTables + + return it +} + +func baseGCEImageConfig() *distro.ImageConfig { + ic := &distro.ImageConfig{ + Timezone: common.ToPtr("UTC"), + TimeSynchronization: &osbuild.ChronyStageOptions{ + Servers: []osbuild.ChronyConfigServer{{Hostname: "metadata.google.internal"}}, + }, + Firewall: &osbuild.FirewallStageOptions{ + DefaultZone: "trusted", + }, + EnabledServices: []string{ + "sshd", + "rngd", + "dnf-automatic.timer", + // TODO: remove cloud-init services once we switch back to GCP guest tools + "cloud-init", + "cloud-init-local", + "cloud-config", + "cloud-final", + }, + DisabledServices: []string{ + "sshd-keygen@", + "reboot.target", + }, + DefaultTarget: common.ToPtr("multi-user.target"), + Locale: common.ToPtr("en_US.UTF-8"), + Keyboard: &osbuild.KeymapStageOptions{ + Keymap: "us", + }, + DNFConfig: []*osbuild.DNFConfigStageOptions{ + { + Config: &osbuild.DNFConfig{ + Main: &osbuild.DNFConfigMain{ + IPResolve: "4", + }, + 
}, + }, + }, + DNFAutomaticConfig: &osbuild.DNFAutomaticConfigStageOptions{ + Config: &osbuild.DNFAutomaticConfig{ + Commands: &osbuild.DNFAutomaticConfigCommands{ + ApplyUpdates: common.ToPtr(true), + UpgradeType: osbuild.DNFAutomaticUpgradeTypeSecurity, + }, + }, + }, + YUMRepos: []*osbuild.YumReposStageOptions{ + { + Filename: "google-cloud.repo", + Repos: []osbuild.YumRepository{ + { + Id: "google-compute-engine", + Name: "Google Compute Engine", + // TODO: use el10 repo once it's available + BaseURLs: []string{"https://packages.cloud.google.com/yum/repos/google-compute-engine-el9-x86_64-stable"}, + Enabled: common.ToPtr(true), + // TODO: enable GPG check once Google stops using SHA-1 in their keys + // https://issuetracker.google.com/issues/360905189 + GPGCheck: common.ToPtr(false), + RepoGPGCheck: common.ToPtr(false), + GPGKey: []string{ + "https://packages.cloud.google.com/yum/doc/yum-key.gpg", + "https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg", + }, + }, + }, + }, + }, + SshdConfig: &osbuild.SshdConfigStageOptions{ + Config: osbuild.SshdConfigConfig{ + PasswordAuthentication: common.ToPtr(false), + ClientAliveInterval: common.ToPtr(420), + PermitRootLogin: osbuild.PermitRootLoginValueNo, + }, + }, + Sysconfig: []*osbuild.SysconfigStageOptions{ + { + Kernel: &osbuild.SysconfigKernelOptions{ + DefaultKernel: "kernel-core", + UpdateDefault: true, + }, + }, + }, + Modprobe: []*osbuild.ModprobeStageOptions{ + { + Filename: "blacklist-floppy.conf", + Commands: osbuild.ModprobeConfigCmdList{ + osbuild.NewModprobeConfigCmdBlacklist("floppy"), + }, + }, + }, + GCPGuestAgentConfig: &osbuild.GcpGuestAgentConfigOptions{ + ConfigScope: osbuild.GcpGuestAgentConfigScopeDistro, + Config: &osbuild.GcpGuestAgentConfig{ + InstanceSetup: &osbuild.GcpGuestAgentConfigInstanceSetup{ + SetBotoConfig: common.ToPtr(false), + }, + }, + }, + } + + return ic +} + +func gceCommonPackageSet(t *rhel.ImageType) rpmmd.PackageSet { + ps := rpmmd.PackageSet{ + Include: []string{ + "@core", + "langpacks-en", // not in Google's KS + "acpid", + "dnf-automatic", + "net-tools", + "python3", + "rng-tools", + "tar", + "vim", + + // GCE guest tools + // TODO: uncomment once the package is available + // the el9 version depends on libboost_regex.so.1.75.0()(64bit), which is not available on el10 + //"google-compute-engine", + "google-osconfig-agent", + "gce-disk-expand", + // cloud-init is a replacement for "google-compute-engine", remove once the package is available + "cloud-init", + + // Not explicitly included in GCP kickstart, but present on the image + // for time synchronization + "chrony", + "timedatex", + // EFI + "grub2-tools", + "grub2-tools-minimal", + // Performance tuning + "tuned", + }, + Exclude: []string{ + "alsa-utils", + "b43-fwcutter", + "dmraid", + "dracut-config-rescue", + "eject", + "gpm", + "irqbalance", + "microcode_ctl", + "smartmontools", + "aic94xx-firmware", + "atmel-firmware", + "b43-openfwwf", + "bfa-firmware", + "ipw2100-firmware", + "ipw2200-firmware", + "ivtv-firmware", + "iwl100-firmware", + "iwl105-firmware", + "iwl135-firmware", + "iwl1000-firmware", + "iwl2000-firmware", + "iwl2030-firmware", + "iwl3160-firmware", + "iwl3945-firmware", + "iwl4965-firmware", + "iwl5000-firmware", + "iwl5150-firmware", + "iwl6000-firmware", + "iwl6000g2a-firmware", + "iwl6050-firmware", + "iwl7260-firmware", + "kernel-firmware", + "libertas-usb8388-firmware", + "ql2100-firmware", + "ql2200-firmware", + "ql23xx-firmware", + "ql2400-firmware", + "ql2500-firmware", + "rt61pci-firmware", + 
"rt73usb-firmware", + "xorg-x11-drv-ati-firmware", + "zd1211-firmware", + // RHBZ#2075815 + "qemu-guest-agent", + }, + }.Append(distroSpecificPackageSet(t)) + + return ps +} + +// GCE image +func gcePackageSet(t *rhel.ImageType) rpmmd.PackageSet { + return gceCommonPackageSet(t) +} diff --git a/vendor/github.com/osbuild/images/pkg/distro/rhel/rhel10/package_sets.go b/vendor/github.com/osbuild/images/pkg/distro/rhel/rhel10/package_sets.go index c0c9994b4..7a272cfd1 100644 --- a/vendor/github.com/osbuild/images/pkg/distro/rhel/rhel10/package_sets.go +++ b/vendor/github.com/osbuild/images/pkg/distro/rhel/rhel10/package_sets.go @@ -3,6 +3,8 @@ package rhel10 // This file defines package sets that are used by more than one image type. import ( + "fmt" + "github.com/osbuild/images/pkg/arch" "github.com/osbuild/images/pkg/distro/rhel" "github.com/osbuild/images/pkg/rpmmd" @@ -63,6 +65,58 @@ func ppc64leBuildPackageSet(t *rhel.ImageType) rpmmd.PackageSet { } } +// installer boot package sets, needed for booting and +// also in the build host +func anacondaBootPackageSet(t *rhel.ImageType) rpmmd.PackageSet { + ps := rpmmd.PackageSet{} + + grubCommon := rpmmd.PackageSet{ + Include: []string{ + "grub2-tools", + "grub2-tools-extra", + "grub2-tools-minimal", + }, + } + + efiCommon := rpmmd.PackageSet{ + Include: []string{ + "efibootmgr", + }, + } + + switch t.Arch().Name() { + case arch.ARCH_X86_64.String(): + ps = ps.Append(grubCommon) + ps = ps.Append(efiCommon) + ps = ps.Append(rpmmd.PackageSet{ + Include: []string{ + "grub2-efi-x64", + "grub2-efi-x64-cdboot", + "grub2-pc", + "grub2-pc-modules", + "shim-x64", + "syslinux", + "syslinux-nonlinux", + }, + }) + case arch.ARCH_AARCH64.String(): + ps = ps.Append(grubCommon) + ps = ps.Append(efiCommon) + ps = ps.Append(rpmmd.PackageSet{ + Include: []string{ + "grub2-efi-aa64-cdboot", + "grub2-efi-aa64", + "shim-aa64", + }, + }) + + default: + panic(fmt.Sprintf("unsupported arch: %s", t.Arch().Name())) + } + + return ps +} + // OS package sets // packages that are only in some (sub)-distributions diff --git a/vendor/github.com/osbuild/images/pkg/distro/rhel/rhel10/qcow2.go b/vendor/github.com/osbuild/images/pkg/distro/rhel/rhel10/qcow2.go index bee18f977..ecd541fe0 100644 --- a/vendor/github.com/osbuild/images/pkg/distro/rhel/rhel10/qcow2.go +++ b/vendor/github.com/osbuild/images/pkg/distro/rhel/rhel10/qcow2.go @@ -110,7 +110,6 @@ func qcow2CommonPackageSet(t *rhel.ImageType) rpmmd.PackageSet { "langpacks-*", "langpacks-en", "libertas-sd8787-firmware", - "nss", "plymouth", "rng-tools", "udisks2", diff --git a/vendor/github.com/osbuild/images/pkg/distro/rhel/rhel9/gce.go b/vendor/github.com/osbuild/images/pkg/distro/rhel/rhel9/gce.go index c9534e6cb..be9cad1b3 100644 --- a/vendor/github.com/osbuild/images/pkg/distro/rhel/rhel9/gce.go +++ b/vendor/github.com/osbuild/images/pkg/distro/rhel/rhel9/gce.go @@ -106,14 +106,12 @@ func baseGCEImageConfig() *distro.ImageConfig { Filename: "google-cloud.repo", Repos: []osbuild.YumRepository{ { - Id: "google-compute-engine", - Name: "Google Compute Engine", - BaseURLs: []string{"https://packages.cloud.google.com/yum/repos/google-compute-engine-el9-x86_64-stable"}, - Enabled: common.ToPtr(true), - // TODO: enable GPG check once Google stops using SHA-1 in their keys - // https://issuetracker.google.com/issues/223626963 - GPGCheck: common.ToPtr(false), - RepoGPGCheck: common.ToPtr(false), + Id: "google-compute-engine", + Name: "Google Compute Engine", + BaseURLs: 
[]string{"https://packages.cloud.google.com/yum/repos/google-compute-engine-el9-x86_64-stable"}, + Enabled: common.ToPtr(true), + GPGCheck: common.ToPtr(true), + RepoGPGCheck: common.ToPtr(true), GPGKey: []string{ "https://packages.cloud.google.com/yum/doc/yum-key.gpg", "https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg", diff --git a/vendor/github.com/osbuild/images/pkg/distro/test_distro/distro.go b/vendor/github.com/osbuild/images/pkg/distro/test_distro/distro.go index c1617aab9..f8354ab80 100644 --- a/vendor/github.com/osbuild/images/pkg/distro/test_distro/distro.go +++ b/vendor/github.com/osbuild/images/pkg/distro/test_distro/distro.go @@ -9,6 +9,7 @@ import ( "github.com/osbuild/images/pkg/distro" "github.com/osbuild/images/pkg/manifest" "github.com/osbuild/images/pkg/ostree" + "github.com/osbuild/images/pkg/policies" "github.com/osbuild/images/pkg/rpmmd" ) @@ -240,15 +241,9 @@ func (t *TestImageType) Manifest(b *blueprint.Blueprint, options distro.ImageOpt if b != nil { mountpoints := b.Customizations.GetFilesystems() - invalidMountpoints := []string{} - for _, m := range mountpoints { - if m.Mountpoint != "/" { - invalidMountpoints = append(invalidMountpoints, m.Mountpoint) - } - } - - if len(invalidMountpoints) > 0 { - return nil, nil, fmt.Errorf("The following custom mountpoints are not supported %+q", invalidMountpoints) + err := blueprint.CheckMountpointsPolicy(mountpoints, policies.MountpointPolicies) + if err != nil { + return nil, nil, err } bpPkgs = b.GetPackages() diff --git a/vendor/github.com/osbuild/images/pkg/manifest/os.go b/vendor/github.com/osbuild/images/pkg/manifest/os.go index 3878ec76c..7d6181bdf 100644 --- a/vendor/github.com/osbuild/images/pkg/manifest/os.go +++ b/vendor/github.com/osbuild/images/pkg/manifest/os.go @@ -678,7 +678,11 @@ func (p *OS) serialize() osbuild.Pipeline { pipeline = prependKernelCmdlineStage(pipeline, strings.Join(kernelOptions, " "), pt) } - pipeline.AddStage(osbuild.NewFSTabStage(osbuild.NewFSTabStageOptions(pt))) + opts, err := osbuild.NewFSTabStageOptions(pt) + if err != nil { + panic(err) + } + pipeline.AddStage(osbuild.NewFSTabStage(opts)) var bootloader *osbuild.Stage switch p.platform.GetArch() { diff --git a/vendor/github.com/osbuild/images/pkg/manifest/ostree_deployment.go b/vendor/github.com/osbuild/images/pkg/manifest/ostree_deployment.go index 6d00bcb61..084610337 100644 --- a/vendor/github.com/osbuild/images/pkg/manifest/ostree_deployment.go +++ b/vendor/github.com/osbuild/images/pkg/manifest/ostree_deployment.go @@ -330,7 +330,10 @@ func (p *OSTreeDeployment) serialize() osbuild.Pipeline { configStage.MountOSTree(p.osName, ref, 0) pipeline.AddStage(configStage) - fstabOptions := osbuild.NewFSTabStageOptions(p.PartitionTable) + fstabOptions, err := osbuild.NewFSTabStageOptions(p.PartitionTable) + if err != nil { + panic(err) + } fstabStage := osbuild.NewFSTabStage(fstabOptions) fstabStage.MountOSTree(p.osName, ref, 0) pipeline.AddStage(fstabStage) diff --git a/vendor/github.com/osbuild/images/pkg/manifest/raw_bootc.go b/vendor/github.com/osbuild/images/pkg/manifest/raw_bootc.go index c793e034c..dce79bba7 100644 --- a/vendor/github.com/osbuild/images/pkg/manifest/raw_bootc.go +++ b/vendor/github.com/osbuild/images/pkg/manifest/raw_bootc.go @@ -166,7 +166,11 @@ func (p *RawBootcImage) serialize() osbuild.Pipeline { mounts = append(mounts, *osbuild.NewBindMount("bind-ostree-deployment-to-tree", "mount://", "tree://")) // we always include the fstab stage - fstabStage := 
osbuild.NewFSTabStage(osbuild.NewFSTabStageOptions(pt)) + fstabOpts, err := osbuild.NewFSTabStageOptions(pt) + if err != nil { + panic(err) + } + fstabStage := osbuild.NewFSTabStage(fstabOpts) fstabStage.Mounts = mounts fstabStage.Devices = devices pipeline.AddStage(fstabStage) diff --git a/vendor/github.com/osbuild/images/pkg/osbuild/dracut_stage.go b/vendor/github.com/osbuild/images/pkg/osbuild/dracut_stage.go index b7b141bc5..daa4ff5d3 100644 --- a/vendor/github.com/osbuild/images/pkg/osbuild/dracut_stage.go +++ b/vendor/github.com/osbuild/images/pkg/osbuild/dracut_stage.go @@ -30,7 +30,7 @@ type DracutStageOptions struct { // Add custom files to the initramfs // What (keys) to include where (values) - Include map[string]string `json:"include,omitempty"` + Include []map[string]string `json:"include,omitempty"` // Install the specified files Install []string `json:"install,omitempty"` diff --git a/vendor/github.com/osbuild/images/pkg/osbuild/fstab_stage.go b/vendor/github.com/osbuild/images/pkg/osbuild/fstab_stage.go index 09c913988..6f807a52c 100644 --- a/vendor/github.com/osbuild/images/pkg/osbuild/fstab_stage.go +++ b/vendor/github.com/osbuild/images/pkg/osbuild/fstab_stage.go @@ -56,11 +56,14 @@ func (options *FSTabStageOptions) AddFilesystem(id string, vfsType string, path }) } -func NewFSTabStageOptions(pt *disk.PartitionTable) *FSTabStageOptions { +func NewFSTabStageOptions(pt *disk.PartitionTable) (*FSTabStageOptions, error) { var options FSTabStageOptions genOption := func(mnt disk.Mountable, path []disk.Entity) error { fsSpec := mnt.GetFSSpec() - fsOptions := mnt.GetFSTabOptions() + fsOptions, err := mnt.GetFSTabOptions() + if err != nil { + return err + } options.AddFilesystem(fsSpec.UUID, mnt.GetFSType(), mnt.GetMountpoint(), fsOptions.MntOps, fsOptions.Freq, fsOptions.PassNo) return nil } @@ -69,10 +72,14 @@ func NewFSTabStageOptions(pt *disk.PartitionTable) *FSTabStageOptions { return fmt.Sprintf("%d%s", fs.PassNo, fs.Path) } - _ = pt.ForEachMountable(genOption) // genOption always returns nil + err := pt.ForEachMountable(genOption) + if err != nil { + return nil, err + } + // sort the entries by PassNo to maintain backward compatibility sort.Slice(options.FileSystems, func(i, j int) bool { return key(options.FileSystems[i]) < key(options.FileSystems[j]) }) - return &options + return &options, nil } diff --git a/vendor/github.com/osbuild/images/pkg/pathpolicy/path_policy.go b/vendor/github.com/osbuild/images/pkg/pathpolicy/path_policy.go index 423a5751b..ffa756031 100644 --- a/vendor/github.com/osbuild/images/pkg/pathpolicy/path_policy.go +++ b/vendor/github.com/osbuild/images/pkg/pathpolicy/path_policy.go @@ -10,43 +10,36 @@ type PathPolicy struct { Exact bool // require an exact match, no subdirs } -type PathPolicies = PathTrie +type PathPolicies struct { + pathTrie *pathTrie[PathPolicy] +} // Create a new PathPolicies trie from a map of path to PathPolicy func NewPathPolicies(entries map[string]PathPolicy) *PathPolicies { - - noType := make(map[string]interface{}, len(entries)) - - for k, v := range entries { - noType[k] = v + return &PathPolicies{ + pathTrie: newPathTrieFromMap[PathPolicy](entries), } - - return NewPathTrieFromMap(noType) } // Check a given path against the PathPolicies func (pol *PathPolicies) Check(fsPath string) error { - // Quickly check we have a path and it is absolute if fsPath == "" || fsPath[0] != '/' { - return fmt.Errorf("path must be absolute") + return fmt.Errorf("path %q must be absolute", fsPath) } // ensure that only clean paths are
valid if fsPath != path.Clean(fsPath) { - return fmt.Errorf("path must be canonical") + return fmt.Errorf("path %q must be canonical", fsPath) } - node, left := pol.Lookup(fsPath) - policy, ok := node.Payload.(PathPolicy) - if !ok { - panic("programming error: invalid path trie payload") - } + node, left := pol.pathTrie.Lookup(fsPath) + policy := node.Payload // 1) path is explicitly not allowed or // 2) a subpath was matched but an explicit match is required if policy.Deny || (policy.Exact && len(left) > 0) { - return fmt.Errorf("path '%s ' is not allowed", fsPath) + return fmt.Errorf("path %q is not allowed", fsPath) } // exact match or recursive path allowed diff --git a/vendor/github.com/osbuild/images/pkg/pathpolicy/path_trie.go b/vendor/github.com/osbuild/images/pkg/pathpolicy/path_trie.go index 1de1f46a6..5aa857a72 100644 --- a/vendor/github.com/osbuild/images/pkg/pathpolicy/path_trie.go +++ b/vendor/github.com/osbuild/images/pkg/pathpolicy/path_trie.go @@ -16,14 +16,14 @@ func pathTrieSplitPath(path string) []string { return strings.Split(path, "/") } -type PathTrie struct { +type pathTrie[T any] struct { Name []string - Paths []*PathTrie - Payload interface{} + Paths []*pathTrie[T] + Payload T } // match checks if the given trie is a prefix of path -func (trie *PathTrie) match(path []string) bool { +func (trie *pathTrie[T]) match(path []string) bool { if len(trie.Name) > len(path) { return false } @@ -37,12 +37,12 @@ func (trie *PathTrie) match(path []string) bool { return true } -func (trie *PathTrie) get(path []string) (*PathTrie, []string) { +func (trie *pathTrie[T]) get(path []string) (*pathTrie[T], []string) { if len(path) < 1 { panic("programming error: expected root node") } - var node *PathTrie + var node *pathTrie[T] for i := range trie.Paths { if trie.Paths[i].match(path) { node = trie.Paths[i] @@ -67,11 +67,11 @@ func (trie *PathTrie) get(path []string) (*PathTrie, []string) { return node.get(path[prefix:]) } -func (trie *PathTrie) add(path []string) *PathTrie { - node := &PathTrie{Name: path} +func (trie *pathTrie[T]) add(path []string) *pathTrie[T] { + node := &pathTrie[T]{Name: path} if trie.Paths == nil { - trie.Paths = make([]*PathTrie, 0, 1) + trie.Paths = make([]*pathTrie[T], 0, 1) } trie.Paths = append(trie.Paths, node) @@ -81,8 +81,8 @@ func (trie *PathTrie) add(path []string) *PathTrie { // Construct a new trie from a map of paths to their payloads. // Returns the root node of the trie. -func NewPathTrieFromMap(entries map[string]interface{}) *PathTrie { - root := &PathTrie{Name: []string{}} +func newPathTrieFromMap[T any](entries map[string]T) *pathTrie[T] { + root := &pathTrie[T]{Name: []string{}} keys := make([]string, 0, len(entries)) for k := range entries { @@ -107,7 +107,7 @@ func NewPathTrieFromMap(entries map[string]interface{}) *PathTrie { // Lookup returns the node that is the prefix of path and // the unmatched path segment. Must be called on the root // trie node.
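To make the path_policy.go / path_trie.go refactor concrete: PathPolicies now wraps a generic pathTrie[PathPolicy], so Lookup (diff just below) returns a typed payload and the runtime type assertion disappears. A minimal sketch of the exported API after this change; the policy map here is invented for illustration and is not taken from this diff:

package main

import (
	"fmt"

	"github.com/osbuild/images/pkg/pathpolicy"
)

func main() {
	pol := pathpolicy.NewPathPolicies(map[string]pathpolicy.PathPolicy{
		"/":     {},            // allowed, including subpaths
		"/boot": {Exact: true}, // allowed, but no subpaths
		"/proc": {Deny: true},  // explicitly denied
	})

	fmt.Println(pol.Check("/home/user")) // <nil>
	fmt.Println(pol.Check("/boot/efi"))  // path "/boot/efi" is not allowed
	fmt.Println(pol.Check("proc"))       // path "proc" must be absolute
}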
-func (root *PathTrie) Lookup(path string) (*PathTrie, []string) { +func (root *pathTrie[T]) Lookup(path string) (*pathTrie[T], []string) { if len(root.Name) != 0 { panic("programming error: lookup on non-root trie node") diff --git a/vendor/github.com/osbuild/images/pkg/platform/x86_64.go b/vendor/github.com/osbuild/images/pkg/platform/x86_64.go index 3d7d1c7b8..8186641a8 100644 --- a/vendor/github.com/osbuild/images/pkg/platform/x86_64.go +++ b/vendor/github.com/osbuild/images/pkg/platform/x86_64.go @@ -4,8 +4,6 @@ import ( "github.com/osbuild/images/pkg/arch" ) -type X86BootLoader uint64 - type X86 struct { BasePlatform BIOS bool diff --git a/vendor/github.com/vbauerster/mpb/v8/bar.go b/vendor/github.com/vbauerster/mpb/v8/bar.go index b78fa47d1..2ccd82afd 100644 --- a/vendor/github.com/vbauerster/mpb/v8/bar.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar.go @@ -173,13 +173,12 @@ func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) { } // EnableTriggerComplete enables triggering complete event. It's effective -// only for bars which were constructed with `total <= 0` and after total -// has been set with `(*Bar).SetTotal(int64, false)`. If `curren >= total` +// only for bars which were constructed with `total <= 0`. If `current >= total` // at the moment of call, complete event is triggered right away. func (b *Bar) EnableTriggerComplete() { select { case b.operateState <- func(s *bState) { - if s.triggerComplete || s.total <= 0 { + if s.triggerComplete { return } if s.current >= s.total { @@ -196,10 +195,10 @@ // SetTotal sets total to an arbitrary value. It's effective only for bar // which was constructed with `total <= 0`. Setting total to negative value -// is equivalent to `(*Bar).SetTotal((*Bar).Current(), bool)` but faster. If -// triggerCompletion is true, total value is set to current and complete -// event is triggered right away. -func (b *Bar) SetTotal(total int64, triggerCompletion bool) { +// is equivalent to `(*Bar).SetTotal((*Bar).Current(), bool)` but faster. +// If `complete` is true, the complete event is triggered right away. +// Calling `(*Bar).EnableTriggerComplete` makes this a no-op. +func (b *Bar) SetTotal(total int64, complete bool) { select { case b.operateState <- func(s *bState) { if s.triggerComplete { @@ -210,7 +209,7 @@ func (b *Bar) SetTotal(total int64, triggerCompletion bool) { } else { s.total = total } - if triggerCompletion { + if complete { s.current = s.total s.completed = true b.triggerCompletion(s) diff --git a/vendor/github.com/vmware/govmomi/govc/flags/datacenter.go b/vendor/github.com/vmware/govmomi/govc/flags/datacenter.go index d73f36a81..7cfcef3bd 100644 --- a/vendor/github.com/vmware/govmomi/govc/flags/datacenter.go +++ b/vendor/github.com/vmware/govmomi/govc/flags/datacenter.go @@ -174,7 +174,7 @@ func (flag *DatacenterFlag) ManagedObject(ctx context.Context, arg string) (type for _, o := range l { objs = append(objs, o.Object.Reference()) } - return ref, fmt.Errorf("%d objects at path %q: %s", len(l), arg, objs) + return ref, fmt.Errorf("%d objects match %q: %s (unique inventory path required)", len(l), arg, objs) } } diff --git a/vendor/github.com/vmware/govmomi/internal/helpers.go b/vendor/github.com/vmware/govmomi/internal/helpers.go index 41e533fd7..859469a09 100644 --- a/vendor/github.com/vmware/govmomi/internal/helpers.go +++ b/vendor/github.com/vmware/govmomi/internal/helpers.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2020-2023 VMware, Inc. All Rights Reserved.
+Copyright (c) 2020-2024 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -24,6 +24,7 @@ import ( "net/url" "os" "path" + "slices" "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/mo" @@ -51,6 +52,15 @@ func InventoryPath(entities []mo.ManagedEntity) string { return val } +var vsanFS = []string{ + string(types.HostFileSystemVolumeFileSystemTypeVsan), + string(types.HostFileSystemVolumeFileSystemTypeVVOL), +} + +func IsDatastoreVSAN(ds mo.Datastore) bool { + return slices.Contains(vsanFS, ds.Summary.Type) +} + func HostSystemManagementIPs(config []types.VirtualNicManagerNetConfig) []net.IP { var ips []net.IP diff --git a/vendor/github.com/vmware/govmomi/internal/version/version.go b/vendor/github.com/vmware/govmomi/internal/version/version.go index c8c9969a1..f0c027c6f 100644 --- a/vendor/github.com/vmware/govmomi/internal/version/version.go +++ b/vendor/github.com/vmware/govmomi/internal/version/version.go @@ -21,5 +21,5 @@ const ( ClientName = "govmomi" // ClientVersion is the version of this SDK - ClientVersion = "0.39.0" + ClientVersion = "0.42.0" ) diff --git a/vendor/github.com/vmware/govmomi/object/namespace_manager.go b/vendor/github.com/vmware/govmomi/object/namespace_manager.go index f463b368c..c4e3b6c1f 100644 --- a/vendor/github.com/vmware/govmomi/object/namespace_manager.go +++ b/vendor/github.com/vmware/govmomi/object/namespace_manager.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2015 VMware, Inc. All Rights Reserved. +Copyright (c) 2016-2024 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -74,3 +74,22 @@ func (nm DatastoreNamespaceManager) DeleteDirectory(ctx context.Context, dc *Dat return nil } + +func (nm DatastoreNamespaceManager) ConvertNamespacePathToUuidPath(ctx context.Context, dc *Datacenter, datastoreURL string) (string, error) { + req := &types.ConvertNamespacePathToUuidPath{ + This: nm.Reference(), + NamespaceUrl: datastoreURL, + } + + if dc != nil { + ref := dc.Reference() + req.Datacenter = &ref + } + + res, err := methods.ConvertNamespacePathToUuidPath(ctx, nm.c, req) + if err != nil { + return "", err + } + + return res.Returnval, nil +} diff --git a/vendor/github.com/vmware/govmomi/object/option_value_list.go b/vendor/github.com/vmware/govmomi/object/option_value_list.go index 9f91253cc..2bdcce39f 100644 --- a/vendor/github.com/vmware/govmomi/object/option_value_list.go +++ b/vendor/github.com/vmware/govmomi/object/option_value_list.go @@ -19,6 +19,8 @@ package object import ( "fmt" "reflect" + "slices" + "strings" "github.com/vmware/govmomi/vim25/types" ) @@ -44,6 +46,65 @@ func OptionValueListFromMap[T any](in map[string]T) OptionValueList { return out } +// IsTrue returns true if the specified key exists with an empty value or value +// equal to 1, "1", "on", "t", true, "true", "y", or "yes". 
+// All string comparisons are case-insensitive. +func (ov OptionValueList) IsTrue(key string) bool { + return ov.isTrueOrFalse(key, true, 1, "", "1", "on", "t", "true", "y", "yes") +} + +// IsFalse returns true if the specified key exists and has a value equal to +// 0, "0", "f", false, "false", "n", "no", or "off". +// All string comparisons are case-insensitive. +func (ov OptionValueList) IsFalse(key string) bool { + return ov.isTrueOrFalse(key, false, 0, "0", "f", "false", "n", "no", "off") +} + +func (ov OptionValueList) isTrueOrFalse( + key string, + boolVal bool, + numVal int, + strVals ...string) bool { + + val, ok := ov.Get(key) + if !ok { + return false + } + + switch tval := val.(type) { + case string: + return slices.Contains(strVals, strings.ToLower(tval)) + case bool: + return tval == boolVal + case uint: + return tval == uint(numVal) + case uint8: + return tval == uint8(numVal) + case uint16: + return tval == uint16(numVal) + case uint32: + return tval == uint32(numVal) + case uint64: + return tval == uint64(numVal) + case int: + return tval == int(numVal) + case int8: + return tval == int8(numVal) + case int16: + return tval == int16(numVal) + case int32: + return tval == int32(numVal) + case int64: + return tval == int64(numVal) + case float32: + return tval == float32(numVal) + case float64: + return tval == float64(numVal) + } + + return false +} + // Get returns the value if exists, otherwise nil is returned. The second return // value is a flag indicating whether the value exists or nil was the actual // value. diff --git a/vendor/github.com/vmware/govmomi/ovf/importer/spec.go b/vendor/github.com/vmware/govmomi/ovf/importer/spec.go index a0028ec78..5afd00b89 100644 --- a/vendor/github.com/vmware/govmomi/ovf/importer/spec.go +++ b/vendor/github.com/vmware/govmomi/ovf/importer/spec.go @@ -99,9 +99,9 @@ func Spec(fpath string, a Archive, hidden, verbose bool) (*Options, error) { } o := Options{ - DiskProvisioning: allDiskProvisioningOptions[0], - IPAllocationPolicy: allIPAllocationPolicyOptions[0], - IPProtocol: allIPProtocolOptions[0], + DiskProvisioning: string(types.OvfCreateImportSpecParamsDiskProvisioningTypeFlat), + IPAllocationPolicy: string(types.VAppIPAssignmentInfoIpAllocationPolicyDhcpPolicy), + IPProtocol: string(types.VAppIPAssignmentInfoProtocolsIPv4), MarkAsTemplate: false, PowerOn: false, WaitForIP: false, diff --git a/vendor/github.com/vmware/govmomi/ovf/parser.go b/vendor/github.com/vmware/govmomi/ovf/parser.go new file mode 100644 index 000000000..5afdb4ff9 --- /dev/null +++ b/vendor/github.com/vmware/govmomi/ovf/parser.go @@ -0,0 +1,253 @@ +/* +Copyright (c) 2024-2024 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package ovf + +import ( + "math" + "regexp" + "strconv" + "strings" +) + +// These are used to validate the overall structure of the string being parsed and to differentiate tokens as we are +// processing them +var ( + blankRegexp = regexp.MustCompile(`[[:blank:]]`) + validIntegerRegexp = regexp.MustCompile(`^([1-9]\d*)$`) + validExponentRegexp = regexp.MustCompile(`^([1-9]\d*\^[1-9]\d*)$`) + validByteUnitRegexp = regexp.MustCompile(`((^|kilo|kibi|mega|mebi|giga|gibi)byte(s?)$)`) + validCapacityRegexp = regexp.MustCompile(`^[[:blank:]]*((([1-9]\d*\^[1-9]\d*)|([1-9]\d*))($|[[:blank:]]*\*[[:blank:]]*))*(([a-zA-Z]*(b|B)(y|Y)(t|T)(e|E)(s|S)?)($|([[:blank:]]*\*[[:blank:]]*(([1-9]\d*\^[1-9]\d*)|([1-9]\d*)))*))?$`) +) + +// We only handle kilo, kibi, mega, mebi, giga, gibi prefixes due to size constraints of int64/uint64, but more +// importantly because prefixes larger than giga & gibi don't make sense for our use-case +var prefixMultipliers = map[string]int64{ + "byte": 1, // byte + "kilobyte": 1 * 1000, // byte * 1000 + "kibibyte": 1 * 1024, // byte * 1024 + "megabyte": 1 * 1000 * 1000, // byte * 1000 * 1000 = kilobyte * 1000 + "mebibyte": 1 * 1024 * 1024, // byte * 1024 * 1024 = kibibyte * 1024 + "gigabyte": 1 * 1000 * 1000 * 1000, // byte * 1000 * 1000 * 1000 = kilobyte * 1000 * 1000 = megabyte * 1000 + "gibibyte": 1 * 1024 * 1024 * 1024, // byte * 1024 * 1024 * 1024 = kibibyte * 1024 * 1024 = mebibyte * 1024 +} + +// ParseCapacityAllocationUnits validates the string s is a valid programmatic unit with respect to the base unit 'byte' +// and parses the string to return the number of bytes s represents +func ParseCapacityAllocationUnits(s string) int64 { + // Any strings which don't match against the regular expression are deemed invalid and zero is returned as the result + if !validCapacityString(s) { + return 0 + } + var capacityBytes int64 = 1 + // Remove any whitespace in s and lowercase any alphabetic characters. Removal of whitespace is done after + // validating against the regular expression because whitespace is valid for the most part, but is not valid + // for exponential terms, e.g 2 ^ 10 + s = strings.ToLower(blankRegexp.ReplaceAllString(s, "")) + // Split s on multiplication operator (*) so that we can just calculate integer multipliers. Each token will + // then be either an integer, an exponential term to be converted to an integer, or a unit term to be converted + // to an integer + tokens := strings.Split(s, "*") + + // Loop through all tokens and convert any to integers if necessary and use to compute a running product + for _, token := range tokens { + switch { + // "" should be treated identically to "byte". capacityBytes is already set to 1 so there is nothing to do + case len(token) == 0: + continue + case validByteUnitString(token): + capacityBytes = capacityBytes * prefixMultipliers[strings.TrimSuffix(token, "s")] + case validExponentString(token): + p := strings.Split(token, "^") + b, _ := strconv.ParseInt(p[0], 10, 64) + e, _ := strconv.ParseInt(p[1], 10, 64) + capacityBytes = capacityBytes * int64(math.Pow(float64(b), float64(e))) + case validIntegerString(token): + n, _ := strconv.ParseInt(token, 10, 64) + capacityBytes = capacityBytes * n + default: + // This should be unreachable. validCapacityString should have filtered out anything that cannot be + // matched by the non-default cases + capacityBytes = 0 + } + } + return capacityBytes +} + +// validIntegerString matches the string s against the regular expression `^([1-9]\d*)$`; i.e. 
s should be of the form: +// any non-zero digit ([1-9]), followed by zero or more digits (\d*) +func validIntegerString(s string) bool { + return validIntegerRegexp.MatchString(s) +} + +// validExponentString matches the string s against the regular expression `^([1-9]\d*\^[1-9]\d*)$`; i.e. s should be of +// the form: any non-zero digit ([1-9]), followed by a caret (^) followed by any non-zero digit ([1-9]), followed by zero +// or more digits (\d*) +func validExponentString(s string) bool { + return validExponentRegexp.MatchString(s) +} + +// validByteUnitString matches the string s against a regular expression which only allows a unit of byte +// (optionally plural) with a valid decimal or binary prefix. See prefixMultipliers +func validByteUnitString(s string) bool { + return validByteUnitRegexp.MatchString(s) +} + +// validCapacityString matches the string s against the regular expression validCapacityRegexp and verifies that s is a +// valid programmatic unit with respect to the base unit 'byte'. +// +// Per the OVF schema defined in DSP8023: "If not specified default value is bytes. Value shall match a recognized value +// for the UNITS qualifier in DSP0004" +// +// DSP0004 defines a programmatic unit as: +// +// programmatic-unit = [ sign ] *S unit-element *( *S unit-operator *S unit-element ) +// sign = HYPHEN +// unit-element = number / [ prefix ] base-unit [ CARET exponent ] +// unit-operator = "*" / "/" +// number = floatingpoint-number / exponent-number +// +// ; An exponent shall be interpreted as a floating point number +// ; with the specified decimal base and exponent and a mantissa of 1 +// exponent-number = base CARET exponent +// base = integer-number +// exponent = [ sign ] integer-number +// +// ; An integer shall be interpreted as a decimal integer number +// integer-number = NON-ZERO-DIGIT *( DIGIT ) +// +// ; A float shall be interpreted as a decimal floating point number +// floatingpoint-number = 1*( DIGIT ) [ "." ] *( DIGIT ) +// +// ; A prefix for a base unit (e.g. "kilo"). The numeric equivalents of +// ; these prefixes shall be interpreted as multiplication factors for the +// ; directly succeeding base unit. In other words, if a prefixed base +// ; unit is in the denominator of the overall programmatic unit, the +// ; numeric equivalent of that prefix is also in the denominator +// prefix = decimal-prefix / binary-prefix +// +// ; SI decimal prefixes as defined in ISO 1000 +// decimal-prefix = +// +// "deca" ; 10^1 +// / "hecto" ; 10^2 +// / "kilo" ; 10^3 +// / "mega" ; 10^6 +// / "giga" ; 10^9 +// / "tera" ; 10^12 +// / "peta" ; 10^15 +// / "exa" ; 10^18 +// / "zetta" ; 10^21 +// / "yotta" ; 10^24 +// / "deci" ; 10^-1 +// / "centi" ; 10^-2 +// / "milli" ; 10^-3 +// / "micro" ; 10^-6 +// / "nano" ; 10^-9 +// / "pico" ; 10^-12 +// / "femto" ; 10^-15 +// / "atto" ; 10^-18 +// / "zepto" ; 10^-21 +// / "yocto" ; 10^-24 +// +// ; IEC binary prefixes as defined in IEC 80000-13 +// binary-prefix = +// +// "kibi" ; 2^10 +// / "mebi" ; 2^20 +// / "gibi" ; 2^30 +// / "tebi" ; 2^40 +// / "pebi" ; 2^50 +// / "exbi" ; 2^60 +// / "zebi" ; 2^70 +// / "yobi" ; 2^80 +// +// ; The name of a base unit +// base-unit = standard-unit / extension-unit +// +// ; The name of a standard base unit +// standard-unit = UNIT-IDENTIFIER +// +// ; The name of an extension base unit.
If UNIT-IDENTIFIER begins with a +// ; prefix (see prefix ABNF rule), the meaning of that prefix shall not be +// ; changed by the extension base unit (examples of this for standard base +// ; units are "decibel" or "kilogram") +// ; extension-unit = org-id COLON UNIT-IDENTIFIER +// +// ; org-id shall include a copyrighted, trademarked, or otherwise unique +// ; name that is owned by the business entity that is defining the +// ; extension unit, or that is a registered ID assigned to the business +// ; entity by a recognized global authority. org-id shall not begin with +// ; a prefix (see prefix ABNF rule) +// org-id = UNIT-IDENTIFIER +// UNIT-IDENTIFIER = FIRST-UNIT-CHAR [ *( MID-UNIT-CHAR ) +// LAST-UNIT-CHAR ] +// FIRST-UNIT-CHAR = UPPERALPHA / LOWERALPHA / UNDERSCORE +// LAST-UNIT-CHAR = FIRST-UNIT-CHAR / DIGIT / PARENS +// MID-UNIT-CHAR = LAST-UNIT-CHAR / HYPHEN / S +// +// DIGIT = ZERO / NON-ZERO-DIGIT +// ZERO = "0" +// NON-ZERO-DIGIT = "1"-"9" +// HYPHEN = U+002D ; "-" +// CARET = U+005E ; "^" +// COLON = U+003A ; ":" +// UPPERALPHA = U+0041-005A ; "A" ... "Z" +// LOWERALPHA = U+0061-007A ; "a" ... "z" +// UNDERSCORE = U+005F ; "_" +// PARENS = U+0028 / U+0029 ; "(", ")" +// S = U+0020 ; " " +// +// This definition is further restricted as such a broad definition by the above grammar does not make sense in the +// context of virtual disk capacity. +// +// We do not allow for negative values, division operations, floating-point numbers, negative exponents, nor the use of +// multiple units. Furthermore, we limit the allowed decimal and binary prefixes. This gives us: +// +// programmatic-unit = +// +// number +// / [prefix] base-unit +// / number *( *S unit-operator *S number) *S unit-operator *S [prefix] base-unit +// / [prefix] base-unit *( *S unit-operator *S number) +// / number *( *S unit-operator *S number) *S unit-operator *S [prefix] base-unit *( *S unit-operator *S number) +// +// unit-operator = "*" +// number = integer-number / exponent-number +// exponent-number = base CARET exponent +// base = integer-number +// exponent = integer-number +// integer-number = NON-ZERO-DIGIT *( DIGIT ) +// prefix = decimal-prefix / binary-prefix +// +// decimal-prefix = +// +// "kilo" ; 10^3 +// / "mega" ; 10^6 +// / "giga" ; 10^9 +// +// binary-prefix = +// +// "kibi" ; 2^10 +// / "mebi" ; 2^20 +// / "gibi" ; 2^30 +// +// This function and the regular expression validCapacityRegexp are used to verify that the string we are parsing follows +// our above restricted grammar +func validCapacityString(s string) bool { + // Integer followed by a trailing '*' is not handled by the regular expression and so is explicitly checked + return validCapacityRegexp.MatchString(s) && !strings.HasSuffix(s, "*") +} diff --git a/vendor/github.com/vmware/govmomi/property/collector.go b/vendor/github.com/vmware/govmomi/property/collector.go index 263621a06..9ee78d0d3 100644 --- a/vendor/github.com/vmware/govmomi/property/collector.go +++ b/vendor/github.com/vmware/govmomi/property/collector.go @@ -276,7 +276,7 @@ func (p *Collector) RetrieveOne(ctx context.Context, obj types.ManagedObjectRefe // propagation. 
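The hunk just below changes WaitForUpdatesEx to accept *WaitOptions, presumably so that state the collector records on the options survives across calls instead of landing on a private copy; the package-level WaitForUpdates helpers later in this diff now pass &filter.WaitOptions accordingly. A minimal sketch of the adjusted call shape, assuming an already-authenticated vim25 client; the callback body is illustrative:

package main

import (
	"context"
	"fmt"

	"github.com/vmware/govmomi/property"
	"github.com/vmware/govmomi/vim25"
	"github.com/vmware/govmomi/vim25/types"
)

// waitExample shows the post-change signature; obtaining a logged-in
// *vim25.Client is elided.
func waitExample(ctx context.Context, c *vim25.Client) error {
	pc := property.DefaultCollector(c)
	opts := &property.WaitOptions{} // now passed by pointer
	return pc.WaitForUpdatesEx(ctx, opts, func(updates []types.ObjectUpdate) bool {
		for _, u := range updates {
			fmt.Println(u.Obj, u.Kind)
		}
		return false // returning true ends the wait loop
	})
}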
func (p *Collector) WaitForUpdatesEx( ctx context.Context, - opts WaitOptions, + opts *WaitOptions, onUpdatesFn func([]types.ObjectUpdate) bool) error { if !p.mu.TryLock() { diff --git a/vendor/github.com/vmware/govmomi/property/wait.go b/vendor/github.com/vmware/govmomi/property/wait.go index 07ea3cb5d..dae7bf383 100644 --- a/vendor/github.com/vmware/govmomi/property/wait.go +++ b/vendor/github.com/vmware/govmomi/property/wait.go @@ -123,7 +123,7 @@ func WaitForUpdates( return err } - return pc.WaitForUpdatesEx(ctx, filter.WaitOptions, onUpdatesFn) + return pc.WaitForUpdatesEx(ctx, &filter.WaitOptions, onUpdatesFn) } // WaitForUpdates waits for any of the specified properties of the specified @@ -166,5 +166,5 @@ func WaitForUpdatesEx( }() - return pc.WaitForUpdatesEx(ctx, filter.WaitOptions, onUpdatesFn) + return pc.WaitForUpdatesEx(ctx, &filter.WaitOptions, onUpdatesFn) } diff --git a/vendor/github.com/vmware/govmomi/session/manager.go b/vendor/github.com/vmware/govmomi/session/manager.go index e2d70a2f6..6a24fb954 100644 --- a/vendor/github.com/vmware/govmomi/session/manager.go +++ b/vendor/github.com/vmware/govmomi/session/manager.go @@ -291,3 +291,19 @@ func (sm *Manager) UpdateServiceMessage(ctx context.Context, message string) err return err } + +func (sm *Manager) ImpersonateUser(ctx context.Context, name string) error { + req := types.ImpersonateUser{ + This: sm.Reference(), + UserName: name, + Locale: Locale, + } + + res, err := methods.ImpersonateUser(ctx, sm.client, &req) + if err != nil { + return err + } + + sm.userSession = &res.Returnval + return nil +} diff --git a/vendor/github.com/vmware/govmomi/vapi/library/finder/path.go b/vendor/github.com/vmware/govmomi/vapi/library/finder/path.go index 7605627f1..10780a29c 100644 --- a/vendor/github.com/vmware/govmomi/vapi/library/finder/path.go +++ b/vendor/github.com/vmware/govmomi/vapi/library/finder/path.go @@ -23,6 +23,7 @@ import ( "path" "strings" + "github.com/vmware/govmomi/internal" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/property" "github.com/vmware/govmomi/vapi/library" @@ -134,6 +135,29 @@ func (f *PathFinder) datastoreName(ctx context.Context, id string) (string, erro return name, nil } +func (f *PathFinder) convertPath(ctx context.Context, b *mo.Datastore, path string) (string, error) { + if !internal.IsDatastoreVSAN(*b) { + return path, nil + } + + var dc *object.Datacenter + + entities, err := mo.Ancestors(ctx, f.c, f.c.ServiceContent.PropertyCollector, b.Self) + if err != nil { + return "", err + } + + for _, entity := range entities { + if entity.Self.Type == "Datacenter" { + dc = object.NewDatacenter(f.c, entity.Self) + break + } + } + + m := object.NewDatastoreNamespaceManager(f.c) + return m.ConvertNamespacePathToUuidPath(ctx, dc, path) +} + // ResolveLibraryItemStorage transforms StorageURIs Datastore url (uuid) to Datastore name. 
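Context for the function below: with Retrieve now fetching summary.url and summary.type, the new convertPath helper can detect vSAN/vVOL-backed datastores (via internal.IsDatastoreVSAN, added earlier in this diff) and translate namespace paths to UUID paths before the datastore prefix is trimmed. A sketch of the call shape, with client acquisition elided; ResolveLibraryItemStorage is the method shown in this diff, while NewPathFinder is the package's existing constructor (not shown here):

package main

import (
	"context"

	"github.com/vmware/govmomi/vapi/library"
	"github.com/vmware/govmomi/vapi/library/finder"
	"github.com/vmware/govmomi/vapi/rest"
	"github.com/vmware/govmomi/vim25"
)

// resolveStorage rewrites each StorageURI to "[datastore] path" form,
// converting vSAN namespace paths to UUID paths along the way.
func resolveStorage(ctx context.Context, vc *vim25.Client, rc *rest.Client, storage []library.Storage) error {
	f := finder.NewPathFinder(library.NewManager(rc), vc)
	return f.ResolveLibraryItemStorage(ctx, storage)
}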
func (f *PathFinder) ResolveLibraryItemStorage(ctx context.Context, storage []library.Storage) error { // TODO: @@ -154,7 +178,8 @@ func (f *PathFinder) ResolveLibraryItemStorage(ctx context.Context, storage []li var ds []mo.Datastore pc := property.DefaultCollector(f.c) - if err := pc.Retrieve(ctx, ids, []string{"name", "info.url"}, &ds); err != nil { + props := []string{"name", "summary.url", "summary.type"} + if err := pc.Retrieve(ctx, ids, props, &ds); err != nil { return err } @@ -164,12 +189,21 @@ func (f *PathFinder) ResolveLibraryItemStorage(ctx context.Context, storage []li for _, item := range storage { b := backing[item.StorageBacking.DatastoreID] - dsurl := b.Info.GetDatastoreInfo().Url + dsurl := b.Summary.Url for i := range item.StorageURIs { - u := strings.TrimPrefix(item.StorageURIs[i], dsurl) + uri, err := url.Parse(item.StorageURIs[i]) + if err != nil { + return err + } + uri.Path = path.Clean(uri.Path) // required for ConvertNamespacePathToUuidPath() + uri.RawQuery = "" + u, err := f.convertPath(ctx, b, uri.String()) + if err != nil { + return err + } + u = strings.TrimPrefix(u, dsurl) u = strings.TrimPrefix(u, "/") - u = strings.SplitN(u, "?", 2)[0] // strip query, if any item.StorageURIs[i] = (&object.DatastorePath{ Datastore: b.Name, diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go index 06282ce79..a199b36b4 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go @@ -1,20 +1,11 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" import ( + "google.golang.org/grpc/stats" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" @@ -31,18 +22,26 @@ const ( GRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ) -// Filter is a predicate used to determine whether a given request in -// interceptor info should be traced. A Filter must return true if +// InterceptorFilter is a predicate used to determine whether a given request in +// interceptor info should be instrumented. An InterceptorFilter must return true if // the request should be traced. -type Filter func(*InterceptorInfo) bool +// +// Deprecated: Use stats handlers instead. +type InterceptorFilter func(*InterceptorInfo) bool + +// Filter is a predicate used to determine whether a given request +// should be instrumented, based on the attached RPC tag info. +// A Filter must return true if the request should be instrumented. +type Filter func(*stats.RPCTagInfo) bool // config is a group of options for this instrumentation.
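Before the config plumbing continues below: the new stats-handler Filter above is consumed by NewClientHandler/NewServerHandler (wired up in stats_handler.go later in this diff), while interceptor-based filtering is renamed to InterceptorFilter and deprecated. A client-side sketch; the health-check exclusion and the insecure credentials are illustrative choices, not something this diff prescribes:

package main

import (
	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/stats"
)

// dial installs the client stats handler; RPCs for which the Filter
// returns false are tagged as not-recorded.
func dial(target string) (*grpc.ClientConn, error) {
	return grpc.NewClient(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(otelgrpc.NewClientHandler(
			otelgrpc.WithFilter(func(info *stats.RPCTagInfo) bool {
				return info.FullMethodName != "/grpc.health.v1.Health/Check"
			}),
		)),
	)
}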
type config struct { - Filter Filter - Propagators propagation.TextMapPropagator - TracerProvider trace.TracerProvider - MeterProvider metric.MeterProvider - SpanStartOptions []trace.SpanStartOption + Filter Filter + InterceptorFilter InterceptorFilter + Propagators propagation.TextMapPropagator + TracerProvider trace.TracerProvider + MeterProvider metric.MeterProvider + SpanStartOptions []trace.SpanStartOption ReceivedEvent bool SentEvent bool @@ -163,15 +162,30 @@ func (o tracerProviderOption) apply(c *config) { // WithInterceptorFilter returns an Option to use the request filter. // // Deprecated: Use stats handlers instead. -func WithInterceptorFilter(f Filter) Option { +func WithInterceptorFilter(f InterceptorFilter) Option { return interceptorFilterOption{f: f} } type interceptorFilterOption struct { - f Filter + f InterceptorFilter } func (o interceptorFilterOption) apply(c *config) { + if o.f != nil { + c.InterceptorFilter = o.f + } +} + +// WithFilter returns an Option to use the request filter. +func WithFilter(f Filter) Option { + return filterOption{f: f} +} + +type filterOption struct { + f Filter +} + +func (o filterOption) apply(c *config) { if o.f != nil { c.Filter = o.f } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go index 958dcd87a..b8b836b00 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package otelgrpc is the instrumentation library for [google.golang.org/grpc]. diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go index 3b487a936..7f19058e4 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" @@ -59,7 +48,7 @@ var ( ) // UnaryClientInterceptor returns a grpc.UnaryClientInterceptor suitable -// for use in a grpc.Dial call. +// for use in a grpc.NewClient call. // // Deprecated: Use [NewClientHandler] instead. func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor { @@ -81,7 +70,7 @@ func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor { Method: method, Type: UnaryClient, } - if cfg.Filter != nil && !cfg.Filter(i) { + if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { return invoker(ctx, method, req, reply, cc, callOpts...) } @@ -196,7 +185,7 @@ func (w *clientStream) CloseSend() error { return err } -func wrapClientStream(ctx context.Context, s grpc.ClientStream, desc *grpc.StreamDesc, span trace.Span, cfg *config) *clientStream { +func wrapClientStream(s grpc.ClientStream, desc *grpc.StreamDesc, span trace.Span, cfg *config) *clientStream { return &clientStream{ ClientStream: s, span: span, @@ -219,7 +208,7 @@ func (w *clientStream) endSpan(err error) { } // StreamClientInterceptor returns a grpc.StreamClientInterceptor suitable -// for use in a grpc.Dial call. +// for use in a grpc.NewClient call. // // Deprecated: Use [NewClientHandler] instead. func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { @@ -241,7 +230,7 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { Method: method, Type: StreamClient, } - if cfg.Filter != nil && !cfg.Filter(i) { + if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { return streamer(ctx, desc, cc, method, callOpts...) } @@ -270,7 +259,7 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { span.End() return s, err } - stream := wrapClientStream(ctx, s, desc, span, cfg) + stream := wrapClientStream(s, desc, span, cfg) return stream, nil } } @@ -296,7 +285,7 @@ func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { UnaryServerInfo: info, Type: UnaryServer, } - if cfg.Filter != nil && !cfg.Filter(i) { + if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { return handler(ctx, req) } @@ -422,7 +411,7 @@ func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { StreamServerInfo: info, Type: StreamServer, } - if cfg.Filter != nil && !cfg.Filter(i) { + if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) { return handler(srv, wrapServerStream(ctx, ss, cfg)) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go index f6116946b..b62f7cd7c 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go index cf32a9e97..bef07b7a3 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go index f585fb6ae..3aa37915d 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go index b65fab308..409c621b7 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go index 73d2b8b6b..fad58733f 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" @@ -38,6 +27,7 @@ type gRPCContext struct { messagesReceived int64 messagesSent int64 metricAttrs []attribute.KeyValue + record bool } type serverHandler struct { @@ -77,6 +67,10 @@ func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont gctx := gRPCContext{ metricAttrs: attrs, + record: true, + } + if h.config.Filter != nil { + gctx.record = h.config.Filter(info) } return context.WithValue(ctx, gRPCContextKey{}, &gctx) } @@ -113,6 +107,10 @@ func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont gctx := gRPCContext{ metricAttrs: attrs, + record: true, + } + if h.config.Filter != nil { + gctx.record = h.config.Filter(info) } return inject(context.WithValue(ctx, gRPCContextKey{}, &gctx), h.config.Propagators) @@ -141,6 +139,9 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool gctx, _ := ctx.Value(gRPCContextKey{}).(*gRPCContext) if gctx != nil { + if !gctx.record { + return + } metricAttrs = make([]attribute.KeyValue, 0, len(gctx.metricAttrs)+1) metricAttrs = append(metricAttrs, gctx.metricAttrs...) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go index d633c4bef..3f9cfda54 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -1,22 +1,11 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" // Version is the current release version of the gRPC instrumentation. func Version() string { - return "0.49.0" + return "0.52.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go index 92b8cf73c..6aae83bfd 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -23,7 +12,7 @@ import ( ) // DefaultClient is the default Client and is used by Get, Head, Post and PostForm. -// Please be careful of intitialization order - for example, if you change +// Please be careful of initialization order - for example, if you change // the global propagator, the DefaultClient might still be using the old one. var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go index cabf645a5..214acaf58 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
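Reviewer note: the initialization-order warning whose typo is fixed above is worth illustrating. `DefaultClient` is a package-level variable, so its transport captures whatever global propagator was registered when the package initialized. A safe pattern is to configure globals first and construct your own client afterwards; a sketch under that assumption:

```go
package main

import (
	"context"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/propagation"
)

func main() {
	// Register the global propagator before constructing any transports,
	// so the instrumentation picks it up rather than the default no-op.
	otel.SetTextMapPropagator(propagation.TraceContext{})

	// Building the client after configuring globals avoids the stale
	// propagator problem described in the DefaultClient comment above.
	client := &http.Client{Transport: otelhttp.NewTransport(http.DefaultTransport)}

	req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, "https://example.com/", nil)
	if err != nil {
		panic(err)
	}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
}
```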
+// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go index a1b5b5e5a..f0a9bb9ef 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -111,7 +100,7 @@ func WithPublicEndpoint() Option { }) } -// WithPublicEndpointFn runs with every request, and allows conditionnally +// WithPublicEndpointFn runs with every request, and allows conditionally // configuring the Handler to link the span with an incoming span context. If // this option is not provided or returns false, then the association is a // child association instead of a link. diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go index 38c7f01c7..56b24b982 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package otelhttp provides an http.Handler and functions that are intended // to be used to add tracing by wrapping existing handlers (with Handler) and diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index 1fc15019e..d01bdccf4 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -1,32 +1,20 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
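Reviewer note: `WithPublicEndpointFn`, whose doc typo is fixed above, is the per-request variant of `WithPublicEndpoint`: when the callback returns true, the incoming span context is linked rather than adopted as parent. A sketch that treats only external traffic as public; the `X-Internal` header is a made-up convention for this example, not part of the library:

```go
package main

import (
	"log"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	handler := otelhttp.NewHandler(mux, "server",
		// Hypothetical convention: trusted internal callers set X-Internal;
		// everyone else is treated as public (span link, not parent).
		otelhttp.WithPublicEndpointFn(func(r *http.Request) bool {
			return r.Header.Get("X-Internal") == ""
		}),
	)

	log.Fatal(http.ListenAndServe(":8080", handler))
}
```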
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" import ( - "io" "net/http" "time" "github.com/felixge/httpsnoop" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" "go.opentelemetry.io/otel/trace" ) @@ -46,6 +34,7 @@ type middleware struct { publicEndpoint bool publicEndpointFn func(*http.Request) bool + traceSemconv semconv.HTTPServer requestBytesCounter metric.Int64Counter responseBytesCounter metric.Int64Counter serverLatencyMeasure metric.Float64Histogram @@ -67,6 +56,8 @@ func NewHandler(handler http.Handler, operation string, opts ...Option) http.Han func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler { h := middleware{ operation: operation, + + traceSemconv: semconv.NewHTTPServer(), } defaultOpts := []Option{ @@ -143,12 +134,9 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) opts := []trace.SpanStartOption{ - trace.WithAttributes(semconvutil.HTTPServerRequest(h.server, r)...), - } - if h.server != "" { - hostAttr := semconv.NetHostName(h.server) - opts = append(opts, trace.WithAttributes(hostAttr)) + trace.WithAttributes(h.traceSemconv.RequestTraceAttrs(h.server, r)...), } + opts = append(opts, h.spanStartOptions...) if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) { opts = append(opts, trace.WithNewRoot()) @@ -217,23 +205,36 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc { return rww.WriteHeader }, + Flush: func(httpsnoop.FlushFunc) httpsnoop.FlushFunc { + return rww.Flush + }, }) - labeler := &Labeler{} - ctx = injectLabeler(ctx, labeler) + labeler, found := LabelerFromContext(ctx) + if !found { + ctx = ContextWithLabeler(ctx, labeler) + } next.ServeHTTP(w, r.WithContext(ctx)) - setAfterServeAttributes(span, bw.read.Load(), rww.written, rww.statusCode, bw.err, rww.err) + span.SetStatus(semconv.ServerStatus(rww.statusCode)) + span.SetAttributes(h.traceSemconv.ResponseTraceAttrs(semconv.ResponseTelemetry{ + StatusCode: rww.statusCode, + ReadBytes: bw.read.Load(), + ReadError: bw.err, + WriteBytes: rww.written, + WriteError: rww.err, + })...) // Add metrics attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...) if rww.statusCode > 0 { attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode)) } - o := metric.WithAttributes(attributes...) 
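Reviewer note: the `LabelerFromContext`/`ContextWithLabeler` change in the hunk above means a caller-supplied `Labeler` is now reused instead of being unconditionally replaced, so attributes can be attached to the instrumentation's metrics from outside the middleware. A sketch, with a hypothetical `X-Tenant` header as the attribute source:

```go
package main

import (
	"log"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
)

// withTenantLabel injects a Labeler before otelhttp's handler runs, so the
// byte counters and latency histogram it records carry the tenant attribute.
func withTenantLabel(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		l := &otelhttp.Labeler{}
		l.Add(attribute.String("tenant", r.Header.Get("X-Tenant"))) // X-Tenant is illustrative
		next.ServeHTTP(w, r.WithContext(otelhttp.ContextWithLabeler(r.Context(), l)))
	})
}

func main() {
	api := otelhttp.NewHandler(http.NotFoundHandler(), "server")
	log.Fatal(http.ListenAndServe(":8080", withTenantLabel(api)))
}
```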
- h.requestBytesCounter.Add(ctx, bw.read.Load(), o) - h.responseBytesCounter.Add(ctx, rww.written, o) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + addOpts := []metric.AddOption{o} // Allocate vararg slice once. + h.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...) + h.responseBytesCounter.Add(ctx, rww.written, addOpts...) // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) @@ -241,37 +242,11 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http h.serverLatencyMeasure.Record(ctx, elapsedTime, o) } -func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, rerr, werr error) { - attributes := []attribute.KeyValue{} - - // TODO: Consider adding an event after each read and write, possibly as an - // option (defaulting to off), so as to not create needlessly verbose spans. - if read > 0 { - attributes = append(attributes, ReadBytesKey.Int64(read)) - } - if rerr != nil && rerr != io.EOF { - attributes = append(attributes, ReadErrorKey.String(rerr.Error())) - } - if wrote > 0 { - attributes = append(attributes, WroteBytesKey.Int64(wrote)) - } - if statusCode > 0 { - attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) - } - span.SetStatus(semconvutil.HTTPServerStatus(statusCode)) - - if werr != nil && werr != io.EOF { - attributes = append(attributes, WriteErrorKey.String(werr.Error())) - } - span.SetAttributes(attributes...) -} - // WithRouteTag annotates spans and metrics with the provided route name // with HTTP route attribute. func WithRouteTag(route string, h http.Handler) http.Handler { + attr := semconv.NewHTTPServer().Route(route) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - attr := semconv.HTTPRouteKey.String(route) - span := trace.SpanFromContext(r.Context()) span.SetAttributes(attr) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go new file mode 100644 index 000000000..3ec0ad00c --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -0,0 +1,82 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "fmt" + "net/http" + "os" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" +) + +type ResponseTelemetry struct { + StatusCode int + ReadBytes int64 + ReadError error + WriteBytes int64 + WriteError error +} + +type HTTPServer struct { + duplicate bool +} + +// RequestTraceAttrs returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. 
It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + if s.duplicate { + return append(oldHTTPServer{}.RequestTraceAttrs(server, req), newHTTPServer{}.RequestTraceAttrs(server, req)...) + } + return oldHTTPServer{}.RequestTraceAttrs(server, req) +} + +// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. +func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + if s.duplicate { + return append(oldHTTPServer{}.ResponseTraceAttrs(resp), newHTTPServer{}.ResponseTraceAttrs(resp)...) + } + return oldHTTPServer{}.ResponseTraceAttrs(resp) +} + +// Route returns the attribute for the route. +func (s HTTPServer) Route(route string) attribute.KeyValue { + return oldHTTPServer{}.Route(route) +} + +func NewHTTPServer() HTTPServer { + env := strings.ToLower(os.Getenv("OTEL_HTTP_CLIENT_COMPATIBILITY_MODE")) + return HTTPServer{duplicate: env == "http/dup"} +} + +// ServerStatus returns a span status code and message for an HTTP status code +// value returned by a server. Status codes in the 400-499 range are not +// returned as errors. +func ServerStatus(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 500 { + return codes.Error, "" + } + return codes.Unset, "" +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go new file mode 100644 index 000000000..e7f293761 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -0,0 +1,91 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "net" + "net/http" + "strconv" + "strings" + + "go.opentelemetry.io/otel/attribute" + semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0" +) + +// splitHostPort splits a network address hostport of the form "host", +// "host%zone", "[host]", "[host%zone]", "host:port", "host%zone:port", +// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and +// port. +// +// An empty host is returned if it is not provided or unparsable. A negative +// port is returned if it is not provided or unparsable. +func splitHostPort(hostport string) (host string, port int) { + port = -1 + + if strings.HasPrefix(hostport, "[") { + addrEnd := strings.LastIndex(hostport, "]") + if addrEnd < 0 { + // Invalid hostport.
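Reviewer note: `ServerStatus` above encodes the HTTP semantic-convention rule that only 5xx (and malformed codes) mark a server span as an error; 4xx stays `Unset` because a client mistake is not a server failure. The helper lives in an internal package, so the sketch below is a standalone copy, for illustration only:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/codes"
)

// serverStatus mirrors the internal semconv.ServerStatus helper above:
// 1xx-4xx map to Unset, 5xx to Error, and anything outside 100-599 is
// flagged as an invalid status code.
func serverStatus(code int) (codes.Code, string) {
	if code < 100 || code >= 600 {
		return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
	}
	if code >= 500 {
		return codes.Error, ""
	}
	return codes.Unset, ""
}

func main() {
	for _, c := range []int{200, 404, 500, 42} {
		code, msg := serverStatus(c)
		fmt.Println(c, code, msg)
	}
}
```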
+ return + } + if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 { + host = hostport[1:addrEnd] + return + } + } else { + if i := strings.LastIndex(hostport, ":"); i < 0 { + host = hostport + return + } + } + + host, pStr, err := net.SplitHostPort(hostport) + if err != nil { + return + } + + p, err := strconv.ParseUint(pStr, 10, 16) + if err != nil { + return + } + return host, int(p) +} + +func requiredHTTPPort(https bool, port int) int { // nolint:revive + if https { + if port > 0 && port != 443 { + return port + } + } else { + if port > 0 && port != 80 { + return port + } + } + return -1 +} + +func serverClientIP(xForwardedFor string) string { + if idx := strings.Index(xForwardedFor, ","); idx >= 0 { + xForwardedFor = xForwardedFor[:idx] + } + return xForwardedFor +} + +func netProtocol(proto string) (name string, version string) { + name, version, _ = strings.Cut(proto, "/") + name = strings.ToLower(name) + return name, version +} + +var methodLookup = map[string]attribute.KeyValue{ + http.MethodConnect: semconvNew.HTTPRequestMethodConnect, + http.MethodDelete: semconvNew.HTTPRequestMethodDelete, + http.MethodGet: semconvNew.HTTPRequestMethodGet, + http.MethodHead: semconvNew.HTTPRequestMethodHead, + http.MethodOptions: semconvNew.HTTPRequestMethodOptions, + http.MethodPatch: semconvNew.HTTPRequestMethodPatch, + http.MethodPost: semconvNew.HTTPRequestMethodPost, + http.MethodPut: semconvNew.HTTPRequestMethodPut, + http.MethodTrace: semconvNew.HTTPRequestMethodTrace, +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go new file mode 100644 index 000000000..c3e838aaa --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -0,0 +1,74 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "errors" + "io" + "net/http" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.20.0" +) + +type oldHTTPServer struct{} + +// RequestTraceAttrs returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (o oldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + return semconvutil.HTTPServerRequest(server, req) +} + +// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. 
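Reviewer note: `splitHostPort` above trades errors for sentinel values: an unparsable host comes back empty and a missing or invalid port comes back as -1, which lets callers append port attributes only when a port was actually present. A standalone copy with a few worked inputs (illustration only; the real helper is internal):

```go
package main

import (
	"fmt"
	"net"
	"strconv"
	"strings"
)

// splitHostPort is a copy of the internal helper above, reproduced only to
// demonstrate its conventions: no errors, just "" for an unknown host and
// -1 for a missing or unparsable port.
func splitHostPort(hostport string) (host string, port int) {
	port = -1

	if strings.HasPrefix(hostport, "[") {
		addrEnd := strings.LastIndex(hostport, "]")
		if addrEnd < 0 {
			return // invalid hostport
		}
		if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 {
			host = hostport[1:addrEnd]
			return
		}
	} else {
		if i := strings.LastIndex(hostport, ":"); i < 0 {
			host = hostport
			return
		}
	}

	host, pStr, err := net.SplitHostPort(hostport)
	if err != nil {
		return
	}

	p, err := strconv.ParseUint(pStr, 10, 16)
	if err != nil {
		return
	}
	return host, int(p)
}

func main() {
	inputs := []string{
		"example.com",      // host only          -> ("example.com", -1)
		"example.com:8080", // host and port      -> ("example.com", 8080)
		"[::1]:4317",       // bracketed IPv6     -> ("::1", 4317)
		"[::1]",            // bracketed, no port -> ("::1", -1)
		":80",              // port only          -> ("", 80)
	}
	for _, in := range inputs {
		h, p := splitHostPort(in)
		fmt.Printf("%q -> host=%q port=%d\n", in, h, p)
	}
}
```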
+// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. +func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + attributes := []attribute.KeyValue{} + + if resp.ReadBytes > 0 { + attributes = append(attributes, semconv.HTTPRequestContentLength(int(resp.ReadBytes))) + } + if resp.ReadError != nil && !errors.Is(resp.ReadError, io.EOF) { + // This is not in the semantic conventions, but is historically provided + attributes = append(attributes, attribute.String("http.read_error", resp.ReadError.Error())) + } + if resp.WriteBytes > 0 { + attributes = append(attributes, semconv.HTTPResponseContentLength(int(resp.WriteBytes))) + } + if resp.StatusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(resp.StatusCode)) + } + if resp.WriteError != nil && !errors.Is(resp.WriteError, io.EOF) { + // This is not in the semantic conventions, but is historically provided + attributes = append(attributes, attribute.String("http.write_error", resp.WriteError.Error())) + } + + return attributes +} + +// Route returns the attribute for the route. +func (o oldHTTPServer) Route(route string) attribute.KeyValue { + return semconv.HTTPRoute(route) +} + +// HTTPStatusCode returns the attribute for the HTTP status code. +// This is a temporary function needed by metrics. This will be removed when MetricsRequest is added. +func HTTPStatusCode(status int) attribute.KeyValue { + return semconv.HTTPStatusCode(status) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go new file mode 100644 index 000000000..0c5d4c460 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go @@ -0,0 +1,197 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "net/http" + "strings" + + "go.opentelemetry.io/otel/attribute" + semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0" +) + +type newHTTPServer struct{} + +// RequestTraceAttrs returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + count := 3 // ServerAddress, Method, Scheme + + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name.
+ host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + count++ + } + + method, methodOriginal := n.method(req.Method) + if methodOriginal != (attribute.KeyValue{}) { + count++ + } + + scheme := n.scheme(req.TLS != nil) + + if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. + count++ + if peerPort > 0 { + count++ + } + } + + useragent := req.UserAgent() + if useragent != "" { + count++ + } + + clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) + if clientIP != "" { + count++ + } + + if req.URL != nil && req.URL.Path != "" { + count++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + count++ + } + if protoVersion != "" { + count++ + } + + attrs := make([]attribute.KeyValue, 0, count) + attrs = append(attrs, + semconvNew.ServerAddress(host), + method, + scheme, + ) + + if hostPort > 0 { + attrs = append(attrs, semconvNew.ServerPort(hostPort)) + } + if methodOriginal != (attribute.KeyValue{}) { + attrs = append(attrs, methodOriginal) + } + + if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. + attrs = append(attrs, semconvNew.NetworkPeerAddress(peer)) + if peerPort > 0 { + attrs = append(attrs, semconvNew.NetworkPeerPort(peerPort)) + } + } + + if useragent := req.UserAgent(); useragent != "" { + attrs = append(attrs, semconvNew.UserAgentOriginal(useragent)) + } + + if clientIP != "" { + attrs = append(attrs, semconvNew.ClientAddress(clientIP)) + } + + if req.URL != nil && req.URL.Path != "" { + attrs = append(attrs, semconvNew.URLPath(req.URL.Path)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + return attrs +} + +func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { + if method == "" { + return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} + } + if attr, ok := methodLookup[method]; ok { + return attr, attribute.KeyValue{} + } + + orig := semconvNew.HTTPRequestMethodOriginal(method) + if attr, ok := methodLookup[strings.ToUpper(method)]; ok { + return attr, orig + } + return semconvNew.HTTPRequestMethodGet, orig +} + +func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return semconvNew.URLScheme("https") + } + return semconvNew.URLScheme("http") +} + +// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted.
+func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + var count int + + if resp.ReadBytes > 0 { + count++ + } + if resp.WriteBytes > 0 { + count++ + } + if resp.StatusCode > 0 { + count++ + } + + attributes := make([]attribute.KeyValue, 0, count) + + if resp.ReadBytes > 0 { + attributes = append(attributes, + semconvNew.HTTPRequestBodySize(int(resp.ReadBytes)), + ) + } + if resp.WriteBytes > 0 { + attributes = append(attributes, + semconvNew.HTTPResponseBodySize(int(resp.WriteBytes)), + ) + } + if resp.StatusCode > 0 { + attributes = append(attributes, + semconvNew.HTTPResponseStatusCode(resp.StatusCode), + ) + } + + return attributes +} + +// Route returns the attribute for the route. +func (n newHTTPServer) Route(route string) attribute.KeyValue { + return semconvNew.HTTPRoute(route) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go index edf4ce3d3..7aa5f99e8 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go index 0efd5261f..a73bb06e9 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go @@ -2,18 +2,7 @@ // source: internal/shared/semconvutil/httpconv.go.tmpl // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
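Reviewer note: both `RequestTraceAttrs` and `ResponseTraceAttrs` above use the same two-pass shape: count the optional attributes first, then allocate the slice with exactly that capacity so the hot path performs a single allocation. A generic sketch of the pattern, with made-up attribute names:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

// buildAttrs sketches the count-then-append pattern from the functions
// above: first count the optional attributes, then allocate the backing
// array exactly once before appending.
func buildAttrs(host string, port int, route string) []attribute.KeyValue {
	count := 1 // host is always present
	if port > 0 {
		count++
	}
	if route != "" {
		count++
	}

	attrs := make([]attribute.KeyValue, 0, count)
	attrs = append(attrs, attribute.String("server.address", host))
	if port > 0 {
		attrs = append(attrs, attribute.Int("server.port", port))
	}
	if route != "" {
		attrs = append(attrs, attribute.String("http.route", route))
	}
	return attrs
}

func main() {
	fmt.Println(buildAttrs("example.com", 8080, "/users/{id}"))
}
```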
+// SPDX-License-Identifier: Apache-2.0 package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go index d3a06e0ca..a9a9226b3 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go @@ -2,17 +2,7 @@ // source: internal/shared/semconvutil/netconv.go.tmpl // Copyright The OpenTelemetry Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" @@ -102,7 +92,7 @@ func (c *netConv) Host(address string) []attribute.KeyValue { attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.HostName(h)) if p > 0 { - attrs = append(attrs, c.HostPort(int(p))) + attrs = append(attrs, c.HostPort(p)) } return attrs } @@ -148,7 +138,7 @@ func (c *netConv) Peer(address string) []attribute.KeyValue { attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.PeerName(h)) if p > 0 { - attrs = append(attrs, c.PeerPort(int(p))) + attrs = append(attrs, c.PeerPort(p)) } return attrs } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go index 26a51a180..ea504e396 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -48,8 +37,12 @@ type labelerContextKeyType int const lablelerContextKey labelerContextKeyType = 0 -func injectLabeler(ctx context.Context, l *Labeler) context.Context { - return context.WithValue(ctx, lablelerContextKey, l) +// ContextWithLabeler returns a new context with the provided Labeler instance. +// Attributes added to the specified labeler will be injected into metrics +// emitted by the instrumentation. 
Only one labeler can be injected into the +// context. Injecting it multiple times will override the previous calls. +func ContextWithLabeler(parent context.Context, l *Labeler) context.Context { + return context.WithValue(parent, lablelerContextKey, l) } // LabelerFromContext retrieves a Labeler instance from the provided context if diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index 43e937a67..0d3cb2e4a 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -22,15 +11,14 @@ import ( "sync/atomic" "time" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" - "go.opentelemetry.io/otel/trace" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" + "go.opentelemetry.io/otel/trace" ) // Transport implements the http.RoundTripper interface and wraps @@ -148,8 +136,10 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx)) } - labeler := &Labeler{} - ctx = injectLabeler(ctx, labeler) + labeler, found := LabelerFromContext(ctx) + if !found { + ctx = ContextWithLabeler(ctx, labeler) + } r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request. @@ -181,11 +171,12 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { if res.StatusCode > 0 { metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode)) } - o := metric.WithAttributes(metricAttrs...) - t.requestBytesCounter.Add(ctx, bw.read.Load(), o) + o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...)) + addOpts := []metric.AddOption{o} // Allocate vararg slice once. + t.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...) // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { - t.responseBytesCounter.Add(ctx, n, o) + t.responseBytesCounter.Add(ctx, n, addOpts...)
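Reviewer note: the switch from `metric.WithAttributes` to `metric.WithAttributeSet` in both the handler and the transport is an allocation fix: `WithAttributes` sorts and de-duplicates into a new `Set` on every call, while `WithAttributeSet` accepts a set the caller builds once, and pre-building the `[]metric.AddOption` slice likewise avoids re-allocating the vararg slice per `Add`. A sketch of the same pattern in user code:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	ctx := context.Background()
	meter := otel.Meter("example")
	counter, err := meter.Int64Counter("requests.handled")
	if err != nil {
		panic(err)
	}

	// Build the attribute set and the option slice once, outside the hot
	// path, instead of paying for a new Set on every Add call.
	set := attribute.NewSet(attribute.String("route", "/users")) // illustrative attribute
	addOpts := []metric.AddOption{metric.WithAttributeSet(set)}

	for i := 0; i < 100; i++ {
		counter.Add(ctx, 1, addOpts...)
	}
}
```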
} // traces diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 35254e888..b0957f28c 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -1,22 +1,11 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" // Version is the current release version of the otelhttp instrumentation. func Version() string { - return "0.49.0" + return "0.53.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go index 2852ec971..948f8406c 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" @@ -98,3 +87,13 @@ func (w *respWriterWrapper) WriteHeader(statusCode int) { } w.ResponseWriter.WriteHeader(statusCode) } + +func (w *respWriterWrapper) Flush() { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + + if f, ok := w.ResponseWriter.(http.Flusher); ok { + f.Flush() + } +} diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore index 120b63a9c..6bf3abc41 100644 --- a/vendor/go.opentelemetry.io/otel/.codespellignore +++ b/vendor/go.opentelemetry.io/otel/.codespellignore @@ -5,3 +5,5 @@ collison consequentially ans nam +valu +thirdparty diff --git a/vendor/go.opentelemetry.io/otel/.codespellrc b/vendor/go.opentelemetry.io/otel/.codespellrc index 4afbb1fb3..e2cb3ea94 100644 --- a/vendor/go.opentelemetry.io/otel/.codespellrc +++ b/vendor/go.opentelemetry.io/otel/.codespellrc @@ -5,6 +5,6 @@ check-filenames = check-hidden = ignore-words = .codespellignore interactive = 1 -skip = .git,go.mod,go.sum,semconv,venv,.tools +skip = .git,go.mod,go.sum,go.work,go.work.sum,semconv,venv,.tools uri-ignore-words-list = * write = diff --git a/vendor/go.opentelemetry.io/otel/.gitmodules b/vendor/go.opentelemetry.io/otel/.gitmodules deleted file mode 100644 index 38a1f5698..000000000 --- a/vendor/go.opentelemetry.io/otel/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "opentelemetry-proto"] - path = exporters/otlp/internal/opentelemetry-proto - url = https://github.com/open-telemetry/opentelemetry-proto diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index a62511f38..6d9c8b649 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -11,6 +11,7 @@ linters: enable: - depguard - errcheck + - errorlint - godot - gofumpt - goimports @@ -21,8 +22,11 @@ linters: - misspell - revive - staticcheck + - tenv - typecheck + - unconvert - unused + - unparam issues: # Maximum issues count per one linter. @@ -124,6 +128,8 @@ linters-settings: - "**/example/**/*.go" - "**/trace/*.go" - "**/trace/**/*.go" + - "**/log/*.go" + - "**/log/**/*.go" deny: - pkg: "go.opentelemetry.io/otel/internal$" desc: Do not use cross-module internal packages. diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 98f2d2043..c01e6998e 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,159 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] +## [1.28.0/0.50.0/0.4.0] 2024-07-02 + +### Added + +- The `IsEmpty` method is added to the `Instrument` type in `go.opentelemetry.io/otel/sdk/metric`. + This method is used to check if an `Instrument` instance is a zero-value. (#5431) +- Store and provide the emitted `context.Context` in `ScopeRecords` of `go.opentelemetry.io/otel/sdk/log/logtest`. (#5468) +- The `go.opentelemetry.io/otel/semconv/v1.26.0` package. + The package contains semantic conventions from the `v1.26.0` version of the OpenTelemetry Semantic Conventions. (#5476) +- The `AssertRecordEqual` method to `go.opentelemetry.io/otel/log/logtest` to allow comparison of two log records in tests. (#5499) +- The `WithHeaders` option to `go.opentelemetry.io/otel/exporters/zipkin` to allow configuring custom http headers while exporting spans. 
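Reviewer note: the new `Flush` method on `respWriterWrapper` above, together with the `httpsnoop` `Flush` hook added to the handler, lets flushes propagate through the instrumented writer (writing a 200 first if no header was sent). Without it, streaming handlers behind the middleware could not push data incrementally. A sketch of a streaming handler that relies on this behavior, assuming it is served through `otelhttp.NewHandler`:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"time"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	stream := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		flusher, ok := w.(http.Flusher) // satisfied through the instrumented writer
		if !ok {
			http.Error(w, "streaming unsupported", http.StatusInternalServerError)
			return
		}
		for i := 0; i < 3; i++ {
			fmt.Fprintf(w, "event %d\n", i)
			flusher.Flush() // push each chunk to the client immediately
			time.Sleep(100 * time.Millisecond)
		}
	})

	log.Fatal(http.ListenAndServe(":8080", otelhttp.NewHandler(stream, "stream")))
}
```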
(#5530) + +### Changed + +- `Tracer.Start` in `go.opentelemetry.io/otel/trace/noop` no longer allocates a span for empty span context. (#5457) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/otel-collector`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/zipkin`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#5490) + - The exporter no longer exports the deprecated "otel.library.name" or "otel.library.version" attributes. +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/resource`. (#5490) +- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/trace`. (#5490) +- `SimpleProcessor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` no longer allocates a slice, which makes it possible to have zero-allocation log processing using `SimpleProcessor`. (#5493) +- Use non-generic functions in the `Start` method of `"go.opentelemetry.io/otel/sdk/trace".Trace` to reduce memory allocation. (#5497) +- `service.instance.id` is populated for a `Resource` created with `"go.opentelemetry.io/otel/sdk/resource".Default` with a default value when `OTEL_GO_X_RESOURCE` is set. (#5520) +- Improve performance of metric instruments in `go.opentelemetry.io/otel/sdk/metric` by removing unnecessary calls to `time.Now`. (#5545) + +### Fixed + +- Log a warning to the OpenTelemetry internal logger when a `Record` in `go.opentelemetry.io/otel/sdk/log` drops an attribute due to a limit being reached. (#5376) +- Identify the `Tracer` returned from the global `TracerProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426) +- Identify the `Meter` returned from the global `MeterProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426) +- Log a warning to the OpenTelemetry internal logger when a `Span` in `go.opentelemetry.io/otel/sdk/trace` drops an attribute, event, or link due to a limit being reached. (#5434) +- Document instrument name requirements in `go.opentelemetry.io/otel/metric`. (#5435) +- Prevent random number generation data-race for experimental rand exemplars in `go.opentelemetry.io/otel/sdk/metric`. (#5456) +- Fix counting number of dropped attributes of `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5464) +- Fix panic in baggage creation when a member contains `0x80` char in key or value. (#5494) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5508) +- Retry trace and span ID generation if it generated an invalid one in `go.opentelemetry.io/otel/sdk/trace`. (#5514) +- Fix stale timestamps reported by the last-value aggregation. (#5517) +- Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521) +- Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/otel/attribute` by reducing the number of allocations. (#5549) + +## [1.27.0/0.49.0/0.3.0] 2024-05-21 + +### Added + +- Add example for `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`.
(#5242) +- Add `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest` to facilitate testing exporter and processor implementations. (#5258) +- Add `RecordFactory` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing bridge implementations. (#5263) +- The count of dropped records from the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is logged. (#5276) +- Add metrics in the `otel-collector` example. (#5283) +- Add the synchronous gauge instrument to `go.opentelemetry.io/otel/metric`. (#5304) + - An `int64` or `float64` synchronous gauge instrument can now be created from a `Meter`. + - All implementations of the API (`go.opentelemetry.io/otel/metric/noop`, `go.opentelemetry.io/otel/sdk/metric`) are updated to support this instrument. +- Add logs to `go.opentelemetry.io/otel/example/dice`. (#5349) + +### Changed + +- The `Shutdown` method of `Exporter` in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` ignores the context cancellation and always returns `nil`. (#5189) +- The `ForceFlush` and `Shutdown` methods of the exporter returned by `New` in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` ignore the context cancellation and always return `nil`. (#5189) +- Apply the value length limits to `Record` attributes in `go.opentelemetry.io/otel/sdk/log`. (#5230) +- De-duplicate map attributes added to a `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5230) +- `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` won't print timestamps when `WithoutTimestamps` option is set. (#5241) +- The `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` exporter won't print `AttributeValueLengthLimit` and `AttributeCountLimit` fields now, instead it prints the `DroppedAttributes` field. (#5272) +- Improved performance in the `Stringer` implementation of `go.opentelemetry.io/otel/baggage.Member` by reducing the number of allocations. (#5286) +- Set the start time for last-value aggregates in `go.opentelemetry.io/otel/sdk/metric`. (#5305) +- The `Span` in `go.opentelemetry.io/otel/sdk/trace` will record links without span context if either non-empty `TraceState` or attributes are provided. (#5315) +- Upgrade all dependencies of `go.opentelemetry.io/otel/semconv/v1.24.0` to `go.opentelemetry.io/otel/semconv/v1.25.0`. (#5374) + +### Fixed + +- Comparison of unordered maps for `go.opentelemetry.io/otel/log.KeyValue` and `go.opentelemetry.io/otel/log.Value`. (#5306) +- Fix the empty output of `go.opentelemetry.io/otel/log.Value` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5311) +- Split the behavior of `Recorder` in `go.opentelemetry.io/otel/log/logtest` so it behaves as a `LoggerProvider` only. (#5365) +- Fix wrong package name of the error message when parsing endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5371) +- Identify the `Logger` returned from the global `LoggerProvider` in `go.opentelemetry.io/otel/log/global` with its schema URL. (#5375) + +## [1.26.0/0.48.0/0.2.0-alpha] 2024-04-24 + +### Added + +- Add `Recorder` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing the log bridge implementations. (#5134) +- Add span flags to OTLP spans and links exported by `go.opentelemetry.io/otel/exporters/otlp/otlptrace`. (#5194) +- Make the initial alpha release of `go.opentelemetry.io/otel/sdk/log`. + This new module contains the Go implementation of the OpenTelemetry Logs SDK. + This module is unstable and breaking changes may be introduced. 
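Reviewer note: among the 1.27.0 additions above, the synchronous gauge (#5304) fills a long-standing API gap: before it, gauge-shaped values could only be reported through asynchronous observable callbacks. A minimal sketch of recording with the new instrument:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
)

func main() {
	meter := otel.Meter("example")

	// Synchronous gauge: record the last value at the moment it changes,
	// instead of sampling it from an observable callback.
	depth, err := meter.Int64Gauge("queue.depth")
	if err != nil {
		panic(err)
	}

	depth.Record(context.Background(), 42)
}
```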
+ See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240) +- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. + This new module contains an OTLP exporter that transmits log telemetry using HTTP. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240) +- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. + This new module contains an exporter that prints log records to STDOUT. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240) +- The `go.opentelemetry.io/otel/semconv/v1.25.0` package. + The package contains semantic conventions from the `v1.25.0` version of the OpenTelemetry Semantic Conventions. (#5254) + +### Changed + +- Update `go.opentelemetry.io/proto/otlp` from v1.1.0 to v1.2.0. (#5177) +- Improve performance of baggage member character validation in `go.opentelemetry.io/otel/baggage`. (#5214) +- The `otel-collector` example now uses docker compose to bring up services instead of kubernetes. (#5244) + +### Fixed + +- Slice attribute values in `go.opentelemetry.io/otel/attribute` are now emitted as their JSON representation. (#5159) + +## [1.25.0/0.47.0/0.0.8/0.1.0-alpha] 2024-04-05 + +### Added + +- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4906) +- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4906) +- Add `AddLink` method to the `Span` interface in `go.opentelemetry.io/otel/trace`. (#5032) +- The `Enabled` method is added to the `Logger` interface in `go.opentelemetry.io/otel/log`. + This method is used to notify users if a log record will be emitted or not. (#5071) +- Add `SeverityUndefined` `const` to `go.opentelemetry.io/otel/log`. + This value represents an unset severity level. (#5072) +- Add `Empty` function in `go.opentelemetry.io/otel/log` to return a `KeyValue` for an empty value. (#5076) +- Add `go.opentelemetry.io/otel/log/global` to manage the global `LoggerProvider`. + This package is provided with the anticipation that all functionality will be migrated to `go.opentelemetry.io/otel` when `go.opentelemetry.io/otel/log` stabilizes. + At which point, users will be required to migrate their code, and this package will be deprecated and then removed. (#5085) +- Add support for `Summary` metrics in the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` exporters. (#5100) +- Add `otel.scope.name` and `otel.scope.version` tags to spans exported by `go.opentelemetry.io/otel/exporters/zipkin`. (#5108) +- Add support for `AddLink` to `go.opentelemetry.io/otel/bridge/opencensus`. (#5116) +- Add `String` method to `Value` and `KeyValue` in `go.opentelemetry.io/otel/log`. (#5117) +- Add Exemplar support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5111) +- Add metric semantic conventions to `go.opentelemetry.io/otel/semconv/v1.24.0`. Future `semconv` packages will include metric semantic conventions as well. (#4528) + +### Changed + +- `SpanFromContext` and `SpanContextFromContext` in `go.opentelemetry.io/otel/trace` no longer make a heap allocation when the passed context has no span.
(#5049) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now create a gRPC client in idle mode and with "dns" as the default resolver using [`grpc.NewClient`](https://pkg.go.dev/google.golang.org/grpc#NewClient). (#5151) + Because of that `WithDialOption` ignores [`grpc.WithBlock`](https://pkg.go.dev/google.golang.org/grpc#WithBlock), [`grpc.WithTimeout`](https://pkg.go.dev/google.golang.org/grpc#WithTimeout), and [`grpc.WithReturnConnectionError`](https://pkg.go.dev/google.golang.org/grpc#WithReturnConnectionError). + Notice that [`grpc.DialContext`](https://pkg.go.dev/google.golang.org/grpc#DialContext) which was used before is now deprecated. + +### Fixed + +- Clarify the documentation about equivalence guarantees for the `Set` and `Distinct` types in `go.opentelemetry.io/otel/attribute`. (#5027) +- Prevent default `ErrorHandler` self-delegation. (#5137) +- Update all dependencies to address [GO-2024-2687]. (#5139) + +### Removed + +- Drop support for [Go 1.20]. (#4967) + +### Deprecated + +- Deprecate `go.opentelemetry.io/otel/attribute.Sortable` type. (#4734) +- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortable` function. (#4734) +- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortableFiltered` function. (#4734) + ## [1.24.0/0.46.0/0.0.1-alpha] 2024-02-23 This release is the last to support [Go 1.20]. @@ -22,6 +175,7 @@ The next release will require at least [Go 1.21]. This module includes OpenTelemetry Go's implementation of the Logs Bridge API. This module is in an alpha state, it is subject to breaking changes. See our [versioning policy](./VERSIONING.md) for more info. (#4961) +- ARM64 platform to the compatibility testing suite. (#4994) ### Fixed @@ -138,7 +292,7 @@ See our [versioning policy](VERSIONING.md) for more information about these stab ## [1.20.0/0.43.0] 2023-11-10 -This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this. +This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementers need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this. ### Added @@ -170,15 +324,15 @@ This release brings a breaking change for custom trace API implementations. Some - `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583) - The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type. This extends the `TracerProvider` interface and is a breaking change for any existing implementation. - Implementors need to update their implementations based on what they want the default behavior of the interface to be. + Implementers need to update their implementations based on what they want the default behavior of the interface to be.
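Reviewer note: the gRPC exporter change (#5151) above is easy to trip over when upgrading: because the exporters now use `grpc.NewClient`, the connection is created lazily in idle mode, and blocking dial options passed through `WithDialOption` (such as `grpc.WithBlock`) are silently ignored. Construction therefore no longer proves connectivity; a sketch:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
	ctx := context.Background()

	// Returns quickly even if the collector is down: the underlying
	// grpc.NewClient connects lazily, so failures surface on export.
	exp, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithEndpoint("localhost:4317"),
		otlptracegrpc.WithInsecure(),
	)
	if err != nil {
		panic(err)
	}
	defer exp.Shutdown(ctx) // error ignored in this sketch
}
```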
See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) - The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type. This extends the `Tracer` interface and is is a breaking change for any existing implementation. - Implementors need to update their implementations based on what they want the default behavior of the interface to be. + Implementers need to update their implementations based on what they want the default behavior of the interface to be. See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) - The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type. This extends the `Span` interface and is is a breaking change for any existing implementation. - Implementors need to update their implementations based on what they want the default behavior of the interface to be. + Implementers need to update their implementations based on what they want the default behavior of the interface to be. See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) - `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` does no longer depend on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) - `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` does no longer depend on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) @@ -814,7 +968,7 @@ The next release will require at least [Go 1.19]. - Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340) - `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436) - Re-enabled Attribute Filters in the Metric SDK. (#3396) -- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggragation. (#3408) +- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408) - Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432) - Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440) - Prevent duplicate Prometheus description, unit, and type. (#3469) @@ -1859,7 +2013,7 @@ with major version 0. - `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369) - Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369) - Unify endpoint API that related to OTel exporter. (#1401) -- Optimize metric histogram aggregator to re-use its slice of buckets. (#1435) +- Optimize metric histogram aggregator to reuse its slice of buckets. (#1435) - Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (1430) - Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434) - `SamplingResult` now passed a `Tracestate` from the parent `SpanContext` (#1432) @@ -2849,7 +3003,11 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. 
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.24.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.28.0...HEAD +[1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0 +[1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0 +[1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0 +[1.25.0/0.47.0/0.0.8/0.1.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.25.0 [1.24.0/0.46.0/0.0.1-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.24.0 [1.23.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.1 [1.23.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0 @@ -2937,3 +3095,5 @@ It contains api and sdk for trace and meter. [metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric [metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric [trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace + +[GO-2024-2687]: https://pkg.go.dev/vuln/GO-2024-2687 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 31d336d92..202554933 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -12,6 +12,6 @@ # https://help.github.com/en/articles/about-code-owners # -* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu +* @MrAlias @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu -CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole \ No newline at end of file +CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole @XSAM @dmathieu diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index c9f2bac55..b86572f58 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -201,6 +201,16 @@ You can install and run a "local Go Doc site" in the following way: [`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric) is an example of a very well-documented package. +### README files + +Each (non-internal, non-test, non-documentation) package must contain a +`README.md` file containing at least a title, and a `pkg.go.dev` badge. + +The README should not be a repetition of Go doc comments. + +You can verify the presence of all README files with the `make verify-readmes` +command. + ## Style Guide One of the primary goals of this project is that it is actually used by @@ -560,6 +570,9 @@ functionality should be added, each one will need their own super-set interfaces and will duplicate the pattern. For this reason, the simple targeted interface that defines the specific functionality should be preferred. +See also: +[Keeping Your Modules Compatible: Working with interfaces](https://go.dev/blog/module-compatibility#working-with-interfaces). + ### Testing The tests should never leak goroutines. @@ -615,17 +628,15 @@ should be canceled. 
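To make the targeted-interface guidance from the CONTRIBUTING excerpt above concrete, here is a hedged sketch with hypothetical names (`Exporter`, `Flusher`); the optional capability is discovered by type assertion rather than by widening the original interface into a super-set:

```go
package exporter // hypothetical package illustrating the guideline

import "context"

// Exporter is the small, stable interface that users implement.
type Exporter interface {
	Export(ctx context.Context, payload []byte) error
}

// Flusher is a targeted extension interface: it adds one optional
// capability without forcing a super-set interface on every
// implementation of Exporter.
type Flusher interface {
	ForceFlush(ctx context.Context) error
}

// Flush flushes e only if it opted in to the extra capability.
func Flush(ctx context.Context, e Exporter) error {
	if f, ok := e.(Flusher); ok {
		return f.ForceFlush(ctx)
	}
	return nil
}
```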
### Approvers -- [Evan Torrie](https://github.com/evantorrie), Verizon Media -- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics - [Chester Cheung](https://github.com/hanyuancheung), Tencent -- [Damien Mathieu](https://github.com/dmathieu), Elastic -- [Anthony Mirabella](https://github.com/Aneurysm9), AWS ### Maintainers -- [David Ashpole](https://github.com/dashpole), Google - [Aaron Clawson](https://github.com/MadVikingGod), LightStep +- [Damien Mathieu](https://github.com/dmathieu), Elastic +- [David Ashpole](https://github.com/dashpole), Google - [Robert Pająk](https://github.com/pellared), Splunk +- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics - [Tyler Yahn](https://github.com/MrAlias), Splunk ### Emeritus @@ -633,6 +644,8 @@ should be canceled. - [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb - [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep - [Josh MacDonald](https://github.com/jmacd), LightStep +- [Anthony Mirabella](https://github.com/Aneurysm9), AWS +- [Evan Torrie](https://github.com/evantorrie), Yahoo ### Become an Approver or a Maintainer diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index 6de95219b..f33619f76 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -1,16 +1,5 @@ # Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
+# SPDX-License-Identifier: Apache-2.0 TOOLS_MOD_DIR := ./internal/tools @@ -25,8 +14,8 @@ TIMEOUT = 60 .DEFAULT_GOAL := precommit .PHONY: precommit ci -precommit: generate dependabot-generate license-check misspell go-mod-tidy golangci-lint-fix test-default -ci: generate dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage +precommit: generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default +ci: generate license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage # Tools @@ -34,7 +23,7 @@ TOOLS = $(CURDIR)/.tools $(TOOLS): @mkdir -p $@ -$(TOOLS)/%: | $(TOOLS) +$(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS) cd $(TOOLS_MOD_DIR) && \ $(GO) build -o $@ $(PACKAGE) @@ -50,9 +39,6 @@ $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink SEMCONVKIT = $(TOOLS)/semconvkit $(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit -DBOTCONF = $(TOOLS)/dbotconf -$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf - GOLANGCI_LINT = $(TOOLS)/golangci-lint $(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint @@ -81,7 +67,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck .PHONY: tools -tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) # Virtualized python tools via docker @@ -110,7 +96,7 @@ $(PYTOOLS): @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip" # Install python packages into the virtual environment. -$(PYTOOLS)/%: | $(PYTOOLS) +$(PYTOOLS)/%: $(PYTOOLS) @$(DOCKERPY) $(PIP) install -r requirements.txt CODESPELL = $(PYTOOLS)/codespell @@ -124,18 +110,18 @@ generate: go-generate vanity-import-fix .PHONY: go-generate go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%) go-generate/%: DIR=$* -go-generate/%: | $(STRINGER) $(GOTMPL) +go-generate/%: $(STRINGER) $(GOTMPL) @echo "$(GO) generate $(DIR)/..." \ && cd $(DIR) \ && PATH="$(TOOLS):$${PATH}" $(GO) generate ./... .PHONY: vanity-import-fix -vanity-import-fix: | $(PORTO) +vanity-import-fix: $(PORTO) @$(PORTO) --include-internal -w . # Generate go.work file for local development. 
.PHONY: go-work -go-work: | $(CROSSLINK) +go-work: $(CROSSLINK) $(CROSSLINK) work --root=$(shell pwd) # Build @@ -178,7 +164,7 @@ test/%: COVERAGE_MODE = atomic COVERAGE_PROFILE = coverage.out .PHONY: test-coverage -test-coverage: | $(GOCOVMERGE) +test-coverage: $(GOCOVMERGE) @set -e; \ printf "" > coverage.txt; \ for dir in $(ALL_COVERAGE_MOD_DIRS); do \ @@ -209,23 +195,23 @@ golangci-lint-fix: ARGS=--fix golangci-lint-fix: golangci-lint golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%) golangci-lint/%: DIR=$* -golangci-lint/%: | $(GOLANGCI_LINT) +golangci-lint/%: $(GOLANGCI_LINT) @echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \ && cd $(DIR) \ && $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS) .PHONY: crosslink -crosslink: | $(CROSSLINK) +crosslink: $(CROSSLINK) @echo "Updating intra-repository dependencies in all go modules" \ && $(CROSSLINK) --root=$(shell pwd) --prune .PHONY: go-mod-tidy go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%) go-mod-tidy/%: DIR=$* -go-mod-tidy/%: | crosslink +go-mod-tidy/%: crosslink @echo "$(GO) mod tidy in $(DIR)" \ && cd $(DIR) \ - && $(GO) mod tidy -compat=1.20 + && $(GO) mod tidy -compat=1.21 .PHONY: lint-modules lint-modules: go-mod-tidy @@ -234,23 +220,23 @@ lint-modules: go-mod-tidy lint: misspell lint-modules golangci-lint govulncheck .PHONY: vanity-import-check -vanity-import-check: | $(PORTO) +vanity-import-check: $(PORTO) @$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 ) .PHONY: misspell -misspell: | $(MISSPELL) +misspell: $(MISSPELL) @$(MISSPELL) -w $(ALL_DOCS) .PHONY: govulncheck govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%) govulncheck/%: DIR=$* -govulncheck/%: | $(GOVULNCHECK) +govulncheck/%: $(GOVULNCHECK) @echo "govulncheck ./... in $(DIR)" \ && cd $(DIR) \ && $(GOVULNCHECK) ./... .PHONY: codespell -codespell: | $(CODESPELL) +codespell: $(CODESPELL) @$(DOCKERPY) $(CODESPELL) .PHONY: license-check @@ -263,15 +249,6 @@ license-check: exit 1; \ fi -DEPENDABOT_CONFIG = .github/dependabot.yml -.PHONY: dependabot-check -dependabot-check: | $(DBOTCONF) - @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || ( echo "(run: make dependabot-generate)"; exit 1 ) - -.PHONY: dependabot-generate -dependabot-generate: | $(DBOTCONF) - @$(DBOTCONF) generate > $(DEPENDABOT_CONFIG) - .PHONY: check-clean-work-tree check-clean-work-tree: @if ! git diff --quiet; then \ @@ -284,13 +261,11 @@ check-clean-work-tree: SEMCONVPKG ?= "semconv/" .PHONY: semconv-generate -semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT) +semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT) [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." 
--only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)" $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" .PHONY: gorelease @@ -302,17 +277,25 @@ gorelease/%:| $(GORELEASE) && $(GORELEASE) \ || echo "" +.PHONY: verify-mods +verify-mods: $(MULTIMOD) + $(MULTIMOD) verify + .PHONY: prerelease -prerelease: | $(MULTIMOD) +prerelease: verify-mods @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) - $(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET} + $(MULTIMOD) prerelease -m ${MODSET} COMMIT ?= "HEAD" .PHONY: add-tags -add-tags: | $(MULTIMOD) +add-tags: verify-mods @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) - $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} + $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} .PHONY: lint-markdown -lint-markdown: +lint-markdown: docker run -v "$(CURDIR):$(WORKDIR)" avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md + +.PHONY: verify-readmes +verify-readmes: + ./verify_readmes.sh diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 7766259a5..5a8909317 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -15,7 +15,7 @@ It provides a set of APIs to directly measure performance and behavior of your s |---------|--------------------| | Traces | Stable | | Metrics | Stable | -| Logs | In development[^1] | +| Logs | Beta[^1] | Progress and status specific to this repository is tracked in our [project boards](https://github.com/open-telemetry/opentelemetry-go/projects) @@ -51,19 +51,16 @@ Currently, this project supports the following environments. |---------|------------|--------------| | Ubuntu | 1.22 | amd64 | | Ubuntu | 1.21 | amd64 | -| Ubuntu | 1.20 | amd64 | | Ubuntu | 1.22 | 386 | | Ubuntu | 1.21 | 386 | -| Ubuntu | 1.20 | 386 | +| Linux | 1.22 | arm64 | +| Linux | 1.21 | arm64 | | MacOS | 1.22 | amd64 | | MacOS | 1.21 | amd64 | -| MacOS | 1.20 | amd64 | | Windows | 1.22 | amd64 | | Windows | 1.21 | amd64 | -| Windows | 1.20 | amd64 | | Windows | 1.22 | 386 | | Windows | 1.21 | 386 | -| Windows | 1.20 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. @@ -100,12 +97,12 @@ export pipeline to send that telemetry to an observability platform. All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters). -| Exporter | Metrics | Traces | -|---------------------------------------|:-------:|:------:| -| [OTLP](./exporters/otlp/) | ✓ | ✓ | -| [Prometheus](./exporters/prometheus/) | ✓ | | -| [stdout](./exporters/stdout/) | ✓ | ✓ | -| [Zipkin](./exporters/zipkin/) | | ✓ | +| Exporter | Logs | Metrics | Traces | +|---------------------------------------|:----:|:-------:|:------:| +| [OTLP](./exporters/otlp/) | ✓ | ✓ | ✓ | +| [Prometheus](./exporters/prometheus/) | | ✓ | | +| [stdout](./exporters/stdout/) | ✓ | ✓ | ✓ | +| [Zipkin](./exporters/zipkin/) | | | ✓ | ## Contributing diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index d2691d0bd..940f57f3d 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -27,6 +27,12 @@ You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org You can check/report problems with `gorelease` [here](https://golang.org/issues/26420). 
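As a quick sketch of wiring one of the exporters from the README table above into an SDK pipeline, here is an illustrative program using the stdout trace exporter; the package paths are real, but the pipeline itself is only an example:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// Create the exporter; WithPrettyPrint makes the output readable.
	exp, err := stdouttrace.New(stdouttrace.WithPrettyPrint())
	if err != nil {
		log.Fatal(err)
	}

	// Feed the exporter into a batching TracerProvider.
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	defer func() { _ = tp.Shutdown(context.Background()) }()
	otel.SetTracerProvider(tp)

	// Emit one span through the global provider.
	_, span := otel.Tracer("example").Start(context.Background(), "hello")
	span.End()
}
```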
+## Verify changes for contrib repository + +If the changes in the main repository are going to affect the contrib repository, verify that they are compatible with it. + +Follow [the steps](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md#verify-otel-changes) in the contrib repository to verify OTel changes. + ## Pre-Release First, decide which module sets will be released and update their versions diff --git a/vendor/go.opentelemetry.io/otel/attribute/README.md b/vendor/go.opentelemetry.io/otel/attribute/README.md new file mode 100644 index 000000000..5b3da8f14 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/README.md @@ -0,0 +1,3 @@ +# Attribute + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/attribute)](https://pkg.go.dev/go.opentelemetry.io/otel/attribute) diff --git a/vendor/go.opentelemetry.io/otel/attribute/doc.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go index dafe7424d..eef51ebc2 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/doc.go +++ b/vendor/go.opentelemetry.io/otel/attribute/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package attribute provides key and value attributes. package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go index fe2bc5766..318e42fca 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go +++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go index 638c213d5..be9cd922d 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/filter.go +++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go index 841b271fb..f2ba89ce4 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/iterator.go +++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go index 0656a04e4..d9a22c650 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/key.go +++ b/vendor/go.opentelemetry.io/otel/attribute/key.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go index 1ddf3ce05..3028f9a40 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/kv.go +++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go index fb6da5145..bff9c7fdb 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -1,24 +1,14 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" import ( + "cmp" "encoding/json" "reflect" + "slices" "sort" - "sync" ) type ( // immutable set of attributes, with an internal cache for storing // attribute encodings. // - // This type supports the Equivalent method of comparison using values of - // type Distinct. + // This type will remain comparable for backwards compatibility. The + // equivalence of Sets across versions is not guaranteed to be stable. + // Two Sets that compare as equal directly (i.e. ==) in one version may + // not compare as equal in another. Users should use + // the Equals method to ensure stable equivalence checking. + // + // Users should also use the Distinct returned from Equivalent as a map key + // instead of a Set directly. In addition to that type providing guarantees + // on stable equivalence, it may also provide performance improvements. Set struct { equivalent Distinct } - // Distinct wraps a variable-size array of KeyValue, constructed with keys - // in sorted order. This can be used as a map key or for equality checking - // between Sets. + // Distinct is a unique identifier of a Set. + // + // Distinct is designed to ensure equivalence stability: comparisons + // will return the same value across versions. For this reason, Distinct + // should always be used as a map key instead of a Set. Distinct struct { iface interface{} } - // Sortable implements sort.Interface, used for sorting KeyValue. This is - // an exported type to support a memory optimization. A pointer to one of - // these is needed for the call to sort.Stable(), which the caller may - // provide in order to avoid an allocation. See NewSetWithSortable(). + // Sortable implements sort.Interface, used for sorting KeyValue. + // + // Deprecated: This type is no longer used. It was added as a performance + // optimization for Go < 1.21 that is no longer needed (Go < 1.21 is no + // longer supported by the module). Sortable []KeyValue ) @@ -56,12 +56,6 @@ var ( iface: [0]KeyValue{}, }, } - - // sortables is a pool of Sortables used to create Sets with a user does - // not provide one. - sortables = sync.Pool{ - New: func() interface{} { return new(Sortable) }, - } ) // EmptySet returns a reference to a Set with no elements. @@ -187,13 +181,7 @@ func empty() Set { // Except for empty sets, this method adds an additional allocation compared // with calls that include a Sortable.
func NewSet(kvs ...KeyValue) Set { - // Check for empty set. - if len(kvs) == 0 { - return empty() - } - srt := sortables.Get().(*Sortable) - s, _ := NewSetWithSortableFiltered(kvs, srt, nil) - sortables.Put(srt) + s, _ := NewSetWithFiltered(kvs, nil) return s } @@ -201,12 +189,10 @@ func NewSet(kvs ...KeyValue) Set { // NewSetWithSortableFiltered for more details. // // This call includes a Sortable option as a memory optimization. -func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set { - // Check for empty set. - if len(kvs) == 0 { - return empty() - } - s, _ := NewSetWithSortableFiltered(kvs, tmp, nil) +// +// Deprecated: Use [NewSet] instead. +func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set { + s, _ := NewSetWithFiltered(kvs, nil) return s } @@ -220,48 +206,12 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { if len(kvs) == 0 { return empty(), nil } - srt := sortables.Get().(*Sortable) - s, filtered := NewSetWithSortableFiltered(kvs, srt, filter) - sortables.Put(srt) - return s, filtered -} - -// NewSetWithSortableFiltered returns a new Set. -// -// Duplicate keys are eliminated by taking the last value. This -// re-orders the input slice so that unique last-values are contiguous -// at the end of the slice. -// -// This ensures the following: -// -// - Last-value-wins semantics -// - Caller sees the reordering, but doesn't lose values -// - Repeated call preserve last-value wins. -// -// Note that methods are defined on Set, although this returns Set. Callers -// can avoid memory allocations by: -// -// - allocating a Sortable for use as a temporary in this method -// - allocating a Set for storing the return value of this constructor. -// -// The result maintains a cache of encoded attributes, by attribute.EncoderID. -// This value should not be copied after its first use. -// -// The second []KeyValue return value is a list of attributes that were -// excluded by the Filter (if non-nil). -func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) { - // Check for empty set. - if len(kvs) == 0 { - return empty(), nil - } - - *tmp = kvs // Stable sort so the following de-duplication can implement // last-value-wins semantics. - sort.Stable(tmp) - - *tmp = nil + slices.SortStableFunc(kvs, func(a, b KeyValue) int { + return cmp.Compare(a.Key, b.Key) + }) position := len(kvs) - 1 offset := position - 1 @@ -289,6 +239,35 @@ func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (S return Set{equivalent: computeDistinct(kvs)}, nil } +// NewSetWithSortableFiltered returns a new Set. +// +// Duplicate keys are eliminated by taking the last value. This +// re-orders the input slice so that unique last-values are contiguous +// at the end of the slice. +// +// This ensures the following: +// +// - Last-value-wins semantics +// - Caller sees the reordering, but doesn't lose values +// - Repeated calls preserve last-value wins. +// +// Note that methods are defined on Set, although this returns Set. Callers +// can avoid memory allocations by: +// +// - allocating a Sortable for use as a temporary in this method +// - allocating a Set for storing the return value of this constructor. +// +// The result maintains a cache of encoded attributes, by attribute.EncoderID. +// This value should not be copied after its first use. +// +// The second []KeyValue return value is a list of attributes that were +// excluded by the Filter (if non-nil). +// +// Deprecated: Use [NewSetWithFiltered] instead.
+func NewSetWithSortableFiltered(kvs []KeyValue, _ *Sortable, filter Filter) (Set, []KeyValue) { + return NewSetWithFiltered(kvs, filter) +} + // filteredToFront filters slice in-place using keep function. All KeyValues that need to // be removed are moved to the front. All KeyValues that need to be kept are // moved (in-order) to the back. The index for the first KeyValue to be kept is diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go index cb21dd5c0..9ea0ecbbd 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/value.go +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package attribute // import "go.opentelemetry.io/otel/attribute" @@ -242,15 +231,27 @@ func (v Value) Emit() string { case BOOL: return strconv.FormatBool(v.AsBool()) case INT64SLICE: - return fmt.Sprint(v.asInt64Slice()) + j, err := json.Marshal(v.asInt64Slice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asInt64Slice()) + } + return string(j) case INT64: return strconv.FormatInt(v.AsInt64(), 10) case FLOAT64SLICE: - return fmt.Sprint(v.asFloat64Slice()) + j, err := json.Marshal(v.asFloat64Slice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asFloat64Slice()) + } + return string(j) case FLOAT64: return fmt.Sprint(v.AsFloat64()) case STRINGSLICE: - return fmt.Sprint(v.asStringSlice()) + j, err := json.Marshal(v.asStringSlice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asStringSlice()) + } + return string(j) case STRING: return v.stringly default: diff --git a/vendor/go.opentelemetry.io/otel/baggage/README.md b/vendor/go.opentelemetry.io/otel/baggage/README.md new file mode 100644 index 000000000..7d798435e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/baggage/README.md @@ -0,0 +1,3 @@ +# Baggage + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/baggage)](https://pkg.go.dev/go.opentelemetry.io/otel/baggage) diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 7d27cf77d..c40c896cc 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package baggage // import "go.opentelemetry.io/otel/baggage" @@ -19,6 +8,7 @@ import ( "fmt" "net/url" "strings" + "unicode/utf8" "go.opentelemetry.io/otel/internal/baggage" ) @@ -67,10 +57,10 @@ func NewKeyProperty(key string) (Property, error) { // NewKeyValueProperty returns a new Property for key with value. // // The passed key must be compliant with W3C Baggage specification. -// The passed value must be precent-encoded as defined in W3C Baggage specification. +// The passed value must be percent-encoded as defined in W3C Baggage specification. // // Notice: Consider using [NewKeyValuePropertyRaw] instead -// that does not require precent-encoding of the value. +// that does not require percent-encoding of the value. func NewKeyValueProperty(key, value string) (Property, error) { if !validateValue(value) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) @@ -232,13 +222,13 @@ type Member struct { hasData bool } -// NewMemberRaw returns a new Member from the passed arguments. +// NewMember returns a new Member from the passed arguments. // // The passed key must be compliant with W3C Baggage specification. -// The passed value must be precent-encoded as defined in W3C Baggage specification. +// The passed value must be percent-encoded as defined in W3C Baggage specification. // // Notice: Consider using [NewMemberRaw] instead -// that does not require precent-encoding of the value. +// that does not require percent-encoding of the value. func NewMember(key, value string, props ...Property) (Member, error) { if !validateValue(value) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) @@ -309,10 +299,10 @@ func parseMember(member string) (Member, error) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v) } - // Decode a precent-encoded value. + // Decode a percent-encoded value. value, err := url.PathUnescape(val) if err != nil { - return newInvalidMember(), fmt.Errorf("%w: %v", errInvalidValue, err) + return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err) } return Member{key: key, value: value, properties: props, hasData: true}, nil } @@ -345,9 +335,9 @@ func (m Member) String() string { // A key is just an ASCII string. A value is restricted to be // US-ASCII characters excluding CTLs, whitespace, // DQUOTE, comma, semicolon, and backslash. - s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, valueEscape(m.value)) + s := m.key + keyValueDelimiter + valueEscape(m.value) if len(m.properties) > 0 { - s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String()) + s += propertyDelimiter + m.properties.String() } return s } @@ -616,7 +606,7 @@ func parsePropertyInternal(s string) (p Property, ok bool) { return } - // Decode a precent-encoded value. + // Decode a percent-encoded value. 
value, err := url.PathUnescape(s[valueStart:valueEnd]) if err != nil { return @@ -641,6 +631,95 @@ func skipSpace(s string, offset int) int { return i } +var safeKeyCharset = [utf8.RuneSelf]bool{ + // 0x23 to 0x27 + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + + // 0x30 to 0x39 + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + + // 0x41 to 0x5a + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'V': true, + 'W': true, + 'X': true, + 'Y': true, + 'Z': true, + + // 0x5e to 0x7a + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + + // remainder + '!': true, + '*': true, + '+': true, + '-': true, + '.': true, + '|': true, + '~': true, +} + func validateKey(s string) bool { if len(s) == 0 { return false @@ -656,17 +735,7 @@ func validateKey(s string) bool { } func validateKeyChar(c int32) bool { - return (c >= 0x23 && c <= 0x27) || - (c >= 0x30 && c <= 0x39) || - (c >= 0x41 && c <= 0x5a) || - (c >= 0x5e && c <= 0x7a) || - c == 0x21 || - c == 0x2a || - c == 0x2b || - c == 0x2d || - c == 0x2e || - c == 0x7c || - c == 0x7e + return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c] } func validateValue(s string) bool { @@ -679,12 +748,109 @@ func validateValue(s string) bool { return true } +var safeValueCharset = [utf8.RuneSelf]bool{ + '!': true, // 0x21 + + // 0x23 to 0x2b + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '(': true, + ')': true, + '*': true, + '+': true, + + // 0x2d to 0x3a + '-': true, + '.': true, + '/': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + ':': true, + + // 0x3c to 0x5b + '<': true, // 0x3C + '=': true, // 0x3D + '>': true, // 0x3E + '?': true, // 0x3F + '@': true, // 0x40 + 'A': true, // 0x41 + 'B': true, // 0x42 + 'C': true, // 0x43 + 'D': true, // 0x44 + 'E': true, // 0x45 + 'F': true, // 0x46 + 'G': true, // 0x47 + 'H': true, // 0x48 + 'I': true, // 0x49 + 'J': true, // 0x4A + 'K': true, // 0x4B + 'L': true, // 0x4C + 'M': true, // 0x4D + 'N': true, // 0x4E + 'O': true, // 0x4F + 'P': true, // 0x50 + 'Q': true, // 0x51 + 'R': true, // 0x52 + 'S': true, // 0x53 + 'T': true, // 0x54 + 'U': true, // 0x55 + 'V': true, // 0x56 + 'W': true, // 0x57 + 'X': true, // 0x58 + 'Y': true, // 0x59 + 'Z': true, // 0x5A + '[': true, // 0x5B + + // 0x5d to 0x7e + ']': true, // 0x5D + '^': true, // 0x5E + '_': true, // 0x5F + '`': true, // 0x60 + 'a': true, // 0x61 + 'b': true, // 0x62 + 'c': true, // 0x63 + 'd': true, // 0x64 + 'e': true, // 0x65 + 'f': true, // 0x66 + 'g': true, // 0x67 + 'h': true, // 0x68 + 'i': true, // 0x69 + 'j': true, // 0x6A + 'k': true, // 0x6B + 'l': true, // 0x6C + 'm': true, // 0x6D + 'n': true, // 0x6E + 'o': true, // 0x6F + 'p': true, // 0x70 + 'q': true, // 0x71 + 'r': true, // 0x72 + 's': true, // 0x73 + 't': true, // 0x74 + 'u': true, // 0x75 + 'v': true, // 0x76 + 'w': true, // 0x77 + 'x': 
true, // 0x78 + 'y': true, // 0x79 + 'z': true, // 0x7A + '{': true, // 0x7B + '|': true, // 0x7C + '}': true, // 0x7D + '~': true, // 0x7E +} + func validateValueChar(c int32) bool { - return c == 0x21 || - (c >= 0x23 && c <= 0x2b) || - (c >= 0x2d && c <= 0x3a) || - (c >= 0x3c && c <= 0x5b) || - (c >= 0x5d && c <= 0x7e) + return c >= 0 && c < int32(utf8.RuneSelf) && safeValueCharset[c] } // valueEscape escapes the string so it can be safely placed inside a baggage value, diff --git a/vendor/go.opentelemetry.io/otel/baggage/context.go b/vendor/go.opentelemetry.io/otel/baggage/context.go index 24b34b756..a572461a0 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/context.go +++ b/vendor/go.opentelemetry.io/otel/baggage/context.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package baggage // import "go.opentelemetry.io/otel/baggage" diff --git a/vendor/go.opentelemetry.io/otel/baggage/doc.go b/vendor/go.opentelemetry.io/otel/baggage/doc.go index 4545100df..b51d87cab 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/doc.go +++ b/vendor/go.opentelemetry.io/otel/baggage/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package baggage provides functionality for storing and retrieving diff --git a/vendor/go.opentelemetry.io/otel/codes/README.md b/vendor/go.opentelemetry.io/otel/codes/README.md new file mode 100644 index 000000000..24c52b387 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/codes/README.md @@ -0,0 +1,3 @@ +# Codes + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/codes)](https://pkg.go.dev/go.opentelemetry.io/otel/codes) diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index 587ebae4e..df29d96a6 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package codes // import "go.opentelemetry.io/otel/codes" diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go index 4e328fbb4..ee8db448b 100644 --- a/vendor/go.opentelemetry.io/otel/codes/doc.go +++ b/vendor/go.opentelemetry.io/otel/codes/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package codes defines the canonical error codes used by OpenTelemetry. diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go index 36d7c24e8..441c59501 100644 --- a/vendor/go.opentelemetry.io/otel/doc.go +++ b/vendor/go.opentelemetry.io/otel/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package otel provides global access to the OpenTelemetry API. The subpackages of diff --git a/vendor/go.opentelemetry.io/otel/error_handler.go b/vendor/go.opentelemetry.io/otel/error_handler.go index 72fad8541..67414c71e 100644 --- a/vendor/go.opentelemetry.io/otel/error_handler.go +++ b/vendor/go.opentelemetry.io/otel/error_handler.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh index 9a58fb1d3..93e80ea30 100644 --- a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh +++ b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh @@ -1,18 +1,7 @@ #!/usr/bin/env bash # Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# SPDX-License-Identifier: Apache-2.0 set -euo pipefail diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go index 4115fe3bb..07623b679 100644 --- a/vendor/go.opentelemetry.io/otel/handler.go +++ b/vendor/go.opentelemetry.io/otel/handler.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" @@ -18,12 +7,8 @@ import ( "go.opentelemetry.io/otel/internal/global" ) -var ( - // Compile-time check global.ErrDelegator implements ErrorHandler. - _ ErrorHandler = (*global.ErrDelegator)(nil) - // Compile-time check global.ErrLogger implements ErrorHandler. - _ ErrorHandler = (*global.ErrLogger)(nil) -) +// Compile-time check global.ErrDelegator implements ErrorHandler. +var _ ErrorHandler = (*global.ErrDelegator)(nil) // GetErrorHandler returns the global ErrorHandler instance. // @@ -44,5 +29,5 @@ func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() } // delegate errors to h. func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) } -// Handle is a convenience function for ErrorHandler().Handle(err). -func Handle(err error) { global.Handle(err) } +// Handle is a convenience function for GetErrorHandler().Handle(err). +func Handle(err error) { global.GetErrorHandler().Handle(err) } diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go index 622c3ee3f..822d84794 100644 --- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package attribute provides several helper functions for some commonly used @@ -25,33 +14,33 @@ import ( // BoolSliceValue converts a bool slice into an array with the same elements as the slice. func BoolSliceValue(v []bool) interface{} { var zero bool - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // Int64SliceValue converts an int64 slice into an array with the same elements as the slice. func Int64SliceValue(v []int64) interface{} { var zero int64 - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // Float64SliceValue converts a float64 slice into an array with the same elements as the slice. func Float64SliceValue(v []float64) interface{} { var zero float64 - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // StringSliceValue converts a string slice into an array with the same elements as the slice. func StringSliceValue(v []string) interface{} { var zero string - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v) - return cp.Elem().Interface() + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() } // AsBoolSlice converts a bool array into a slice with the same elements as the array. diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go index b96e5408e..b4f85f44a 100644 --- a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0 /* Package baggage provides base types and functionality to store and retrieve diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go index 4469700d9..3aea9c491 100644 --- a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package baggage // import "go.opentelemetry.io/otel/internal/baggage" diff --git a/vendor/go.opentelemetry.io/otel/internal/gen.go b/vendor/go.opentelemetry.io/otel/internal/gen.go index f532f07e9..4259f0320 100644 --- a/vendor/go.opentelemetry.io/otel/internal/gen.go +++ b/vendor/go.opentelemetry.io/otel/internal/gen.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "go.opentelemetry.io/otel/internal" diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go index 5e9b83047..c657ff8e7 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/handler.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/handler.go @@ -1,38 +1,13 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" import ( "log" - "os" "sync/atomic" ) -var ( - // GlobalErrorHandler provides an ErrorHandler that can be used - // throughout an OpenTelemetry instrumented project. When a user - // specified ErrorHandler is registered (`SetErrorHandler`) all calls to - // `Handle` and will be delegated to the registered ErrorHandler. 
- GlobalErrorHandler = defaultErrorHandler() - - // Compile-time check that delegator implements ErrorHandler. - _ ErrorHandler = (*ErrDelegator)(nil) - // Compile-time check that errLogger implements ErrorHandler. - _ ErrorHandler = (*ErrLogger)(nil) -) - // ErrorHandler handles irremediable events. type ErrorHandler interface { // Handle handles any error deemed irremediable by an OpenTelemetry @@ -44,59 +19,18 @@ type ErrDelegator struct { delegate atomic.Pointer[ErrorHandler] } -func (d *ErrDelegator) Handle(err error) { - d.getDelegate().Handle(err) -} +// Compile-time check that delegator implements ErrorHandler. +var _ ErrorHandler = (*ErrDelegator)(nil) -func (d *ErrDelegator) getDelegate() ErrorHandler { - return *d.delegate.Load() +func (d *ErrDelegator) Handle(err error) { + if eh := d.delegate.Load(); eh != nil { + (*eh).Handle(err) + return + } + log.Print(err) } // setDelegate sets the ErrorHandler delegate. func (d *ErrDelegator) setDelegate(eh ErrorHandler) { d.delegate.Store(&eh) } - -func defaultErrorHandler() *ErrDelegator { - d := &ErrDelegator{} - d.setDelegate(&ErrLogger{l: log.New(os.Stderr, "", log.LstdFlags)}) - return d -} - -// ErrLogger logs errors if no delegate is set, otherwise they are delegated. -type ErrLogger struct { - l *log.Logger -} - -// Handle logs err if no delegate is set, otherwise it is delegated. -func (h *ErrLogger) Handle(err error) { - h.l.Print(err) -} - -// GetErrorHandler returns the global ErrorHandler instance. -// -// The default ErrorHandler instance returned will log all errors to STDERR -// until an override ErrorHandler is set with SetErrorHandler. All -// ErrorHandler returned prior to this will automatically forward errors to -// the set instance instead of logging. -// -// Subsequent calls to SetErrorHandler after the first will not forward errors -// to the new ErrorHandler for prior returned instances. -func GetErrorHandler() ErrorHandler { - return GlobalErrorHandler -} - -// SetErrorHandler sets the global ErrorHandler to h. -// -// The first time this is called all ErrorHandler previously returned from -// GetErrorHandler will send errors to h instead of the default logging -// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not -// delegate errors to h. -func SetErrorHandler(h ErrorHandler) { - GlobalErrorHandler.setDelegate(h) -} - -// Handle is a convenience function for ErrorHandler().Handle(err). -func Handle(err error) { - GetErrorHandler().Handle(err) -} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go index ebb13c206..3a0cc42f6 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -292,6 +281,32 @@ func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.Reco } } +type sfGauge struct { + embedded.Float64Gauge + + name string + opts []metric.Float64GaugeOption + + delegate atomic.Value // metric.Float64Gauge +} + +var _ metric.Float64Gauge = (*sfGauge)(nil) + +func (i *sfGauge) setDelegate(m metric.Meter) { + ctr, err := m.Float64Gauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfGauge) Record(ctx context.Context, x float64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Gauge).Record(ctx, x, opts...) + } +} + type siCounter struct { embedded.Int64Counter @@ -369,3 +384,29 @@ func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.Record ctr.(metric.Int64Histogram).Record(ctx, x, opts...) } } + +type siGauge struct { + embedded.Int64Gauge + + name string + opts []metric.Int64GaugeOption + + delegate atomic.Value // metric.Int64Gauge +} + +var _ metric.Int64Gauge = (*siGauge)(nil) + +func (i *siGauge) setDelegate(m metric.Meter) { + ctr, err := m.Int64Gauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siGauge) Record(ctx context.Context, x int64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64Gauge).Record(ctx, x, opts...) + } +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go index c6f305a2b..adbca7d34 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -23,17 +12,20 @@ import ( "github.com/go-logr/stdr" ) -// globalLogger is the logging interface used within the otel api and sdk provide details of the internals. +// globalLogger holds a reference to the [logr.Logger] used within +// go.opentelemetry.io/otel. // // The default logger uses stdr which is backed by the standard `log.Logger` // interface. This logger will only show messages at the Error Level. -var globalLogger atomic.Pointer[logr.Logger] +var globalLogger = func() *atomic.Pointer[logr.Logger] { + l := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)) -func init() { - SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) -} + p := new(atomic.Pointer[logr.Logger]) + p.Store(&l) + return p +}() -// SetLogger overrides the globalLogger with l. +// SetLogger sets the global Logger to l. 
// // To see Warn messages use a logger with `l.V(1).Enabled() == true` // To see Info messages use a logger with `l.V(4).Enabled() == true` @@ -42,28 +34,29 @@ func SetLogger(l logr.Logger) { globalLogger.Store(&l) } -func getLogger() logr.Logger { +// GetLogger returns the global logger. +func GetLogger() logr.Logger { return *globalLogger.Load() } // Info prints messages about the general state of the API or SDK. // This should usually be less than 5 messages a minute. func Info(msg string, keysAndValues ...interface{}) { - getLogger().V(4).Info(msg, keysAndValues...) + GetLogger().V(4).Info(msg, keysAndValues...) } // Error prints messages about exceptional states of the API or SDK. func Error(err error, msg string, keysAndValues ...interface{}) { - getLogger().Error(err, msg, keysAndValues...) + GetLogger().Error(err, msg, keysAndValues...) } // Debug prints messages about all internal changes in the API or SDK. func Debug(msg string, keysAndValues ...interface{}) { - getLogger().V(8).Info(msg, keysAndValues...) + GetLogger().V(8).Info(msg, keysAndValues...) } // Warn prints messages about warnings in the API or SDK. // Not an error but is likely more important than an informational event. func Warn(msg string, keysAndValues ...interface{}) { - getLogger().V(1).Info(msg, keysAndValues...) + GetLogger().V(1).Info(msg, keysAndValues...) } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index 7ed61c0e2..cfd1df9bf 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -76,6 +65,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me key := il{ name: name, version: c.InstrumentationVersion(), + schema: c.SchemaURL(), } if p.meters == nil { @@ -175,6 +165,17 @@ func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOpti return i, nil } +func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64Gauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { if del, ok := m.delegate.Load().(metric.Meter); ok { return del.Int64ObservableCounter(name, options...) 
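As context for the Int64Gauge (and, in the next hunk, Float64Gauge) plumbing added to this delegating meter: a minimal usage sketch, assuming only the public otel and metric entry points this vendored code backs. The scope and instrument names are invented for illustration; before a real MeterProvider is installed, the gauge returned here is the siGauge placeholder above, and recordings are dropped until a delegate is set.

package main

import (
	"context"

	"go.opentelemetry.io/otel"
)

func main() {
	// The global MeterProvider returns the delegating meter vendored here
	// until an SDK registers a real provider via otel.SetMeterProvider.
	meter := otel.Meter("example.com/queue") // hypothetical scope name

	depth, err := meter.Int64Gauge("queue.depth") // hypothetical instrument name
	if err != nil {
		otel.Handle(err)
		return
	}

	// Records are no-ops on the placeholder; once a delegate meter is set,
	// subsequent records reach the real instrument.
	depth.Record(context.Background(), 42)
}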
@@ -241,6 +242,17 @@ func (m *meter) Float64Histogram(name string, options ...metric.Float64Histogram return i, nil } +func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64Gauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { if del, ok := m.delegate.Load().(metric.Meter); ok { return del.Float64ObservableCounter(name, options...) diff --git a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go index 06bac35c2..38560ff99 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go index 386c8bfdc..204ea142a 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/state.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -25,6 +14,10 @@ import ( ) type ( + errorHandlerHolder struct { + eh ErrorHandler + } + tracerProviderHolder struct { tp trace.TracerProvider } @@ -39,15 +32,59 @@ type ( ) var ( + globalErrorHandler = defaultErrorHandler() globalTracer = defaultTracerValue() globalPropagators = defaultPropagatorsValue() globalMeterProvider = defaultMeterProvider() + delegateErrorHandlerOnce sync.Once delegateTraceOnce sync.Once delegateTextMapPropagatorOnce sync.Once delegateMeterOnce sync.Once ) +// GetErrorHandler returns the global ErrorHandler instance. +// +// The default ErrorHandler instance returned will log all errors to STDERR +// until an override ErrorHandler is set with SetErrorHandler. 
All +// ErrorHandler returned prior to this will automatically forward errors to +// the set instance instead of logging. +// +// Subsequent calls to SetErrorHandler after the first will not forward errors +// to the new ErrorHandler for prior returned instances. +func GetErrorHandler() ErrorHandler { + return globalErrorHandler.Load().(errorHandlerHolder).eh +} + +// SetErrorHandler sets the global ErrorHandler to h. +// +// The first time this is called all ErrorHandler previously returned from +// GetErrorHandler will send errors to h instead of the default logging +// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not +// delegate errors to h. +func SetErrorHandler(h ErrorHandler) { + current := GetErrorHandler() + + if _, cOk := current.(*ErrDelegator); cOk { + if _, ehOk := h.(*ErrDelegator); ehOk && current == h { + // Do not assign to the delegate of the default ErrDelegator to be + // itself. + Error( + errors.New("no ErrorHandler delegate configured"), + "ErrorHandler remains its current value.", + ) + return + } + } + + delegateErrorHandlerOnce.Do(func() { + if def, ok := current.(*ErrDelegator); ok { + def.setDelegate(h) + } + }) + globalErrorHandler.Store(errorHandlerHolder{eh: h}) +} + // TracerProvider is the internal implementation for global.TracerProvider. func TracerProvider() trace.TracerProvider { return globalTracer.Load().(tracerProviderHolder).tp @@ -137,6 +174,12 @@ func SetMeterProvider(mp metric.MeterProvider) { globalMeterProvider.Store(meterProviderHolder{mp: mp}) } +func defaultErrorHandler() *atomic.Value { + v := &atomic.Value{} + v.Store(errorHandlerHolder{eh: &ErrDelegator{}}) + return v +} + func defaultTracerValue() *atomic.Value { v := &atomic.Value{} v.Store(tracerProviderHolder{tp: &tracerProvider{}}) diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index 3f61ec12a..e31f442b4 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package global // import "go.opentelemetry.io/otel/internal/global" @@ -97,6 +86,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T key := il{ name: name, version: c.InstrumentationVersion(), + schema: c.SchemaURL(), } if p.tracers == nil { @@ -112,10 +102,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T return t } -type il struct { - name string - version string -} +type il struct{ name, version, schema string } // tracer is a placeholder for a trace.Tracer. // @@ -193,6 +180,9 @@ func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} // AddEvent does nothing. func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} +// AddLink does nothing. 
+func (nonRecordingSpan) AddLink(trace.Link) {} + // SetName does nothing. func (nonRecordingSpan) SetName(string) {} diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go index e07e79400..3e7bb3b35 100644 --- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package internal // import "go.opentelemetry.io/otel/internal" diff --git a/vendor/go.opentelemetry.io/otel/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal_logging.go index c4f8acd5d..6de7f2e4d 100644 --- a/vendor/go.opentelemetry.io/otel/internal_logging.go +++ b/vendor/go.opentelemetry.io/otel/internal_logging.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go index f95517195..1e6473b32 100644 --- a/vendor/go.opentelemetry.io/otel/metric.go +++ b/vendor/go.opentelemetry.io/otel/metric.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/metric/README.md b/vendor/go.opentelemetry.io/otel/metric/README.md new file mode 100644 index 000000000..0cf902e01 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/README.md @@ -0,0 +1,3 @@ +# Metric API + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric)](https://pkg.go.dev/go.opentelemetry.io/otel/metric) diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go index 072baa8e8..cf23db778 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -50,7 +39,7 @@ type Float64ObservableCounter interface { } // Float64ObservableCounterConfig contains options for asynchronous counter -// instruments that record int64 values. +// instruments that record float64 values. type Float64ObservableCounterConfig struct { description string unit string @@ -108,7 +97,7 @@ type Float64ObservableUpDownCounter interface { } // Float64ObservableUpDownCounterConfig contains options for asynchronous -// counter instruments that record int64 values. +// counter instruments that record float64 values. type Float64ObservableUpDownCounterConfig struct { description string unit string @@ -165,7 +154,7 @@ type Float64ObservableGauge interface { } // Float64ObservableGaugeConfig contains options for asynchronous counter -// instruments that record int64 values. +// instruments that record float64 values. type Float64ObservableGaugeConfig struct { description string unit string diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go index 9bd6ebf02..c82ba5324 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go index 778ad2d74..d9e3b13e4 100644 --- a/vendor/go.opentelemetry.io/otel/metric/config.go +++ b/vendor/go.opentelemetry.io/otel/metric/config.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go index 54716e13b..f153745b0 100644 --- a/vendor/go.opentelemetry.io/otel/metric/doc.go +++ b/vendor/go.opentelemetry.io/otel/metric/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package metric provides the OpenTelemetry API used to measure metrics about @@ -68,6 +57,23 @@ asynchronous measurement, a Gauge ([Int64ObservableGauge] and See the [OpenTelemetry documentation] for more information about instruments and their intended use. +# Instrument Name + +OpenTelemetry defines an [instrument name syntax] that restricts what +instrument names are allowed. + +Instrument names should ... + + - Not be empty. + - Have an alphabetic character as their first letter. + - Have any letter after the first be an alphanumeric character, ‘_’, ‘.’, + ‘-’, or ‘/’. + - Have a maximum length of 255 letters. + +To ensure compatibility with observability platforms, all instruments created +need to conform to this syntax. Not all implementations of the API will validate +these names; it is the caller's responsibility to ensure compliance. + # Measurements Measurements are made by recording values and information about the values with @@ -164,6 +170,7 @@ It is strongly recommended that authors only embed That implementation is the only one OpenTelemetry authors can guarantee will fully implement all the API interfaces when a user updates their API.
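As a rough paraphrase of the naming rules listed above (the new doc text is explicit that implementations may not validate names, so this regexp illustrates the stated syntax; it is not a check shipped by the package):

package main

import (
	"fmt"
	"regexp"
)

// One leading alphabetic character, then up to 254 characters drawn from
// alphanumerics, '_', '.', '-', and '/', for a 255-character maximum.
var instrumentName = regexp.MustCompile(`^[A-Za-z][A-Za-z0-9_./-]{0,254}$`)

func main() {
	fmt.Println(instrumentName.MatchString("queue.depth")) // true
	fmt.Println(instrumentName.MatchString("9lives"))      // false: leading digit
	fmt.Println(instrumentName.MatchString(""))            // false: empty
}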
+[instrument name syntax]: https://opentelemetry.io/docs/specs/otel/metrics/api/#instrument-name-syntax [OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/ [GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider */ diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/README.md b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md new file mode 100644 index 000000000..1f6e0efa7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md @@ -0,0 +1,3 @@ +# Metric Embedded + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/embedded) diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go index ae0bdbd2e..1a9dc6809 100644 --- a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go +++ b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package embedded provides interfaces embedded within the [OpenTelemetry // metric API]. @@ -113,6 +102,16 @@ type Float64Counter interface{ float64Counter() } // the API package). type Float64Histogram interface{ float64Histogram() } +// Float64Gauge is embedded in [go.opentelemetry.io/otel/metric.Float64Gauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Gauge] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Float64Gauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64Gauge interface{ float64Gauge() } + // Float64ObservableCounter is embedded in // [go.opentelemetry.io/otel/metric.Float64ObservableCounter]. // @@ -185,6 +184,16 @@ type Int64Counter interface{ int64Counter() } // the API package). type Int64Histogram interface{ int64Histogram() } +// Int64Gauge is embedded in [go.opentelemetry.io/otel/metric.Int64Gauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Gauge] if you want users to experience +// a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Int64Gauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64Gauge interface{ int64Gauge() } + // Int64ObservableCounter is embedded in // [go.opentelemetry.io/otel/metric.Int64ObservableCounter]. 
// diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go index be89cd533..ea52e4023 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -27,6 +16,7 @@ type InstrumentOption interface { Int64CounterOption Int64UpDownCounterOption Int64HistogramOption + Int64GaugeOption Int64ObservableCounterOption Int64ObservableUpDownCounterOption Int64ObservableGaugeOption @@ -34,6 +24,7 @@ type InstrumentOption interface { Float64CounterOption Float64UpDownCounterOption Float64HistogramOption + Float64GaugeOption Float64ObservableCounterOption Float64ObservableUpDownCounterOption Float64ObservableGaugeOption @@ -62,6 +53,11 @@ func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64Histogra return c } +func (o descOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig { + c.description = string(o) + return c +} + func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { c.description = string(o) return c @@ -92,6 +88,11 @@ func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfi return c } +func (o descOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig { + c.description = string(o) + return c +} + func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { c.description = string(o) return c @@ -127,6 +128,11 @@ func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64Histogra return c } +func (o unitOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig { + c.unit = string(o) + return c +} + func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { c.unit = string(o) return c @@ -157,6 +163,11 @@ func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfi return c } +func (o unitOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig { + c.unit = string(o) + return c +} + func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { c.unit = string(o) return c diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go index 2520bc74a..6a7991e01 100644 --- a/vendor/go.opentelemetry.io/otel/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -58,17 +47,37 @@ type Meter interface { // Int64Counter returns a new Int64Counter instrument identified by name // and configured with options. The instrument is used to synchronously // record increasing int64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) // Int64UpDownCounter returns a new Int64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record int64 measurements during a computational // operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) // Int64Histogram returns a new Int64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of int64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) + // Int64Gauge returns a new Int64Gauge instrument identified by name and + // configured with options. The instrument is used to synchronously record + // instantaneous int64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error) // Int64ObservableCounter returns a new Int64ObservableCounter identified // by name and configured with options. The instrument is used to // asynchronously record increasing int64 measurements once per a @@ -78,6 +87,10 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter // instrument identified by name and configured with options. 
The @@ -88,6 +101,10 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) // Int64ObservableGauge returns a new Int64ObservableGauge instrument // identified by name and configured with options. The instrument is used @@ -98,23 +115,47 @@ type Meter interface { // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error) // Float64Counter returns a new Float64Counter instrument identified by // name and configured with options. The instrument is used to // synchronously record increasing float64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error) // Float64UpDownCounter returns a new Float64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record float64 measurements during a computational // operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) // Float64Histogram returns a new Float64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of float64 measurements during a // computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) + // Float64Gauge returns a new Float64Gauge instrument identified by name and + // configured with options. The instrument is used to synchronously record + // instantaneous float64 measurements during a computational operation. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. + Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error) // Float64ObservableCounter returns a new Float64ObservableCounter // instrument identified by name and configured with options. 
The // instrument is used to asynchronously record increasing float64 @@ -124,6 +165,10 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) // Float64ObservableUpDownCounter returns a new // Float64ObservableUpDownCounter instrument identified by name and @@ -134,6 +179,10 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) // Float64ObservableGauge returns a new Float64ObservableGauge instrument // identified by name and configured with options. The instrument is used @@ -144,6 +193,10 @@ type Meter interface { // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. + // + // The name needs to conform to the OpenTelemetry instrument name syntax. + // See the Instrument Name section of the package documentation for more + // information. Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error) // RegisterCallback registers f to be called during the collection of a diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/README.md b/vendor/go.opentelemetry.io/otel/metric/noop/README.md new file mode 100644 index 000000000..bb8969435 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/noop/README.md @@ -0,0 +1,3 @@ +# Metric Noop + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/noop) diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go index acc9a670b..ca6fcbdc0 100644 --- a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go +++ b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 // Package noop provides an implementation of the OpenTelemetry metric API that // produces no telemetry and minimizes used computation resources. @@ -43,6 +32,8 @@ var ( _ metric.Float64UpDownCounter = Float64UpDownCounter{} _ metric.Int64Histogram = Int64Histogram{} _ metric.Float64Histogram = Float64Histogram{} + _ metric.Int64Gauge = Int64Gauge{} + _ metric.Float64Gauge = Float64Gauge{} _ metric.Int64ObservableCounter = Int64ObservableCounter{} _ metric.Float64ObservableCounter = Float64ObservableCounter{} _ metric.Int64ObservableGauge = Int64ObservableGauge{} @@ -87,6 +78,12 @@ func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int6 return Int64Histogram{}, nil } +// Int64Gauge returns a Gauge used to record int64 measurements that +// produces no telemetry. +func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { + return Int64Gauge{}, nil +} + // Int64ObservableCounter returns an ObservableCounter used to record int64 // measurements that produces no telemetry. func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { @@ -123,6 +120,12 @@ func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric. return Float64Histogram{}, nil } +// Float64Gauge returns a Gauge used to record float64 measurements that +// produces no telemetry. +func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { + return Float64Gauge{}, nil +} + // Float64ObservableCounter returns an ObservableCounter used to record int64 // measurements that produces no telemetry. func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { @@ -208,6 +211,20 @@ type Float64Histogram struct{ embedded.Float64Histogram } // Record performs no operation. func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {} +// Int64Gauge is an OpenTelemetry Gauge used to record instantaneous int64 +// measurements. It produces no telemetry. +type Int64Gauge struct{ embedded.Int64Gauge } + +// Record performs no operation. +func (Int64Gauge) Record(context.Context, int64, ...metric.RecordOption) {} + +// Float64Gauge is an OpenTelemetry Gauge used to record instantaneous float64 +// measurements. It produces no telemetry. +type Float64Gauge struct{ embedded.Float64Gauge } + +// Record performs no operation. +func (Float64Gauge) Record(context.Context, float64, ...metric.RecordOption) {} + // Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record // int64 measurements. It produces no telemetry. type Int64ObservableCounter struct { diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go index 0a4825ae6..8403a4bad 100644 --- a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -39,7 +28,7 @@ type Float64Counter interface { } // Float64CounterConfig contains options for synchronous counter instruments that -// record int64 values. +// record float64 values. type Float64CounterConfig struct { description string unit string @@ -92,7 +81,7 @@ type Float64UpDownCounter interface { } // Float64UpDownCounterConfig contains options for synchronous counter -// instruments that record int64 values. +// instruments that record float64 values. type Float64UpDownCounterConfig struct { description string unit string @@ -144,8 +133,8 @@ type Float64Histogram interface { Record(ctx context.Context, incr float64, options ...RecordOption) } -// Float64HistogramConfig contains options for synchronous counter instruments -// that record int64 values. +// Float64HistogramConfig contains options for synchronous histogram +// instruments that record float64 values. type Float64HistogramConfig struct { description string unit string @@ -183,3 +172,55 @@ func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 { type Float64HistogramOption interface { applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig } + +// Float64Gauge is an instrument that records instantaneous float64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64Gauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64Gauge + + // Record records the instantaneous value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, value float64, options ...RecordOption) +} + +// Float64GaugeConfig contains options for synchronous gauge instruments that +// record float64 values. +type Float64GaugeConfig struct { + description string + unit string +} + +// NewFloat64GaugeConfig returns a new [Float64GaugeConfig] with all opts +// applied. +func NewFloat64GaugeConfig(opts ...Float64GaugeOption) Float64GaugeConfig { + var config Float64GaugeConfig + for _, o := range opts { + config = o.applyFloat64Gauge(config) + } + return config +} + +// Description returns the configured description. +func (c Float64GaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64GaugeConfig) Unit() string { + return c.unit +} + +// Float64GaugeOption applies options to a [Float64GaugeConfig]. See +// [InstrumentOption] for other options that can be used as a +// Float64GaugeOption. 
+type Float64GaugeOption interface { + applyFloat64Gauge(Float64GaugeConfig) Float64GaugeConfig +} diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go index 56667d32f..783fdfba7 100644 --- a/vendor/go.opentelemetry.io/otel/metric/syncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metric // import "go.opentelemetry.io/otel/metric" @@ -144,7 +133,7 @@ type Int64Histogram interface { Record(ctx context.Context, incr int64, options ...RecordOption) } -// Int64HistogramConfig contains options for synchronous counter instruments +// Int64HistogramConfig contains options for synchronous histogram instruments // that record int64 values. type Int64HistogramConfig struct { description string @@ -183,3 +172,55 @@ func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 { type Int64HistogramOption interface { applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig } + +// Int64Gauge is an instrument that records instantaneous int64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64Gauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64Gauge + + // Record records the instantaneous value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, value int64, options ...RecordOption) +} + +// Int64GaugeConfig contains options for synchronous gauge instruments that +// record int64 values. +type Int64GaugeConfig struct { + description string + unit string +} + +// NewInt64GaugeConfig returns a new [Int64GaugeConfig] with all opts +// applied. +func NewInt64GaugeConfig(opts ...Int64GaugeOption) Int64GaugeConfig { + var config Int64GaugeConfig + for _, o := range opts { + config = o.applyInt64Gauge(config) + } + return config +} + +// Description returns the configured description. +func (c Int64GaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64GaugeConfig) Unit() string { + return c.unit +} + +// Int64GaugeOption applies options to a [Int64GaugeConfig]. See +// [InstrumentOption] for other options that can be used as a +// Int64GaugeOption. 
+type Int64GaugeOption interface { + applyInt64Gauge(Int64GaugeConfig) Int64GaugeConfig +} diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go index d29aaa32c..2fd949733 100644 --- a/vendor/go.opentelemetry.io/otel/propagation.go +++ b/vendor/go.opentelemetry.io/otel/propagation.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/propagation/README.md b/vendor/go.opentelemetry.io/otel/propagation/README.md new file mode 100644 index 000000000..e2959ac74 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/propagation/README.md @@ -0,0 +1,3 @@ +# Propagation + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/propagation)](https://pkg.go.dev/go.opentelemetry.io/otel/propagation) diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go index 303cdf1cb..552263ba7 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/baggage.go +++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package propagation // import "go.opentelemetry.io/otel/propagation" diff --git a/vendor/go.opentelemetry.io/otel/propagation/doc.go b/vendor/go.opentelemetry.io/otel/propagation/doc.go index c119eb285..33a3baf15 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/doc.go +++ b/vendor/go.opentelemetry.io/otel/propagation/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package propagation contains OpenTelemetry context propagators. 
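The propagation package changes above are license-header and doc-comment updates only, so the usual wiring still applies; a sketch, assuming standard usage of the public propagation API (the URL is a placeholder):

package main

import (
	"context"
	"net/http"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/propagation"
)

func main() {
	// Register W3C Trace Context plus Baggage as the global propagator.
	otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
		propagation.TraceContext{},
		propagation.Baggage{},
	))

	// Inject whatever span context/baggage the context carries (none here)
	// into the outgoing request headers; Extract performs the inverse.
	req, _ := http.NewRequest(http.MethodGet, "https://example.com", nil)
	otel.GetTextMapPropagator().Inject(context.Background(), propagation.HeaderCarrier(req.Header))
}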
diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go index c94438f73..8c8286aab 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/propagation.go +++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package propagation // import "go.opentelemetry.io/otel/propagation" diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go index 63e5d6222..6870e316d 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go +++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package propagation // import "go.opentelemetry.io/otel/propagation" @@ -46,7 +35,7 @@ var ( versionPart = fmt.Sprintf("%.2X", supportedVersion) ) -// Inject set tracecontext from the Context into the carrier. +// Inject injects the trace context from ctx into carrier. 
func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { sc := trace.SpanContextFromContext(ctx) if !sc.IsValid() { diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json new file mode 100644 index 000000000..8c5ac55ca --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/renovate.json @@ -0,0 +1,24 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "config:recommended" + ], + "ignorePaths": [], + "labels": ["Skip Changelog", "dependencies"], + "postUpdateOptions" : [ + "gomodTidy" + ], + "packageRules": [ + { + "matchManagers": ["gomod"], + "matchDepTypes": ["indirect"], + "enabled": true + }, + { + "matchFileNames": ["internal/tools/**"], + "matchManagers": ["gomod"], + "matchDepTypes": ["indirect"], + "enabled": false + } + ] +} diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt index e0a43e138..ab09daf9d 100644 --- a/vendor/go.opentelemetry.io/otel/requirements.txt +++ b/vendor/go.opentelemetry.io/otel/requirements.txt @@ -1 +1 @@ -codespell==2.2.6 +codespell==2.3.0 diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md new file mode 100644 index 000000000..87b842c5d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.17.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.17.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.17.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go index 71a1f7748..e087c9c04 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package semconv implements OpenTelemetry semantic conventions. // diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go index 679c40c4d..c7b804bbe 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go index 9b8c559de..137acc67d 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go index d5c4b5c13..d318221e5 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go index 39a2eab3a..7e365e82c 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. 
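Aside from the license-header swap, the trace_context.go hunk above only rewrites the Inject doc comment, so it is worth seeing the API it documents in action. The following standalone sketch is not part of the vendored change; the trace and span IDs are fabricated purely for illustration. It shows how TraceContext.Inject and Extract round-trip a span context through W3C trace-context headers:

    package main

    import (
    	"context"
    	"fmt"
    	"net/http"

    	"go.opentelemetry.io/otel/propagation"
    	"go.opentelemetry.io/otel/trace"
    )

    func main() {
    	tc := propagation.TraceContext{}

    	// Fabricate a valid, sampled remote span context; with an invalid
    	// span context, Inject is a no-op.
    	sc := trace.NewSpanContext(trace.SpanContextConfig{
    		TraceID:    trace.TraceID{0x01},
    		SpanID:     trace.SpanID{0x01},
    		TraceFlags: trace.FlagsSampled,
    	})
    	ctx := trace.ContextWithRemoteSpanContext(context.Background(), sc)

    	// Inject serializes the span context from ctx into the carrier as a
    	// W3C traceparent header.
    	header := make(http.Header)
    	tc.Inject(ctx, propagation.HeaderCarrier(header))
    	fmt.Println("traceparent:", header.Get("traceparent"))

    	// Extract parses the headers back into a context on the receiving side.
    	ctx = tc.Extract(context.Background(), propagation.HeaderCarrier(header))
    	fmt.Println("trace ID:", trace.SpanContextFromContext(ctx).TraceID())
    }

With the sampled flag set, Inject emits a traceparent of the form 00-&lt;trace-id&gt;-&lt;span-id&gt;-01, and Extract reverses the operation on the server side.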
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go index 42fc525d1..634a1dce0 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go index 8c4a7299d..21497bb6b 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md new file mode 100644 index 000000000..82e1f46b4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.20.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.20.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.20.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go index 67d1d4c44..6685c392b 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. 
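The semconv packages touched in this diff are pure data: each generated file pairs attribute.Key constants with thin constructors or pre-built enum values. As a minimal sketch (not part of the vendored change; the values are illustrative), here is how callers typically consume them, using the FaaS helpers from the v1.24.0 attribute_group.go added further down:

    package main

    import (
    	"fmt"

    	"go.opentelemetry.io/otel/attribute"
    	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
    )

    func main() {
    	attrs := []attribute.KeyValue{
    		// String-typed attributes get a constructor of the matching Go type.
    		semconv.FaaSInvokedName("my-function"),
    		semconv.FaaSInvokedRegion("eu-central-1"),
    		// Enum members are pre-built KeyValues.
    		semconv.FaaSInvokedProviderAWS,
    		semconv.FaaSTriggerHTTP,
    	}
    	for _, kv := range attrs {
    		fmt.Printf("%s = %s\n", kv.Key, kv.Value.Emit())
    	}
    }

This is why the generated files below pair each Key constant (for example FaaSInvokedNameKey) with a small constructor function, while enum briefs are emitted as var blocks of ready-made KeyValues.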
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go index 359c5a696..0d1f55a8f 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package semconv implements OpenTelemetry semantic conventions. // diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go index 8ac9350d2..637763932 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go index 09ff4dfdb..f40c97825 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go index 342aede95..9c1840631 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go index a2b906742..3d44dae27 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go index e449e5c3b..95d0210e3 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go index 851774148..90b1b0452 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 // Code generated from semantic convention specification. DO NOT EDIT. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md new file mode 100644 index 000000000..0b6cbe960 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.24.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.24.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.24.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go new file mode 100644 index 000000000..6e688345c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go @@ -0,0 +1,4387 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" + +import "go.opentelemetry.io/otel/attribute" + +// Describes FaaS attributes. +const ( + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: ConditionallyRequired (For some cloud providers, like + // AWS or GCP, the region in which a function is hosted is essential to + // uniquely identify the function and also part of its endpoint. Since it's + // part of the endpoint being called, the region is always known to + // clients. In these cases, `faas.invoked_region` MUST be set accordingly. + // If the region is unknown to the client or not required for identifying + // the invoked function, setting `faas.invoked_region` is optional.) + // Stability: experimental + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") + + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function invocation. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + FaaSTriggerKey = attribute.Key("faas.trigger") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// Attributes for Events represented using Log Records. +const ( + // EventNameKey is the attribute Key conforming to the "event.name" + // semantic conventions. It identifies the class / type of + // event. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'browser.mouse.click', 'device.app.lifecycle' + // Note: Event names are subject to the same rules as [attribute + // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.26.0/specification/common/attribute-naming.md). + // Notably, event names are namespaced to avoid collisions and provide a + // clean separation of semantics for events in separate domains like + // browser, mobile, and kubernetes. + EventNameKey = attribute.Key("event.name") +) + +// EventName returns an attribute KeyValue conforming to the "event.name" +// semantic conventions. It identifies the class / type of +// event. +func EventName(val string) attribute.KeyValue { + return EventNameKey.String(val) +} + +// The attributes described in this section are rather generic. They may be +// used in any Log Record they apply to. +const ( + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log + // Record. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. This means that two + // distinguishable log records MUST have different values.
+ // The id MAY be a [Universally Unique Lexicographically Sortable + // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers + // (e.g. UUID) may be used as needed. + LogRecordUIDKey = attribute.Key("log.record.uid") +) + +// LogRecordUID returns an attribute KeyValue conforming to the +// "log.record.uid" semantic conventions. It represents a unique identifier for +// the Log Record. +func LogRecordUID(val string) attribute.KeyValue { + return LogRecordUIDKey.String(val) +} + +// Describes Log attributes +const ( + // LogIostreamKey is the attribute Key conforming to the "log.iostream" + // semantic conventions. It represents the stream associated with the log. + // See below for a list of well-known values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + LogIostreamKey = attribute.Key("log.iostream") +) + +var ( + // Logs from stdout stream + LogIostreamStdout = LogIostreamKey.String("stdout") + // Logs from stderr stream + LogIostreamStderr = LogIostreamKey.String("stderr") +) + +// A file to which the log was emitted. +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: experimental + // Examples: 'audit.log' + LogFileNameKey = attribute.Key("log.file.name") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the + // basename of the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'uuid.log' + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. It represents the full path to the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/log/mysql/audit.log' + LogFilePathKey = attribute.Key("log.file.path") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full + // path to the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/lib/docker/uuid.log' + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") +) + +// LogFileName returns an attribute KeyValue conforming to the +// "log.file.name" semantic conventions. It represents the basename of the +// file. +func LogFileName(val string) attribute.KeyValue { + return LogFileNameKey.String(val) +} + +// LogFileNameResolved returns an attribute KeyValue conforming to the +// "log.file.name_resolved" semantic conventions. It represents the basename of +// the file, with symlinks resolved. +func LogFileNameResolved(val string) attribute.KeyValue { + return LogFileNameResolvedKey.String(val) +} + +// LogFilePath returns an attribute KeyValue conforming to the +// "log.file.path" semantic conventions. It represents the full path to the +// file. +func LogFilePath(val string) attribute.KeyValue { + return LogFilePathKey.String(val) +} + +// LogFilePathResolved returns an attribute KeyValue conforming to the +// "log.file.path_resolved" semantic conventions. It represents the full path +// to the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue { + return LogFilePathResolvedKey.String(val) +} + +// Describes Database attributes +const ( + // PoolNameKey is the attribute Key conforming to the "pool.name" semantic + // conventions. It represents the name of the connection pool; unique + // within the instrumented application. If the connection pool + // implementation doesn't provide a name, then the + // [db.connection_string](/docs/database/database-spans.md#connection-level-attributes) + // should be used. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'myDataSource' + PoolNameKey = attribute.Key("pool.name") + + // StateKey is the attribute Key conforming to the "state" semantic + // conventions. It represents the state of a connection in the pool + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + // Examples: 'idle' + StateKey = attribute.Key("state") +) + +var ( + // idle + StateIdle = StateKey.String("idle") + // used + StateUsed = StateKey.String("used") +) + +// PoolName returns an attribute KeyValue conforming to the "pool.name" +// semantic conventions. It represents the name of the connection pool; unique +// within the instrumented application. If the connection pool +// implementation doesn't provide a name, then the +// [db.connection_string](/docs/database/database-spans.md#connection-level-attributes) +// should be used. +func PoolName(val string) attribute.KeyValue { + return PoolNameKey.String(val) +} + +// ASP.NET Core attributes +const ( + // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to + // the "aspnetcore.diagnostics.handler.type" semantic conventions. It + // represents the full type name of the + // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) + // implementation that handled the exception. + // + // Type: string + // RequirementLevel: ConditionallyRequired (if and only if the exception + // was handled by this handler.) + // Stability: experimental + // Examples: 'Contoso.MyHandler' + AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type") + + // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the + // "aspnetcore.rate_limiting.policy" semantic conventions. It represents + // the rate limiting policy name. + // + // Type: string + // RequirementLevel: ConditionallyRequired (if the matched endpoint for the + // request had a rate-limiting policy.) + // Stability: experimental + // Examples: 'fixed', 'sliding', 'token' + AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy") + + // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the + // "aspnetcore.rate_limiting.result" semantic conventions. It represents + // the rate-limiting result; it shows whether the lease was acquired or + // contains a rejection reason. + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + // Examples: 'acquired', 'request_canceled' + AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result") + + // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the + // "aspnetcore.request.is_unhandled" semantic conventions. It represents + // the flag indicating whether the request was handled by the application + // pipeline. + // + // Type: boolean + // RequirementLevel: ConditionallyRequired (if and only if the request was + // not handled.)
+ // Stability: experimental + // Examples: True + AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled") + + // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the + // "aspnetcore.routing.is_fallback" semantic conventions. It represents a + // value that indicates whether the matched route is a fallback route. + // + // Type: boolean + // RequirementLevel: ConditionallyRequired (If and only if a route was + // successfully matched.) + // Stability: experimental + // Examples: True + AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") +) + +var ( + // Lease was acquired + AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired") + // Lease request was rejected by the endpoint limiter + AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter") + // Lease request was rejected by the global limiter + AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter") + // Lease request was canceled + AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") +) + +// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming +// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It +// represents the full type name of the +// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) +// implementation that handled the exception. +func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue { + return AspnetcoreDiagnosticsHandlerTypeKey.String(val) +} + +// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to +// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents +// the rate limiting policy name. +func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue { + return AspnetcoreRateLimitingPolicyKey.String(val) +} + +// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to +// the "aspnetcore.request.is_unhandled" semantic conventions. It represents +// the flag indicating whether the request was handled by the application +// pipeline. +func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue { + return AspnetcoreRequestIsUnhandledKey.Bool(val) +} + +// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to +// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a +// value that indicates whether the matched route is a fallback route. +func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { + return AspnetcoreRoutingIsFallbackKey.Bool(val) +} + +// SignalR attributes +const ( + // SignalrConnectionStatusKey is the attribute Key conforming to the + // "signalr.connection.status" semantic conventions. It represents the + // SignalR HTTP connection closure status. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'app_shutdown', 'timeout' + SignalrConnectionStatusKey = attribute.Key("signalr.connection.status") + + // SignalrTransportKey is the attribute Key conforming to the + // "signalr.transport" semantic conventions.
It represents the [SignalR + // transport + // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'web_sockets', 'long_polling' + SignalrTransportKey = attribute.Key("signalr.transport") +) + +var ( + // The connection was closed normally + SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") + // The connection was closed due to a timeout + SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") + // The connection was closed because the app is shutting down + SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") +) + +var ( + // ServerSentEvents protocol + SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") + // LongPolling protocol + SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") + // WebSockets protocol + SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") +) + +// Describes JVM buffer metric attributes. +const ( + // JvmBufferPoolNameKey is the attribute Key conforming to the + // "jvm.buffer.pool.name" semantic conventions. It represents the name of + // the buffer pool. + // + // Type: string + // RequirementLevel: Recommended + // Stability: experimental + // Examples: 'mapped', 'direct' + // Note: Pool names are generally obtained via + // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). + JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name") +) + +// JvmBufferPoolName returns an attribute KeyValue conforming to the +// "jvm.buffer.pool.name" semantic conventions. It represents the name of the +// buffer pool. +func JvmBufferPoolName(val string) attribute.KeyValue { + return JvmBufferPoolNameKey.String(val) +} + +// Describes JVM memory metric attributes. +const ( + // JvmMemoryPoolNameKey is the attribute Key conforming to the + // "jvm.memory.pool.name" semantic conventions. It represents the name of + // the memory pool. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' + // Note: Pool names are generally obtained via + // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). + JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name") + + // JvmMemoryTypeKey is the attribute Key conforming to the + // "jvm.memory.type" semantic conventions. It represents the type of + // memory. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'heap', 'non_heap' + JvmMemoryTypeKey = attribute.Key("jvm.memory.type") +) + +var ( + // Heap memory + JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap") + // Non-heap memory + JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap") +) + +// JvmMemoryPoolName returns an attribute KeyValue conforming to the +// "jvm.memory.pool.name" semantic conventions. It represents the name of the +// memory pool. +func JvmMemoryPoolName(val string) attribute.KeyValue { + return JvmMemoryPoolNameKey.String(val) +} + +// Describes System metric attributes +const ( + // SystemDeviceKey is the attribute Key conforming to the "system.device" + // semantic conventions. 
It represents the device identifier + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '(identifier)' + SystemDeviceKey = attribute.Key("system.device") +) + +// SystemDevice returns an attribute KeyValue conforming to the +// "system.device" semantic conventions. It represents the device identifier +func SystemDevice(val string) attribute.KeyValue { + return SystemDeviceKey.String(val) +} + +// Describes System CPU metric attributes +const ( + // SystemCPULogicalNumberKey is the attribute Key conforming to the + // "system.cpu.logical_number" semantic conventions. It represents the + // logical CPU number [0..n-1] + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1 + SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") + + // SystemCPUStateKey is the attribute Key conforming to the + // "system.cpu.state" semantic conventions. It represents the state of the + // CPU + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'idle', 'interrupt' + SystemCPUStateKey = attribute.Key("system.cpu.state") +) + +var ( + // user + SystemCPUStateUser = SystemCPUStateKey.String("user") + // system + SystemCPUStateSystem = SystemCPUStateKey.String("system") + // nice + SystemCPUStateNice = SystemCPUStateKey.String("nice") + // idle + SystemCPUStateIdle = SystemCPUStateKey.String("idle") + // iowait + SystemCPUStateIowait = SystemCPUStateKey.String("iowait") + // interrupt + SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") + // steal + SystemCPUStateSteal = SystemCPUStateKey.String("steal") +) + +// SystemCPULogicalNumber returns an attribute KeyValue conforming to the +// "system.cpu.logical_number" semantic conventions. It represents the logical +// CPU number [0..n-1] +func SystemCPULogicalNumber(val int) attribute.KeyValue { + return SystemCPULogicalNumberKey.Int(val) +} + +// Describes System Memory metric attributes +const ( + // SystemMemoryStateKey is the attribute Key conforming to the + // "system.memory.state" semantic conventions. It represents the memory + // state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free', 'cached' + SystemMemoryStateKey = attribute.Key("system.memory.state") +) + +var ( + // used + SystemMemoryStateUsed = SystemMemoryStateKey.String("used") + // free + SystemMemoryStateFree = SystemMemoryStateKey.String("free") + // shared + SystemMemoryStateShared = SystemMemoryStateKey.String("shared") + // buffers + SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") + // cached + SystemMemoryStateCached = SystemMemoryStateKey.String("cached") +) + +// Describes System Memory Paging metric attributes +const ( + // SystemPagingDirectionKey is the attribute Key conforming to the + // "system.paging.direction" semantic conventions. It represents the paging + // access direction + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'in' + SystemPagingDirectionKey = attribute.Key("system.paging.direction") + + // SystemPagingStateKey is the attribute Key conforming to the + // "system.paging.state" semantic conventions. 
It represents the memory + // paging state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free' + SystemPagingStateKey = attribute.Key("system.paging.state") + + // SystemPagingTypeKey is the attribute Key conforming to the + // "system.paging.type" semantic conventions. It represents the memory + // paging type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'minor' + SystemPagingTypeKey = attribute.Key("system.paging.type") +) + +var ( + // in + SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") + // out + SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") +) + +var ( + // used + SystemPagingStateUsed = SystemPagingStateKey.String("used") + // free + SystemPagingStateFree = SystemPagingStateKey.String("free") +) + +var ( + // major + SystemPagingTypeMajor = SystemPagingTypeKey.String("major") + // minor + SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") +) + +// Describes Filesystem metric attributes +const ( + // SystemFilesystemModeKey is the attribute Key conforming to the + // "system.filesystem.mode" semantic conventions. It represents the + // filesystem mode + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'rw, ro' + SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") + + // SystemFilesystemMountpointKey is the attribute Key conforming to the + // "system.filesystem.mountpoint" semantic conventions. It represents the + // filesystem mount path + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/mnt/data' + SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") + + // SystemFilesystemStateKey is the attribute Key conforming to the + // "system.filesystem.state" semantic conventions. It represents the + // filesystem state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'used' + SystemFilesystemStateKey = attribute.Key("system.filesystem.state") + + // SystemFilesystemTypeKey is the attribute Key conforming to the + // "system.filesystem.type" semantic conventions. It represents the + // filesystem type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ext4' + SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") +) + +var ( + // used + SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") + // free + SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") + // reserved + SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") +) + +var ( + // fat32 + SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") + // exfat + SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") + // ntfs + SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") + // refs + SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") + // hfsplus + SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") + // ext4 + SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") +) + +// SystemFilesystemMode returns an attribute KeyValue conforming to the +// "system.filesystem.mode" semantic conventions. 
It represents the filesystem +// mode +func SystemFilesystemMode(val string) attribute.KeyValue { + return SystemFilesystemModeKey.String(val) +} + +// SystemFilesystemMountpoint returns an attribute KeyValue conforming to +// the "system.filesystem.mountpoint" semantic conventions. It represents the +// filesystem mount path +func SystemFilesystemMountpoint(val string) attribute.KeyValue { + return SystemFilesystemMountpointKey.String(val) +} + +// Describes Network metric attributes +const ( + // SystemNetworkStateKey is the attribute Key conforming to the + // "system.network.state" semantic conventions. It represents the state of + // a network connection; a stateless protocol MUST NOT set this attribute + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'close_wait' + SystemNetworkStateKey = attribute.Key("system.network.state") +) + +var ( + // close + SystemNetworkStateClose = SystemNetworkStateKey.String("close") + // close_wait + SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait") + // closing + SystemNetworkStateClosing = SystemNetworkStateKey.String("closing") + // delete + SystemNetworkStateDelete = SystemNetworkStateKey.String("delete") + // established + SystemNetworkStateEstablished = SystemNetworkStateKey.String("established") + // fin_wait_1 + SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1") + // fin_wait_2 + SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2") + // last_ack + SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack") + // listen + SystemNetworkStateListen = SystemNetworkStateKey.String("listen") + // syn_recv + SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv") + // syn_sent + SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent") + // time_wait + SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait") +) + +// Describes System Process metric attributes +const ( + // SystemProcessesStatusKey is the attribute Key conforming to the + // "system.processes.status" semantic conventions. It represents the + // process state, e.g., [Linux Process State + // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'running' + SystemProcessesStatusKey = attribute.Key("system.processes.status") +) + +var ( + // running + SystemProcessesStatusRunning = SystemProcessesStatusKey.String("running") + // sleeping + SystemProcessesStatusSleeping = SystemProcessesStatusKey.String("sleeping") + // stopped + SystemProcessesStatusStopped = SystemProcessesStatusKey.String("stopped") + // defunct + SystemProcessesStatusDefunct = SystemProcessesStatusKey.String("defunct") +) + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // ClientAddressKey is the attribute Key conforming to the "client.address" + // semantic conventions.
It represents the client address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.address` SHOULD represent the client address + // behind any intermediaries, for example proxies, if it's available. + ClientAddressKey = attribute.Key("client.address") + + // ClientPortKey is the attribute Key conforming to the "client.port" + // semantic conventions. It represents the client port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.port` SHOULD represent the client port behind + // any intermediaries, for example proxies, if it's available. + ClientPortKey = attribute.Key("client.port") +) + +// ClientAddress returns an attribute KeyValue conforming to the +// "client.address" semantic conventions. It represents the client address - +// domain name if available without reverse DNS lookup; otherwise, IP address +// or Unix domain socket name. +func ClientAddress(val string) attribute.KeyValue { + return ClientAddressKey.String(val) +} + +// ClientPort returns an attribute KeyValue conforming to the "client.port" +// semantic conventions. It represents the client port number. +func ClientPort(val int) attribute.KeyValue { + return ClientPortKey.Int(val) +} + +// The attributes used to describe telemetry in the context of databases. +const ( + // DBCassandraConsistencyLevelKey is the attribute Key conforming to the + // "db.cassandra.consistency_level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + + // DBCassandraCoordinatorDCKey is the attribute Key conforming to the + // "db.cassandra.coordinator.dc" semantic conventions. It represents the + // data center of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") + + // DBCassandraCoordinatorIDKey is the attribute Key conforming to the + // "db.cassandra.coordinator.id" semantic conventions. It represents the ID + // of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + + // DBCassandraIdempotenceKey is the attribute Key conforming to the + // "db.cassandra.idempotence" semantic conventions. It represents + // whether or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + + // DBCassandraPageSizeKey is the attribute Key conforming to the + // "db.cassandra.page_size" semantic conventions. It represents the fetch + // size used for paging, i.e.
how many rows will be returned at once. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + + // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming + // to the "db.cassandra.speculative_execution_count" semantic conventions. + // It represents the number of times a query was speculatively executed. + // Not set or `0` if the query was not executed speculatively. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + + // DBCassandraTableKey is the attribute Key conforming to the + // "db.cassandra.table" semantic conventions. It represents the name of the + // primary Cassandra table that the operation is acting upon, including the + // keyspace name (if applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra + // rather than sql. It is not recommended to attempt any client-side + // parsing of `db.statement` just to get this property, but it should be + // set if it is provided by the library being instrumented. If the + // operation is acting upon an anonymous table, or more than one table, + // this value MUST NOT be set. + DBCassandraTableKey = attribute.Key("db.cassandra.table") + + // DBConnectionStringKey is the attribute Key conforming to the + // "db.connection_string" semantic conventions. It represents the + // connection string used to connect to the database. It is recommended to + // remove embedded credentials. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + DBConnectionStringKey = attribute.Key("db.connection_string") + + // DBCosmosDBClientIDKey is the attribute Key conforming to the + // "db.cosmosdb.client_id" semantic conventions. It represents the unique + // Cosmos client instance id. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") + + // DBCosmosDBConnectionModeKey is the attribute Key conforming to the + // "db.cosmosdb.connection_mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") + + // DBCosmosDBContainerKey is the attribute Key conforming to the + // "db.cosmosdb.container" semantic conventions. It represents the cosmos + // DB container name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'anystring' + DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") + + // DBCosmosDBOperationTypeKey is the attribute Key conforming to the + // "db.cosmosdb.operation_type" semantic conventions. It represents the + // cosmosDB Operation Type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") + + // DBCosmosDBRequestChargeKey is the attribute Key conforming to the + // "db.cosmosdb.request_charge" semantic conventions. 
It represents the RU (request units) + // consumed for that operation + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 46.18, 1.0 + DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") + + // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the + // "db.cosmosdb.request_content_length" semantic conventions. It represents + // the request payload size in bytes + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") + + // DBCosmosDBStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos + // DB status code. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 200, 201 + DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") + + // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.sub_status_code" semantic conventions. It represents the + // cosmos DB sub status code. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1000, 1002 + DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") + + // DBElasticsearchClusterNameKey is the attribute Key conforming to the + // "db.elasticsearch.cluster.name" semantic conventions. It represents the + // identifier of an Elasticsearch cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' + DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name") + + // DBElasticsearchNodeNameKey is the attribute Key conforming to the + // "db.elasticsearch.node.name" semantic conventions. It represents the + // human-readable identifier of the node/instance to which a + // request was routed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-0000000001' + DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name") + + // DBInstanceIDKey is the attribute Key conforming to the "db.instance.id" + // semantic conventions. It represents an identifier (address, unique name, + // or any other identifier) of the database instance that is executing + // queries or mutations on the current connection. This is useful in cases + // where the database is running in a clustered environment and the + // instrumentation is able to record the node executing the query. The + // client may obtain this value in databases like MySQL using queries like + // `select @@hostname`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mysql-e26b99z.example.com' + DBInstanceIDKey = attribute.Key("db.instance.id") + + // DBJDBCDriverClassnameKey is the attribute Key conforming to the + // "db.jdbc.driver_classname" semantic conventions. It represents the + // fully-qualified class name of the [Java Database Connectivity + // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) + // driver used to connect.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") + + // DBMongoDBCollectionKey is the attribute Key conforming to the + // "db.mongodb.collection" semantic conventions. It represents the MongoDB + // collection being accessed within the database stated in `db.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") + + // DBMSSQLInstanceNameKey is the attribute Key conforming to the + // "db.mssql.instance_name" semantic conventions. It represents the + // Microsoft SQL Server [instance + // name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) + // being connected to. This name is used to determine the port of a named + // instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MSSQLSERVER' + // Note: If setting a `db.mssql.instance_name`, `server.port` is no longer + // required (but still recommended if non-standard). + DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") + + // DBNameKey is the attribute Key conforming to the "db.name" semantic + // conventions. It represents the name + // of the database being accessed. For commands that switch the database, + // this should be set to the target database (even if the command fails). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called + // "schema name". In case there are multiple layers that could be + // considered for database name (e.g. Oracle instance name and schema + // name), the database name to be used is the more specific layer (e.g. + // Oracle schema name). + DBNameKey = attribute.Key("db.name") + + // DBOperationKey is the attribute Key conforming to the "db.operation" + // semantic conventions. It represents the name of the operation being + // executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to + // attempt any client-side parsing of `db.statement` just to get this + // property, but it should be set if the operation name is provided by the + // library being instrumented. If the SQL statement has an ambiguous + // operation, or performs more than one operation, this value may be + // omitted. + DBOperationKey = attribute.Key("db.operation") + + // DBRedisDBIndexKey is the attribute Key conforming to the + // "db.redis.database_index" semantic conventions. It represents the index + // of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To + // be used instead of the generic `db.name` attribute.
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") + + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. + DBSQLTableKey = attribute.Key("db.sql.table") + + // DBStatementKey is the attribute Key conforming to the "db.statement" + // semantic conventions. It represents the database statement being + // executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + DBStatementKey = attribute.Key("db.statement") + + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents an identifier for the database management + // system (DBMS) product being used. See below for a list of well-known + // identifiers. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBSystemKey = attribute.Key("db.system") + + // DBUserKey is the attribute Key conforming to the "db.user" semantic + // conventions. It represents the username for accessing the database. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'readonly_user', 'reporting_user' + DBUserKey = attribute.Key("db.user") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +var ( + // Gateway (HTTP) connections mode + DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") + // Direct connection + DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") +) + +var ( + // invalid + DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") + // create + DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") + // patch + DBCosmosDBOperationTypePatch = 
DBCosmosDBOperationTypeKey.String("Patch") + // read + DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") + // read_feed + DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") + // delete + DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") + // replace + DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") + // execute + DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") + // query + DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") + // head + DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") + // head_feed + DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") + // upsert + DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") + // batch + DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") + // query_plan + DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") + // execute_javascript + DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") +) + +var ( + // Some other SQL database. Fallback only. See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = 
DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") + // Cloud Spanner + DBSystemSpanner = DBSystemKey.String("spanner") + // Trino + DBSystemTrino = DBSystemKey.String("trino") +) + +// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of +// the coordinating node for a query. +func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraIdempotence returns an attribute KeyValue conforming to the +// "db.cassandra.idempotence" semantic conventions. It represents the whether +// or not the query is idempotent. +func DBCassandraIdempotence(val bool) attribute.KeyValue { + return DBCassandraIdempotenceKey.Bool(val) +} + +// DBCassandraPageSize returns an attribute KeyValue conforming to the +// "db.cassandra.page_size" semantic conventions. It represents the fetch size +// used for paging, i.e. how many rows will be returned at once. +func DBCassandraPageSize(val int) attribute.KeyValue { + return DBCassandraPageSizeKey.Int(val) +} + +// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue +// conforming to the "db.cassandra.speculative_execution_count" semantic +// conventions. It represents the number of times a query was speculatively +// executed. Not set or `0` if the query was not executed speculatively. +func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// DBCassandraTable returns an attribute KeyValue conforming to the +// "db.cassandra.table" semantic conventions. It represents the name of the +// primary Cassandra table that the operation is acting upon, including the +// keyspace name (if applicable). +func DBCassandraTable(val string) attribute.KeyValue { + return DBCassandraTableKey.String(val) +} + +// DBConnectionString returns an attribute KeyValue conforming to the +// "db.connection_string" semantic conventions. It represents the connection +// string used to connect to the database. It is recommended to remove embedded +// credentials. 
+func DBConnectionString(val string) attribute.KeyValue { + return DBConnectionStringKey.String(val) +} + +// DBCosmosDBClientID returns an attribute KeyValue conforming to the +// "db.cosmosdb.client_id" semantic conventions. It represents the unique +// Cosmos client instance id. +func DBCosmosDBClientID(val string) attribute.KeyValue { + return DBCosmosDBClientIDKey.String(val) +} + +// DBCosmosDBContainer returns an attribute KeyValue conforming to the +// "db.cosmosdb.container" semantic conventions. It represents the Cosmos DB +// container name. +func DBCosmosDBContainer(val string) attribute.KeyValue { + return DBCosmosDBContainerKey.String(val) +} + +// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the +// "db.cosmosdb.request_charge" semantic conventions. It represents the RU +// consumed for that operation. +func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { + return DBCosmosDBRequestChargeKey.Float64(val) +} + +// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming +// to the "db.cosmosdb.request_content_length" semantic conventions. It +// represents the request payload size in bytes. +func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { + return DBCosmosDBRequestContentLengthKey.Int(val) +} + +// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.status_code" semantic conventions. It represents the Cosmos DB +// status code. +func DBCosmosDBStatusCode(val int) attribute.KeyValue { + return DBCosmosDBStatusCodeKey.Int(val) +} + +// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.sub_status_code" semantic conventions. It represents the Cosmos +// DB sub status code. +func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { + return DBCosmosDBSubStatusCodeKey.Int(val) +} + +// DBElasticsearchClusterName returns an attribute KeyValue conforming to +// the "db.elasticsearch.cluster.name" semantic conventions. It represents the +// identifier of an Elasticsearch cluster. +func DBElasticsearchClusterName(val string) attribute.KeyValue { + return DBElasticsearchClusterNameKey.String(val) +} + +// DBElasticsearchNodeName returns an attribute KeyValue conforming to the +// "db.elasticsearch.node.name" semantic conventions. It represents the +// human-readable identifier of the node/instance to which a +// request was routed. +func DBElasticsearchNodeName(val string) attribute.KeyValue { + return DBElasticsearchNodeNameKey.String(val) +} + +// DBInstanceID returns an attribute KeyValue conforming to the +// "db.instance.id" semantic conventions. It represents an identifier (address, +// unique name, or any other identifier) of the database instance that is +// executing queries or mutations on the current connection. This is useful in +// cases where the database is running in a clustered environment and the +// instrumentation is able to record the node executing the query. The client +// may obtain this value in databases like MySQL using queries like `select +// @@hostname`. +func DBInstanceID(val string) attribute.KeyValue { + return DBInstanceIDKey.String(val) +} + +// DBJDBCDriverClassname returns an attribute KeyValue conforming to the +// "db.jdbc.driver_classname" semantic conventions. It represents the +// fully-qualified class name of the [Java Database Connectivity +// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver +// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue { + return DBJDBCDriverClassnameKey.String(val) +} + +// DBMongoDBCollection returns an attribute KeyValue conforming to the +// "db.mongodb.collection" semantic conventions. It represents the MongoDB +// collection being accessed within the database stated in `db.name`. +func DBMongoDBCollection(val string) attribute.KeyValue { + return DBMongoDBCollectionKey.String(val) +} + +// DBMSSQLInstanceName returns an attribute KeyValue conforming to the +// "db.mssql.instance_name" semantic conventions. It represents the Microsoft +// SQL Server [instance +// name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) +// being connected to. This name is used to determine the port of a named instance. +func DBMSSQLInstanceName(val string) attribute.KeyValue { + return DBMSSQLInstanceNameKey.String(val) +} + +// DBName returns an attribute KeyValue conforming to the "db.name" semantic +// conventions. It represents the name of the database being accessed. For +// commands that switch the database, this should be set to the target database +// (even if the command fails). +func DBName(val string) attribute.KeyValue { + return DBNameKey.String(val) +} + +// DBOperation returns an attribute KeyValue conforming to the +// "db.operation" semantic conventions. It represents the name of the operation +// being executed, e.g. the [MongoDB command +// name](https://docs.mongodb.com/manual/reference/command/#database-operations) +// such as `findAndModify`, or the SQL keyword. +func DBOperation(val string) attribute.KeyValue { + return DBOperationKey.String(val) +} + +// DBRedisDBIndex returns an attribute KeyValue conforming to the +// "db.redis.database_index" semantic conventions. It represents the index of +// the database being accessed as used in the [`SELECT` +// command](https://redis.io/commands/select), provided as an integer. To be +// used instead of the generic `db.name` attribute. +func DBRedisDBIndex(val int) attribute.KeyValue { + return DBRedisDBIndexKey.Int(val) +} + +// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" +// semantic conventions. It represents the name of the primary table that the +// operation is acting upon, including the database name (if applicable). +func DBSQLTable(val string) attribute.KeyValue { + return DBSQLTableKey.String(val) +} + +// DBStatement returns an attribute KeyValue conforming to the +// "db.statement" semantic conventions. It represents the database statement +// being executed. +func DBStatement(val string) attribute.KeyValue { + return DBStatementKey.String(val) +} + +// DBUser returns an attribute KeyValue conforming to the "db.user" semantic +// conventions. It represents the username for accessing the database. +func DBUser(val string) attribute.KeyValue { + return DBUserKey.String(val) +} + +// Describes deprecated HTTP attributes. +const ( + // HTTPFlavorKey is the attribute Key conforming to the "http.flavor" + // semantic conventions. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: deprecated + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorKey = attribute.Key("http.flavor") + + // HTTPMethodKey is the attribute Key conforming to the "http.method" + // semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'GET', 'POST', 'HEAD' + // Deprecated: use `http.request.method` instead.
+ HTTPMethodKey = attribute.Key("http.method") + + // HTTPRequestContentLengthKey is the attribute Key conforming to the + // "http.request_content_length" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 3495 + // Deprecated: use `http.request.header.content-length` instead. + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + + // HTTPResponseContentLengthKey is the attribute Key conforming to the + // "http.response_content_length" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 3495 + // Deprecated: use `http.response.header.content-length` instead. + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") + + // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" + // semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'http', 'https' + // Deprecated: use `url.scheme` instead. + HTTPSchemeKey = attribute.Key("http.scheme") + + // HTTPStatusCodeKey is the attribute Key conforming to the + // "http.status_code" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 200 + // Deprecated: use `http.response.status_code` instead. + HTTPStatusCodeKey = attribute.Key("http.status_code") + + // HTTPTargetKey is the attribute Key conforming to the "http.target" + // semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '/search?q=OpenTelemetry#SemConv' + // Deprecated: use `url.path` and `url.query` instead. + HTTPTargetKey = attribute.Key("http.target") + + // HTTPURLKey is the attribute Key conforming to the "http.url" semantic + // conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Deprecated: use `url.full` instead. + HTTPURLKey = attribute.Key("http.url") + + // HTTPUserAgentKey is the attribute Key conforming to the + // "http.user_agent" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU + // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1' + // Deprecated: use `user_agent.original` instead. + HTTPUserAgentKey = attribute.Key("http.user_agent") +) + +var ( + // HTTP/1.0 + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") + // HTTP/1.1 + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") + // HTTP/2 + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") + // HTTP/3 + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") + // SPDY protocol + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") + // QUIC protocol + // + // Deprecated: use `network.protocol.name` instead. + HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") +) + +// HTTPMethod returns an attribute KeyValue conforming to the "http.method" +// semantic conventions. +// +// Deprecated: use `http.request.method` instead. 
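The Deprecated notes above pair each old key with its stable replacement; a before/after sketch under the same assumed imports, with `span` an active trace.Span as before:

func annotateHTTPClientSpan(span trace.Span) {
	// Deprecated form, kept only while older consumers still read it:
	span.SetAttributes(
		semconv.HTTPMethod("GET"),   // http.method
		semconv.HTTPStatusCode(200), // http.status_code
	)
	// Replacement per the deprecation notes:
	span.SetAttributes(
		semconv.HTTPRequestMethodGet,        // http.request.method
		semconv.HTTPResponseStatusCode(200), // http.response.status_code
	)
}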
+func HTTPMethod(val string) attribute.KeyValue { + return HTTPMethodKey.String(val) +} + +// HTTPRequestContentLength returns an attribute KeyValue conforming to the +// "http.request_content_length" semantic conventions. +// +// Deprecated: use `http.request.header.content-length` instead. +func HTTPRequestContentLength(val int) attribute.KeyValue { + return HTTPRequestContentLengthKey.Int(val) +} + +// HTTPResponseContentLength returns an attribute KeyValue conforming to the +// "http.response_content_length" semantic conventions. +// +// Deprecated: use `http.response.header.content-length` instead. +func HTTPResponseContentLength(val int) attribute.KeyValue { + return HTTPResponseContentLengthKey.Int(val) +} + +// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" +// semantic conventions. +// +// Deprecated: use `url.scheme` instead. +func HTTPScheme(val string) attribute.KeyValue { + return HTTPSchemeKey.String(val) +} + +// HTTPStatusCode returns an attribute KeyValue conforming to the +// "http.status_code" semantic conventions. +// +// Deprecated: use `http.response.status_code` instead. +func HTTPStatusCode(val int) attribute.KeyValue { + return HTTPStatusCodeKey.Int(val) +} + +// HTTPTarget returns an attribute KeyValue conforming to the "http.target" +// semantic conventions. +// +// Deprecated: use `url.path` and `url.query` instead. +func HTTPTarget(val string) attribute.KeyValue { + return HTTPTargetKey.String(val) +} + +// HTTPURL returns an attribute KeyValue conforming to the "http.url" +// semantic conventions. +// +// Deprecated: use `url.full` instead. +func HTTPURL(val string) attribute.KeyValue { + return HTTPURLKey.String(val) +} + +// HTTPUserAgent returns an attribute KeyValue conforming to the +// "http.user_agent" semantic conventions. +// +// Deprecated: use `user_agent.original` instead. +func HTTPUserAgent(val string) attribute.KeyValue { + return HTTPUserAgentKey.String(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetHostNameKey is the attribute Key conforming to the "net.host.name" + // semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'example.com' + // Deprecated: use `server.address`. + NetHostNameKey = attribute.Key("net.host.name") + + // NetHostPortKey is the attribute Key conforming to the "net.host.port" + // semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 8080 + // Deprecated: use `server.port`. + NetHostPortKey = attribute.Key("net.host.port") + + // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" + // semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'example.com' + // Deprecated: use `server.address` on client spans and `client.address` on + // server spans. + NetPeerNameKey = attribute.Key("net.peer.name") + + // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" + // semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 8080 + // Deprecated: use `server.port` on client spans and `client.port` on + // server spans. + NetPeerPortKey = attribute.Key("net.peer.port") + + // NetProtocolNameKey is the attribute Key conforming to the + // "net.protocol.name" semantic conventions. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'amqp', 'http', 'mqtt' + // Deprecated: use `network.protocol.name`. + NetProtocolNameKey = attribute.Key("net.protocol.name") + + // NetProtocolVersionKey is the attribute Key conforming to the + // "net.protocol.version" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '3.1.1' + // Deprecated: use `network.protocol.version`. + NetProtocolVersionKey = attribute.Key("net.protocol.version") + + // NetSockFamilyKey is the attribute Key conforming to the + // "net.sock.family" semantic conventions. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: deprecated + // Deprecated: use `network.transport` and `network.type`. + NetSockFamilyKey = attribute.Key("net.sock.family") + + // NetSockHostAddrKey is the attribute Key conforming to the + // "net.sock.host.addr" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '/var/my.sock' + // Deprecated: use `network.local.address`. + NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + + // NetSockHostPortKey is the attribute Key conforming to the + // "net.sock.host.port" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 8080 + // Deprecated: use `network.local.port`. + NetSockHostPortKey = attribute.Key("net.sock.host.port") + + // NetSockPeerAddrKey is the attribute Key conforming to the + // "net.sock.peer.addr" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '192.168.0.1' + // Deprecated: use `network.peer.address`. + NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") + + // NetSockPeerNameKey is the attribute Key conforming to the + // "net.sock.peer.name" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '/var/my.sock' + // Deprecated: no replacement at this time. + NetSockPeerNameKey = attribute.Key("net.sock.peer.name") + + // NetSockPeerPortKey is the attribute Key conforming to the + // "net.sock.peer.port" semantic conventions. + // + // Type: int + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 65531 + // Deprecated: use `network.peer.port`. + NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + + // NetTransportKey is the attribute Key conforming to the "net.transport" + // semantic conventions. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: deprecated + // Deprecated: use `network.transport`. + NetTransportKey = attribute.Key("net.transport") +) + +var ( + // IPv4 address + // + // Deprecated: use `network.transport` and `network.type`. + NetSockFamilyInet = NetSockFamilyKey.String("inet") + // IPv6 address + // + // Deprecated: use `network.transport` and `network.type`. + NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") + // Unix domain socket path + // + // Deprecated: use `network.transport` and `network.type`. + NetSockFamilyUnix = NetSockFamilyKey.String("unix") +) + +var ( + // ip_tcp + // + // Deprecated: use `network.transport`. + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + // + // Deprecated: use `network.transport`. + NetTransportUDP = NetTransportKey.String("ip_udp") + // Named or anonymous pipe + // + // Deprecated: use `network.transport`. 
+ NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + // + // Deprecated: use `network.transport`. + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + // + // Deprecated: use `network.transport`. + NetTransportOther = NetTransportKey.String("other") +) + +// NetHostName returns an attribute KeyValue conforming to the +// "net.host.name" semantic conventions. +// +// Deprecated: use `server.address`. +func NetHostName(val string) attribute.KeyValue { + return NetHostNameKey.String(val) +} + +// NetHostPort returns an attribute KeyValue conforming to the +// "net.host.port" semantic conventions. +// +// Deprecated: use `server.port`. +func NetHostPort(val int) attribute.KeyValue { + return NetHostPortKey.Int(val) +} + +// NetPeerName returns an attribute KeyValue conforming to the +// "net.peer.name" semantic conventions. +// +// Deprecated: use `server.address` on client spans and `client.address` on +// server spans. +func NetPeerName(val string) attribute.KeyValue { + return NetPeerNameKey.String(val) +} + +// NetPeerPort returns an attribute KeyValue conforming to the +// "net.peer.port" semantic conventions. +// +// Deprecated: use `server.port` on client spans and `client.port` on server +// spans. +func NetPeerPort(val int) attribute.KeyValue { + return NetPeerPortKey.Int(val) +} + +// NetProtocolName returns an attribute KeyValue conforming to the +// "net.protocol.name" semantic conventions. +// +// Deprecated: use `network.protocol.name`. +func NetProtocolName(val string) attribute.KeyValue { + return NetProtocolNameKey.String(val) +} + +// NetProtocolVersion returns an attribute KeyValue conforming to the +// "net.protocol.version" semantic conventions. +// +// Deprecated: use `network.protocol.version`. +func NetProtocolVersion(val string) attribute.KeyValue { + return NetProtocolVersionKey.String(val) +} + +// NetSockHostAddr returns an attribute KeyValue conforming to the +// "net.sock.host.addr" semantic conventions. +// +// Deprecated: use `network.local.address`. +func NetSockHostAddr(val string) attribute.KeyValue { + return NetSockHostAddrKey.String(val) +} + +// NetSockHostPort returns an attribute KeyValue conforming to the +// "net.sock.host.port" semantic conventions. +// +// Deprecated: use `network.local.port`. +func NetSockHostPort(val int) attribute.KeyValue { + return NetSockHostPortKey.Int(val) +} + +// NetSockPeerAddr returns an attribute KeyValue conforming to the +// "net.sock.peer.addr" semantic conventions. +// +// Deprecated: use `network.peer.address`. +func NetSockPeerAddr(val string) attribute.KeyValue { + return NetSockPeerAddrKey.String(val) +} + +// NetSockPeerName returns an attribute KeyValue conforming to the +// "net.sock.peer.name" semantic conventions. +// +// Deprecated: no replacement at this time. +func NetSockPeerName(val string) attribute.KeyValue { + return NetSockPeerNameKey.String(val) +} + +// NetSockPeerPort returns an attribute KeyValue conforming to the +// "net.sock.peer.port" semantic conventions. +// +// Deprecated: use `network.peer.port`. +func NetSockPeerPort(val int) attribute.KeyValue { + return NetSockPeerPortKey.Int(val) +} + +// These attributes may be used to describe the receiver of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. 
packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // DestinationAddressKey is the attribute Key conforming to the + // "destination.address" semantic conventions. It represents the + // destination address - domain name if available without reverse DNS + // lookup; otherwise, IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the source side, and when communicating through + // an intermediary, `destination.address` SHOULD represent the destination + // address behind any intermediaries, for example proxies, if it's + // available. + DestinationAddressKey = attribute.Key("destination.address") + + // DestinationPortKey is the attribute Key conforming to the + // "destination.port" semantic conventions. It represents the destination + // port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + DestinationPortKey = attribute.Key("destination.port") +) + +// DestinationAddress returns an attribute KeyValue conforming to the +// "destination.address" semantic conventions. It represents the destination +// address - domain name if available without reverse DNS lookup; otherwise, IP +// address or Unix domain socket name. +func DestinationAddress(val string) attribute.KeyValue { + return DestinationAddressKey.String(val) +} + +// DestinationPort returns an attribute KeyValue conforming to the +// "destination.port" semantic conventions. It represents the destination port +// number. +func DestinationPort(val int) attribute.KeyValue { + return DestinationPortKey.Int(val) +} + +// These attributes may be used for any disk related operation. +const ( + // DiskIoDirectionKey is the attribute Key conforming to the + // "disk.io.direction" semantic conventions. It represents the disk IO + // operation direction. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read' + DiskIoDirectionKey = attribute.Key("disk.io.direction") +) + +var ( + // read + DiskIoDirectionRead = DiskIoDirectionKey.String("read") + // write + DiskIoDirectionWrite = DiskIoDirectionKey.String("write") +) + +// The shared attributes used to report an error. +const ( + // ErrorTypeKey is the attribute Key conforming to the "error.type" + // semantic conventions. It describes a class of error the + // operation ended with. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'timeout', 'java.net.UnknownHostException', + // 'server_certificate_invalid', '500' + // Note: The `error.type` SHOULD be predictable and SHOULD have low + // cardinality. + // Instrumentations SHOULD document the list of errors they report. + // + // The cardinality of `error.type` within one instrumentation library + // SHOULD be low. + // Telemetry consumers that aggregate data from multiple instrumentation + // libraries and applications + // should be prepared for `error.type` to have high cardinality at query + // time when no + // additional filters are applied. + // + // If the operation has completed successfully, instrumentations SHOULD NOT + // set `error.type`.
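In practice the cardinality guidance above becomes a tiny classifier: map known failure classes to stable strings and fall back to the generated `_OTHER` value. A sketch, assuming the earlier imports plus the standard library `context` and `errors` packages; the timeout classification is illustrative only:

// errorTypeAttr classifies err into a low-cardinality error.type value,
// falling back to the generated _OTHER fallback.
func errorTypeAttr(err error) attribute.KeyValue {
	if errors.Is(err, context.DeadlineExceeded) { // one known class, as an example
		return semconv.ErrorTypeKey.String("timeout")
	}
	return semconv.ErrorTypeOther
}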
+ // + // If a specific domain defines its own set of error identifiers (such as + // HTTP or gRPC status codes), + // it's RECOMMENDED to: + // + // * Use a domain-specific attribute + // * Set `error.type` to capture all errors, regardless of whether they are + // defined within the domain-specific set or not. + ErrorTypeKey = attribute.Key("error.type") +) + +var ( + // A fallback error value to be used when the instrumentation doesn't define a custom value + ErrorTypeOther = ErrorTypeKey.String("_OTHER") +) + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // ExceptionEscapedKey is the attribute Key conforming to the + // "exception.escaped" semantic conventions. It SHOULD be + // set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Note: An exception is considered to have escaped (or left) the scope of + // a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. if the + // exception + // is passed to a Context manager's `__exit__` method in Python) but will + // usually be caught at the point of recording the exception in most + // languages. + // + // It is usually not possible to determine at the point where an exception + // is thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending + // the span, + // as done in the [example for recording span + // exceptions](#recording-an-exception). + // + // It follows that an exception may still escape the scope of the span + // even if the `exception.escaped` attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + ExceptionEscapedKey = attribute.Key("exception.escaped") + + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Division by zero', "Can't convert 'int' object to str + // implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace + // as a string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") + + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable).
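The escaped-exception rule above is commonly implemented by checking for an in-flight error just before the span ends, which is the one point where escaping is certain; a sketch (same assumed imports):

// endSpanWithError records err as an escaping exception before ending span.
func endSpanWithError(span trace.Span, err error) {
	if err != nil {
		span.RecordError(err) // emits the exception event
		span.SetAttributes(
			semconv.ExceptionEscaped(true), // known to escape: the span is ending
			semconv.ExceptionMessage(err.Error()),
		)
	}
	span.End()
}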
The dynamic type of the + // exception should be preferred over the static type in languages that + // support it. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") +) + +// ExceptionEscaped returns an attribute KeyValue conforming to the +// "exception.escaped" semantic conventions. It SHOULD be set to +// true if the exception event is recorded at a point where it is known that +// the exception is escaping the scope of the span. +func ExceptionEscaped(val bool) attribute.KeyValue { + return ExceptionEscapedKey.Bool(val) +} + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception +// message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the type of the +// exception (its fully-qualified class name, if applicable). The dynamic type +// of the exception should be preferred over the static type in languages that +// support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// Semantic convention attributes in the HTTP namespace. +const ( + // HTTPRequestBodySizeKey is the attribute Key conforming to the + // "http.request.body.size" semantic conventions. It represents the size of + // the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3495 + HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") + + // HTTPRequestMethodKey is the attribute Key conforming to the + // "http.request.method" semantic conventions. It represents the HTTP + // request method. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + // Note: HTTP request method value SHOULD be "known" to the + // instrumentation. + // By default, this convention defines "known" methods as the ones listed + // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods) + // and the PATCH method defined in + // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html). + // + // If the HTTP request method is not known to instrumentation, it MUST set + // the `http.request.method` attribute to `_OTHER`. + // + // If the HTTP instrumentation could end up converting valid HTTP request + // methods to `_OTHER`, then it MUST provide a way to override + // the list of known HTTP methods.
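The note above prescribes an exact normalization: a case-sensitive match against the known set, otherwise `_OTHER` plus the original value. A sketch (same assumed imports; the known-set map is abbreviated):

// httpMethodAttrs maps a raw request method onto http.request.method,
// preserving the original value when the method is not known.
func httpMethodAttrs(raw string) []attribute.KeyValue {
	known := map[string]attribute.KeyValue{
		"GET":    semconv.HTTPRequestMethodGet,
		"POST":   semconv.HTTPRequestMethodPost,
		"PUT":    semconv.HTTPRequestMethodPut,
		"DELETE": semconv.HTTPRequestMethodDelete,
		// ... remaining RFC 9110 methods and PATCH elided
	}
	if kv, ok := known[raw]; ok { // exact, case-sensitive match per the note
		return []attribute.KeyValue{kv}
	}
	return []attribute.KeyValue{
		semconv.HTTPRequestMethodOther,         // _OTHER
		semconv.HTTPRequestMethodOriginal(raw), // keep what the client sent
	}
}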
If this override is done via environment + // variable, then the environment variable MUST be named + // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated + // list of case-sensitive known HTTP methods + // (this list MUST be a full override of the default known methods; it is + // not a list of known methods in addition to the defaults). + // + // HTTP method names are case-sensitive and `http.request.method` attribute + // value MUST match a known HTTP method name exactly. + // Instrumentations for specific web frameworks that consider HTTP methods + // to be case insensitive SHOULD populate a canonical equivalent. + // Tracing instrumentations that do so MUST also set + // `http.request.method_original` to the original value. + HTTPRequestMethodKey = attribute.Key("http.request.method") + + // HTTPRequestMethodOriginalKey is the attribute Key conforming to the + // "http.request.method_original" semantic conventions. It represents the + // original HTTP method sent by the client in the request line. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'GeT', 'ACL', 'foo' + HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") + + // HTTPRequestResendCountKey is the attribute Key conforming to the + // "http.request.resend_count" semantic conventions. It represents the + // ordinal number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending + // (e.g. redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") + + // HTTPResponseBodySizeKey is the attribute Key conforming to the + // "http.response.body.size" semantic conventions. It represents the size + // of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3495 + HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") + + // HTTPResponseStatusCodeKey is the attribute Key conforming to the + // "http.response.status_code" semantic conventions. It represents the + // [HTTP response status + // code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 200 + HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" + // semantic conventions. It represents the matched route, that is, the path + // template in the format used by the respective server framework. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: MUST NOT be populated when this is not supported by the HTTP + // server framework as the route attribute should have low-cardinality and + // the URI path can NOT substitute it.
+ // SHOULD include the [application + // root](/docs/http/http-spans.md#http-server-definitions) if there is one. + HTTPRouteKey = attribute.Key("http.route") +) + +var ( + // CONNECT method + HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") + // DELETE method + HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") + // GET method + HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") + // HEAD method + HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") + // OPTIONS method + HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") + // PATCH method + HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") + // POST method + HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") + // PUT method + HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") + // TRACE method + HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") + // Any HTTP method that the instrumentation has no prior knowledge of + HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") +) + +// HTTPRequestBodySize returns an attribute KeyValue conforming to the +// "http.request.body.size" semantic conventions. It represents the size of the +// request payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestBodySize(val int) attribute.KeyValue { + return HTTPRequestBodySizeKey.Int(val) +} + +// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the +// "http.request.method_original" semantic conventions. It represents the +// original HTTP method sent by the client in the request line. +func HTTPRequestMethodOriginal(val string) attribute.KeyValue { + return HTTPRequestMethodOriginalKey.String(val) +} + +// HTTPRequestResendCount returns an attribute KeyValue conforming to the +// "http.request.resend_count" semantic conventions. It represents the ordinal +// number of request resending attempt (for any reason, including redirects). +func HTTPRequestResendCount(val int) attribute.KeyValue { + return HTTPRequestResendCountKey.Int(val) +} + +// HTTPResponseBodySize returns an attribute KeyValue conforming to the +// "http.response.body.size" semantic conventions. It represents the size of +// the response payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseBodySize(val int) attribute.KeyValue { + return HTTPResponseBodySizeKey.Int(val) +} + +// HTTPResponseStatusCode returns an attribute KeyValue conforming to the +// "http.response.status_code" semantic conventions. It represents the [HTTP +// response status code](https://tools.ietf.org/html/rfc7231#section-6). +func HTTPResponseStatusCode(val int) attribute.KeyValue { + return HTTPResponseStatusCodeKey.Int(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route, that is, the path +// template in the format used by the respective server framework. 
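On the server side the stable HTTP attributes combine with `http.route`; a sketch for a matched route, with the framework integration assumed and the route template taken from the examples above:

func annotateHTTPServerSpan(span trace.Span, status int) {
	span.SetAttributes(
		semconv.HTTPRequestMethodGet,
		semconv.HTTPRoute("/users/:userID"), // the template, never the raw path
		semconv.HTTPResponseStatusCode(status),
	)
}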
+func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// Attributes describing telemetry around messaging systems and messaging +// activities. +const ( + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") + + // MessagingClientIDKey is the attribute Key conforming to the + // "messaging.client_id" semantic conventions. It represents a unique + // identifier for the client that consumes or produces a message. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'client-5', 'myhost@8742@s8083jm' + MessagingClientIDKey = attribute.Key("messaging.client_id") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") + + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. 
+ // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationPublishAnonymousKey is the attribute Key conforming + // to the "messaging.destination_publish.anonymous" semantic conventions. + // It represents a boolean that is true if the publish message destination + // is anonymous (could be unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous") + + // MessagingDestinationPublishNameKey is the attribute Key conforming to + // the "messaging.destination_publish.name" semantic conventions. It + // represents the name of the original destination the message was + // published to + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: The name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker doesn't have such notion, the original destination name + // SHOULD uniquely identify the broker. + MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name") + + // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming + // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. + // It represents the ordering key for a given message. If the attribute is + // not present, the message does not have an ordering key. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ordering_key' + MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") + + // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the + // "messaging.kafka.consumer.group" semantic conventions. It represents the + // name of the Kafka Consumer Group that is handling the message. Only + // applies to consumers, not producers. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") + + // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to + // the "messaging.kafka.destination.partition" semantic conventions. It + // represents the partition the message is sent to. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2 + MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") + + // MessagingKafkaMessageKeyKey is the attribute Key conforming to the + // "messaging.kafka.message.key" semantic conventions. It represents the + // message key. Message keys in Kafka are used for grouping alike messages + // to ensure they're processed on the same partition. They differ from + // `messaging.message.id` in that they're not unique. If the key is `null`, + // the attribute MUST NOT be set. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myKey' + // Note: If the key type is not string, its string representation has to + // be supplied for the attribute. If the key has no unambiguous, canonical + // string form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") + + // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the + // "messaging.kafka.message.offset" semantic conventions. It represents the + // offset of a record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") + + // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the + // "messaging.kafka.message.tombstone" semantic conventions. It represents + // a boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") + + // MessagingMessageBodySizeKey is the attribute Key conforming to the + // "messaging.message.body.size" semantic conventions. It represents the + // size of the message body in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1439 + // Note: This can refer to either the compressed or the uncompressed body + // size. If both sizes are known, the uncompressed + // body size should be used. + MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the conversation ID identifying the conversation to which the message + // belongs, represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the + // "messaging.message.envelope.size" semantic conventions. It represents + // the size of the message body and metadata in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2738 + // Note: This can refer to either the compressed or the uncompressed size. + // If both sizes are known, the uncompressed + // size should be used. + MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") + + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingOperationKey is the attribute Key conforming to the + // "messaging.operation" semantic conventions. It represents a string + // identifying the kind of messaging operation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationKey = attribute.Key("messaging.operation") + + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the RabbitMQ message routing key.
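A Kafka publish span would combine the general messaging.* keys with the Kafka-specific ones; since this excerpt shows helper functions for only some of them, the sketch uses the attribute keys directly (same assumed imports):

func annotateKafkaPublishSpan(span trace.Span) {
	span.SetAttributes(
		semconv.MessagingSystemKey.String("kafka"), // well-known identifier
		semconv.MessagingOperationPublish,
		semconv.MessagingDestinationNameKey.String("MyTopic"),
		semconv.MessagingKafkaMessageKeyKey.String("myKey"),
		semconv.MessagingKafkaDestinationPartitionKey.Int(2),
		semconv.MessagingMessageBodySizeKey.Int(1439),
	)
}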
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myKey' + MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") + + // MessagingRocketmqClientGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.client_group" semantic conventions. It represents + // the name of the RocketMQ producer/consumer group that is handling the + // message. The client type is identified by the SpanKind. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + + // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to + // the "messaging.rocketmq.consumption_model" semantic conventions. It + // represents the model of message consumption. This only applies to + // consumer spans. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") + + // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delay_time_level" semantic + // conventions. It represents the delay time level for delay message, which + // determines the message delay time. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3 + MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + + // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delivery_timestamp" + // semantic conventions. It represents the timestamp in milliseconds that + // the delay message is expected to be delivered to consumer. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1665987217045 + MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") + + // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.message.group" semantic conventions. It represents + // the it is essential for FIFO message. Messages that belong to the same + // message group are always processed one by one within the same consumer + // group. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myMessageGroup' + MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") + + // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the + // "messaging.rocketmq.message.keys" semantic conventions. It represents + // the key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'keyA', 'keyB' + MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + + // MessagingRocketmqMessageTagKey is the attribute Key conforming to the + // "messaging.rocketmq.message.tag" semantic conventions. It represents the + // secondary classifier of message besides topic. 
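Editor's note: `messaging.rocketmq.message.delivery_timestamp` is defined in milliseconds, which is worth showing explicitly since Go code usually holds a `time.Time`. A sketch, with the same import-path assumption as above:

```go
package example

import (
	"time"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // import path assumed
)

// rocketmqDelayAttrs annotates a delay message; the delivery timestamp is
// milliseconds since the epoch, hence UnixMilli.
func rocketmqDelayAttrs(group string, deliverAt time.Time) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.MessagingRocketmqClientGroup(group),
		semconv.MessagingRocketmqMessageTypeDelay,
		semconv.MessagingRocketmqMessageDeliveryTimestamp(int(deliverAt.UnixMilli())),
	}
}
```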
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'tagA' + MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + + // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents + // the type of message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + + // MessagingRocketmqNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. It represents the + // namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") + + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents an identifier for + // the messaging system being used. See below for a list of well-known + // identifiers. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingSystemKey = attribute.Key("messaging.system") +) + +var ( + // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created + MessagingOperationPublish = MessagingOperationKey.String("publish") + // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios + MessagingOperationCreate = MessagingOperationKey.String("create") + // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages + MessagingOperationReceive = MessagingOperationKey.String("receive") + // One or more messages are passed to a consumer. 
This operation refers to push-based scenarios, where consumer register callbacks which get called by messaging SDKs + MessagingOperationDeliver = MessagingOperationKey.String("deliver") +) + +var ( + // Clustering consumption model + MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") + // Broadcasting consumption model + MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") +) + +var ( + // Normal message + MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") + // FIFO message + MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") + // Delay message + MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") + // Transaction message + MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") +) + +var ( + // Apache ActiveMQ + MessagingSystemActivemq = MessagingSystemKey.String("activemq") + // Amazon Simple Queue Service (SQS) + MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") + // Azure Event Grid + MessagingSystemAzureEventgrid = MessagingSystemKey.String("azure_eventgrid") + // Azure Event Hubs + MessagingSystemAzureEventhubs = MessagingSystemKey.String("azure_eventhubs") + // Azure Service Bus + MessagingSystemAzureServicebus = MessagingSystemKey.String("azure_servicebus") + // Google Cloud Pub/Sub + MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") + // Java Message Service + MessagingSystemJms = MessagingSystemKey.String("jms") + // Apache Kafka + MessagingSystemKafka = MessagingSystemKey.String("kafka") + // RabbitMQ + MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq") + // Apache RocketMQ + MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq") +) + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// MessagingClientID returns an attribute KeyValue conforming to the +// "messaging.client_id" semantic conventions. It represents a unique +// identifier for the client that consumes or produces a message. +func MessagingClientID(val string) attribute.KeyValue { + return MessagingClientIDKey.String(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. 
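Editor's note: the enum `var` blocks above exist so callers never hand-build the low-cardinality values. A usage sketch for a pull-based consumer span (span naming here is illustrative; import path assumed as before):

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // import path assumed
	"go.opentelemetry.io/otel/trace"
)

// startReceiveSpan tags a consumer span with the predefined enum values
// rather than hand-built attribute.KeyValues.
func startReceiveSpan(ctx context.Context, queue string) (context.Context, trace.Span) {
	return otel.Tracer("example").Start(ctx, queue+" receive",
		trace.WithSpanKind(trace.SpanKindConsumer),
		trace.WithAttributes(
			semconv.MessagingSystemKafka,
			semconv.MessagingOperationReceive,
			semconv.MessagingDestinationName(queue),
		))
}
```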
It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationPublishAnonymous returns an attribute KeyValue +// conforming to the "messaging.destination_publish.anonymous" semantic +// conventions. It represents a boolean that is true if the publish message +// destination is anonymous (could be unnamed or have auto-generated name). +func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationPublishAnonymousKey.Bool(val) +} + +// MessagingDestinationPublishName returns an attribute KeyValue conforming +// to the "messaging.destination_publish.name" semantic conventions. It +// represents the name of the original destination the message was published to +func MessagingDestinationPublishName(val string) attribute.KeyValue { + return MessagingDestinationPublishNameKey.String(val) +} + +// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic +// conventions. It represents the ordering key for a given message. If the +// attribute is not present, the message does not have an ordering key. +func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue { + return MessagingGCPPubsubMessageOrderingKeyKey.String(val) +} + +// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to +// the "messaging.kafka.consumer.group" semantic conventions. It represents the +// name of the Kafka Consumer Group that is handling the message. Only applies +// to consumers, not producers. +func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { + return MessagingKafkaConsumerGroupKey.String(val) +} + +// MessagingKafkaDestinationPartition returns an attribute KeyValue +// conforming to the "messaging.kafka.destination.partition" semantic +// conventions. It represents the partition the message is sent to. +func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { + return MessagingKafkaDestinationPartitionKey.Int(val) +} + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the +// message keys in Kafka are used for grouping alike messages to ensure they're +// processed on the same partition. They differ from `messaging.message.id` in +// that they're not unique. If the key is `null`, the attribute MUST NOT be +// set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to +// the "messaging.kafka.message.offset" semantic conventions. It represents the +// offset of a record in the corresponding Kafka partition. 
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue { + return MessagingKafkaMessageOffsetKey.Int(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming +// to the "messaging.kafka.message.tombstone" semantic conventions. It +// represents a boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// MessagingMessageBodySize returns an attribute KeyValue conforming to the +// "messaging.message.body.size" semantic conventions. It represents the size +// of the message body in bytes. +func MessagingMessageBodySize(val int) attribute.KeyValue { + return MessagingMessageBodySizeKey.Int(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the conversation ID identifying the conversation to which the +// message belongs, represented as a string. Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to +// the "messaging.message.envelope.size" semantic conventions. It represents +// the size of the message body and metadata in bytes. +func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { + return MessagingMessageEnvelopeSizeKey.Int(val) +} + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitmqDestinationRoutingKeyKey.String(val) +} + +// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.client_group" semantic conventions. It represents +// the name of the RocketMQ producer/consumer group that is handling the +// message. The client type is identified by the SpanKind. +func MessagingRocketmqClientGroup(val string) attribute.KeyValue { + return MessagingRocketmqClientGroupKey.String(val) +} + +// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delay_time_level" semantic +// conventions. It represents the delay time level for delay message, which +// determines the message delay time. +func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { + return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) +} + +// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic +// conventions. It represents the timestamp in milliseconds that the delay +// message is expected to be delivered to consumer. 
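Editor's note: this file exposes two equivalent surfaces, exported `...Key` constants and typed helper functions. The helpers are thin wrappers that fix the value type at compile time, so these two expressions yield the same `attribute.KeyValue` (import path assumed as before):

```go
package example

import semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // import path assumed

var (
	viaHelper = semconv.MessagingMessageID("452a7c7c7c7048c2f887f61572b18fc2")
	viaKey    = semconv.MessagingMessageIDKey.String("452a7c7c7c7048c2f887f61572b18fc2")
)
```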
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { + return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) +} + +// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.group" semantic conventions. It represents +// the it is essential for FIFO message. Messages that belong to the same +// message group are always processed one by one within the same consumer +// group. +func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { + return MessagingRocketmqMessageGroupKey.String(val) +} + +// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.keys" semantic conventions. It represents +// the key(s) of message, another way to mark message besides message id. +func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketmqMessageKeysKey.StringSlice(val) +} + +// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.tag" semantic conventions. It represents the +// secondary classifier of message besides topic. +func MessagingRocketmqMessageTag(val string) attribute.KeyValue { + return MessagingRocketmqMessageTagKey.String(val) +} + +// MessagingRocketmqNamespace returns an attribute KeyValue conforming to +// the "messaging.rocketmq.namespace" semantic conventions. It represents the +// namespace of RocketMQ resources, resources in different namespaces are +// individual. +func MessagingRocketmqNamespace(val string) attribute.KeyValue { + return MessagingRocketmqNamespaceKey.String(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetworkCarrierIccKey is the attribute Key conforming to the + // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 + // alpha-2 2-character country code associated with the mobile carrier + // network. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'DE' + NetworkCarrierIccKey = attribute.Key("network.carrier.icc") + + // NetworkCarrierMccKey is the attribute Key conforming to the + // "network.carrier.mcc" semantic conventions. It represents the mobile + // carrier country code. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '310' + NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") + + // NetworkCarrierMncKey is the attribute Key conforming to the + // "network.carrier.mnc" semantic conventions. It represents the mobile + // carrier network code. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '001' + NetworkCarrierMncKey = attribute.Key("network.carrier.mnc") + + // NetworkCarrierNameKey is the attribute Key conforming to the + // "network.carrier.name" semantic conventions. It represents the name of + // the mobile carrier. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'sprint' + NetworkCarrierNameKey = attribute.Key("network.carrier.name") + + // NetworkConnectionSubtypeKey is the attribute Key conforming to the + // "network.connection.subtype" semantic conventions. It represents the + // this describes more details regarding the connection.type. It may be the + // type of cell technology connection, but it could be used for describing + // details about a wifi connection. 
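Editor's note: the string-slice helpers are variadic and delegate to `attribute.Key.StringSlice`, e.g. (import path assumed as before):

```go
package example

import semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // import path assumed

// Equivalent to semconv.MessagingRocketmqMessageKeysKey.StringSlice([]string{"keyA", "keyB"}).
var rocketmqKeys = semconv.MessagingRocketmqMessageKeys("keyA", "keyB")
```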
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'LTE' + NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") + + // NetworkConnectionTypeKey is the attribute Key conforming to the + // "network.connection.type" semantic conventions. It represents the + // internet connection type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'wifi' + NetworkConnectionTypeKey = attribute.Key("network.connection.type") + + // NetworkIoDirectionKey is the attribute Key conforming to the + // "network.io.direction" semantic conventions. It represents the network + // IO operation direction. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'transmit' + NetworkIoDirectionKey = attribute.Key("network.io.direction") + + // NetworkLocalAddressKey is the attribute Key conforming to the + // "network.local.address" semantic conventions. It represents the local + // address of the network connection - IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + NetworkLocalAddressKey = attribute.Key("network.local.address") + + // NetworkLocalPortKey is the attribute Key conforming to the + // "network.local.port" semantic conventions. It represents the local port + // number of the network connection. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + NetworkLocalPortKey = attribute.Key("network.local.port") + + // NetworkPeerAddressKey is the attribute Key conforming to the + // "network.peer.address" semantic conventions. It represents the peer + // address of the network connection - IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + NetworkPeerAddressKey = attribute.Key("network.peer.address") + + // NetworkPeerPortKey is the attribute Key conforming to the + // "network.peer.port" semantic conventions. It represents the peer port + // number of the network connection. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + NetworkPeerPortKey = attribute.Key("network.peer.port") + + // NetworkProtocolNameKey is the attribute Key conforming to the + // "network.protocol.name" semantic conventions. It represents the [OSI + // application layer](https://osi-model.com/application-layer/) or non-OSI + // equivalent. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + // Note: The value SHOULD be normalized to lowercase. + NetworkProtocolNameKey = attribute.Key("network.protocol.name") + + // NetworkProtocolVersionKey is the attribute Key conforming to the + // "network.protocol.version" semantic conventions. It represents the + // version of the protocol specified in `network.protocol.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3.1.1' + // Note: `network.protocol.version` refers to the version of the protocol + // used and might be different from the protocol client's version. If the + // HTTP client has a version of `0.27.2`, but sends HTTP version `1.1`, + // this attribute should be set to `1.1`. 
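Editor's note: the `network.protocol.version` note (an HTTP client at `0.27.2` speaking HTTP `1.1` records `1.1`) is the part instrumentations most often get wrong. A sketch of the intended shape, import path assumed as before:

```go
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // import path assumed
)

// Record the protocol that went over the wire, not the client library's
// own version, and keep the protocol name lowercase.
var httpConnAttrs = []attribute.KeyValue{
	semconv.NetworkProtocolName("http"),
	semconv.NetworkProtocolVersion("1.1"),
}
```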
+ NetworkProtocolVersionKey = attribute.Key("network.protocol.version") + + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the [OSI + // transport layer](https://osi-model.com/transport-layer/) or + // [inter-process communication + // method](https://wikipedia.org/wiki/Inter-process_communication). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tcp', 'udp' + // Note: The value SHOULD be normalized to lowercase. + // + // Consider always setting the transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port + // 12345. + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" + // semantic conventions. It represents the [OSI network + // layer](https://osi-model.com/network-layer/) or non-OSI equivalent. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ipv4', 'ipv6' + // Note: The value SHOULD be normalized to lowercase. + NetworkTypeKey = attribute.Key("network.type") +) + +var ( + // GPRS + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B + NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") + // LTE + NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") + // EHRPD + NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") + // GSM + NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") +) + +var ( + // wifi + NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") + // wired + NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") + // cell + NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") + // unavailable + NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") + // unknown + NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") +) + +var ( + // transmit + NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit") + // receive + NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive") +) + +var ( + // TCP + NetworkTransportTCP = NetworkTransportKey.String("tcp") + // UDP + NetworkTransportUDP = NetworkTransportKey.String("udp") + // Named or anonymous pipe + NetworkTransportPipe = NetworkTransportKey.String("pipe") + // Unix domain socket + NetworkTransportUnix = NetworkTransportKey.String("unix") +) + +var ( + // IPv4 + NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") + // IPv6 + NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") +) + +// NetworkCarrierIcc returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierIcc(val string) attribute.KeyValue { + return NetworkCarrierIccKey.String(val) +} + +// NetworkCarrierMcc returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. +func NetworkCarrierMcc(val string) attribute.KeyValue { + return NetworkCarrierMccKey.String(val) +} + +// NetworkCarrierMnc returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMnc(val string) attribute.KeyValue { + return NetworkCarrierMncKey.String(val) +} + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkLocalAddress returns an attribute KeyValue conforming to the +// "network.local.address" semantic conventions. It represents the local +// address of the network connection - IP address or Unix domain socket name. 
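Editor's note: the `network.transport` note suggests always pairing a transport with any port attribute, since TCP 12345 and UDP 12345 are different endpoints. A sketch using the enum values and helpers from this hunk (import path assumed as before):

```go
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // import path assumed
)

// peerAttrs disambiguates the port by also recording the transport and
// network layer in use.
var peerAttrs = []attribute.KeyValue{
	semconv.NetworkTransportTCP,
	semconv.NetworkTypeIpv4,
	semconv.NetworkPeerAddress("10.1.2.80"),
	semconv.NetworkPeerPort(12345),
}
```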
+func NetworkLocalAddress(val string) attribute.KeyValue { + return NetworkLocalAddressKey.String(val) +} + +// NetworkLocalPort returns an attribute KeyValue conforming to the +// "network.local.port" semantic conventions. It represents the local port +// number of the network connection. +func NetworkLocalPort(val int) attribute.KeyValue { + return NetworkLocalPortKey.Int(val) +} + +// NetworkPeerAddress returns an attribute KeyValue conforming to the +// "network.peer.address" semantic conventions. It represents the peer address +// of the network connection - IP address or Unix domain socket name. +func NetworkPeerAddress(val string) attribute.KeyValue { + return NetworkPeerAddressKey.String(val) +} + +// NetworkPeerPort returns an attribute KeyValue conforming to the +// "network.peer.port" semantic conventions. It represents the peer port number +// of the network connection. +func NetworkPeerPort(val int) attribute.KeyValue { + return NetworkPeerPortKey.Int(val) +} + +// NetworkProtocolName returns an attribute KeyValue conforming to the +// "network.protocol.name" semantic conventions. It represents the [OSI +// application layer](https://osi-model.com/application-layer/) or non-OSI +// equivalent. +func NetworkProtocolName(val string) attribute.KeyValue { + return NetworkProtocolNameKey.String(val) +} + +// NetworkProtocolVersion returns an attribute KeyValue conforming to the +// "network.protocol.version" semantic conventions. It represents the version +// of the protocol specified in `network.protocol.name`. +func NetworkProtocolVersion(val string) attribute.KeyValue { + return NetworkProtocolVersionKey.String(val) +} + +// Attributes for remote procedure calls. +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes](https://connect.build/docs/protocol/#error-codes) of the + // Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") + + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. 
Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // doesn't specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called, must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers. 
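Editor's note: `rpc.method` must equal the `$method` part of the span name, so it is safest to derive both from the same values. A gRPC server-side sketch (names illustrative, import path assumed as before):

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // import path assumed
	"go.opentelemetry.io/otel/trace"
)

// startGRPCServerSpan names the span "$service/$method" and records the
// matching rpc.* attributes from the same constants.
func startGRPCServerSpan(ctx context.Context) (context.Context, trace.Span) {
	const service, method = "myservice.EchoService", "Echo"
	return otel.Tracer("example").Start(ctx, service+"/"+method,
		trace.WithSpanKind(trace.SpanKindServer),
		trace.WithAttributes(
			semconv.RPCSystemGRPC,
			semconv.RPCService(service),
			semconv.RPCMethod(method),
		))
}
```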
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCSystemKey = attribute.Key("rpc.system") +) + +var ( + // cancelled + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + RPCSystemConnectRPC = 
RPCSystemKey.String("connect_rpc") +) + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// doesn't specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// These attributes may be used to describe the server in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // ServerAddressKey is the attribute Key conforming to the "server.address" + // semantic conventions. It represents the server domain name if available + // without reverse DNS lookup; otherwise, IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.address` SHOULD represent the server address + // behind any intermediaries, for example proxies, if it's available. 
+ ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" + // semantic conventions. It represents the server port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.port` SHOULD represent the server port behind + // any intermediaries, for example proxies, if it's available. + ServerPortKey = attribute.Key("server.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the +// "server.address" semantic conventions. It represents the server domain name +// if available without reverse DNS lookup; otherwise, IP address or Unix +// domain socket name. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the server port number. +func ServerPort(val int) attribute.KeyValue { + return ServerPortKey.Int(val) +} + +// These attributes may be used to describe the sender of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // SourceAddressKey is the attribute Key conforming to the "source.address" + // semantic conventions. It represents the source address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the destination side, and when communicating + // through an intermediary, `source.address` SHOULD represent the source + // address behind any intermediaries, for example proxies, if it's + // available. + SourceAddressKey = attribute.Key("source.address") + + // SourcePortKey is the attribute Key conforming to the "source.port" + // semantic conventions. It represents the source port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + SourcePortKey = attribute.Key("source.port") +) + +// SourceAddress returns an attribute KeyValue conforming to the +// "source.address" semantic conventions. It represents the source address - +// domain name if available without reverse DNS lookup; otherwise, IP address +// or Unix domain socket name. +func SourceAddress(val string) attribute.KeyValue { + return SourceAddressKey.String(val) +} + +// SourcePort returns an attribute KeyValue conforming to the "source.port" +// semantic conventions. It represents the source port number +func SourcePort(val int) attribute.KeyValue { + return SourcePortKey.Int(val) +} + +// Semantic convention attributes in the TLS namespace. +const ( + // TLSCipherKey is the attribute Key conforming to the "tls.cipher" + // semantic conventions. 
It represents the string indicating the + // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) + // used during the current connection. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', + // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' + // Note: The values allowed for `tls.cipher` MUST be one of the + // `Descriptions` of the [registered TLS Cipher + // Suits](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4). + TLSCipherKey = attribute.Key("tls.cipher") + + // TLSClientCertificateKey is the attribute Key conforming to the + // "tls.client.certificate" semantic conventions. It represents the + // pEM-encoded stand-alone certificate offered by the client. This is + // usually mutually-exclusive of `client.certificate_chain` since this + // value also exists in that list. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...' + TLSClientCertificateKey = attribute.Key("tls.client.certificate") + + // TLSClientCertificateChainKey is the attribute Key conforming to the + // "tls.client.certificate_chain" semantic conventions. It represents the + // array of PEM-encoded certificates that make up the certificate chain + // offered by the client. This is usually mutually-exclusive of + // `client.certificate` since that value should be the first certificate in + // the chain. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") + + // TLSClientHashMd5Key is the attribute Key conforming to the + // "tls.client.hash.md5" semantic conventions. It represents the + // certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") + + // TLSClientHashSha1Key is the attribute Key conforming to the + // "tls.client.hash.sha1" semantic conventions. It represents the + // certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") + + // TLSClientHashSha256Key is the attribute Key conforming to the + // "tls.client.hash.sha256" semantic conventions. It represents the + // certificate fingerprint using the SHA256 digest of DER-encoded version + // of certificate offered by the client. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") + + // TLSClientIssuerKey is the attribute Key conforming to the + // "tls.client.issuer" semantic conventions. 
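Editor's note: the `tls.*.hash.*` attributes are fingerprints of the DER encoding and, per the notes, uppercase hex. `crypto/x509` already exposes the DER bytes as `Certificate.Raw`; a sketch, import path assumed as before:

```go
package example

import (
	"crypto/sha256"
	"crypto/x509"
	"encoding/hex"
	"strings"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // import path assumed
)

// clientCertSha256 fingerprints the DER-encoded certificate and uppercases
// the hex digest, as the attribute notes require.
func clientCertSha256(cert *x509.Certificate) attribute.KeyValue {
	sum := sha256.Sum256(cert.Raw) // cert.Raw is the DER encoding
	return semconv.TLSClientHashSha256(strings.ToUpper(hex.EncodeToString(sum[:])))
}
```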
It represents the + // distinguished name of + // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) + // of the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, + // DC=com' + TLSClientIssuerKey = attribute.Key("tls.client.issuer") + + // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" + // semantic conventions. It represents a hash that identifies clients based + // on how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + TLSClientJa3Key = attribute.Key("tls.client.ja3") + + // TLSClientNotAfterKey is the attribute Key conforming to the + // "tls.client.not_after" semantic conventions. It represents the date/Time + // indicating when client certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + TLSClientNotAfterKey = attribute.Key("tls.client.not_after") + + // TLSClientNotBeforeKey is the attribute Key conforming to the + // "tls.client.not_before" semantic conventions. It represents the + // date/Time indicating when client certificate is first considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") + + // TLSClientServerNameKey is the attribute Key conforming to the + // "tls.client.server_name" semantic conventions. It represents the also + // called an SNI, this tells the server which hostname to which the client + // is attempting to connect to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry.io' + TLSClientServerNameKey = attribute.Key("tls.client.server_name") + + // TLSClientSubjectKey is the attribute Key conforming to the + // "tls.client.subject" semantic conventions. It represents the + // distinguished name of subject of the x.509 certificate presented by the + // client. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com' + TLSClientSubjectKey = attribute.Key("tls.client.subject") + + // TLSClientSupportedCiphersKey is the attribute Key conforming to the + // "tls.client.supported_ciphers" semantic conventions. It represents the + // array of ciphers offered by the client during the client hello. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."' + TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") + + // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic + // conventions. It represents the string indicating the curve used for the + // given cipher, when applicable + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'secp256r1' + TLSCurveKey = attribute.Key("tls.curve") + + // TLSEstablishedKey is the attribute Key conforming to the + // "tls.established" semantic conventions. 
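Editor's note: on the server side, `tls.client.server_name` (SNI) and `tls.client.supported_ciphers` are both available from the ClientHello, e.g. inside a `tls.Config.GetConfigForClient` callback. A sketch, import path assumed as before:

```go
package example

import (
	"crypto/tls"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // import path assumed
)

// clientHelloAttrs captures the SNI and the offered cipher suites,
// translated to their IANA names via tls.CipherSuiteName.
func clientHelloAttrs(hello *tls.ClientHelloInfo) []attribute.KeyValue {
	names := make([]string, 0, len(hello.CipherSuites))
	for _, id := range hello.CipherSuites {
		names = append(names, tls.CipherSuiteName(id))
	}
	return []attribute.KeyValue{
		semconv.TLSClientServerName(hello.ServerName),
		semconv.TLSClientSupportedCiphers(names...),
	}
}
```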
It represents the boolean flag + // indicating if the TLS negotiation was successful and transitioned to an + // encrypted tunnel. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Examples: True + TLSEstablishedKey = attribute.Key("tls.established") + + // TLSNextProtocolKey is the attribute Key conforming to the + // "tls.next_protocol" semantic conventions. It represents the string + // indicating the protocol being tunneled. Per the values in the [IANA + // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), + // this string should be lower case. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'http/1.1' + TLSNextProtocolKey = attribute.Key("tls.next_protocol") + + // TLSProtocolNameKey is the attribute Key conforming to the + // "tls.protocol.name" semantic conventions. It represents the normalized + // lowercase protocol name parsed from original string of the negotiated + // [SSL/TLS protocol + // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + TLSProtocolNameKey = attribute.Key("tls.protocol.name") + + // TLSProtocolVersionKey is the attribute Key conforming to the + // "tls.protocol.version" semantic conventions. It represents the numeric + // part of the version parsed from the original string of the negotiated + // [SSL/TLS protocol + // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.2', '3' + TLSProtocolVersionKey = attribute.Key("tls.protocol.version") + + // TLSResumedKey is the attribute Key conforming to the "tls.resumed" + // semantic conventions. It represents the boolean flag indicating if this + // TLS connection was resumed from an existing TLS negotiation. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Examples: True + TLSResumedKey = attribute.Key("tls.resumed") + + // TLSServerCertificateKey is the attribute Key conforming to the + // "tls.server.certificate" semantic conventions. It represents the + // pEM-encoded stand-alone certificate offered by the server. This is + // usually mutually-exclusive of `server.certificate_chain` since this + // value also exists in that list. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...' + TLSServerCertificateKey = attribute.Key("tls.server.certificate") + + // TLSServerCertificateChainKey is the attribute Key conforming to the + // "tls.server.certificate_chain" semantic conventions. It represents the + // array of PEM-encoded certificates that make up the certificate chain + // offered by the server. This is usually mutually-exclusive of + // `server.certificate` since that value should be the first certificate in + // the chain. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") + + // TLSServerHashMd5Key is the attribute Key conforming to the + // "tls.server.hash.md5" semantic conventions. It represents the + // certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the server. 
For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") + + // TLSServerHashSha1Key is the attribute Key conforming to the + // "tls.server.hash.sha1" semantic conventions. It represents the + // certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") + + // TLSServerHashSha256Key is the attribute Key conforming to the + // "tls.server.hash.sha256" semantic conventions. It represents the + // certificate fingerprint using the SHA256 digest of DER-encoded version + // of certificate offered by the server. For consistency with other hash + // values, this value should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") + + // TLSServerIssuerKey is the attribute Key conforming to the + // "tls.server.issuer" semantic conventions. It represents the + // distinguished name of + // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) + // of the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, + // DC=com' + TLSServerIssuerKey = attribute.Key("tls.server.issuer") + + // TLSServerJa3sKey is the attribute Key conforming to the + // "tls.server.ja3s" semantic conventions. It represents a hash that + // identifies servers based on how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + TLSServerJa3sKey = attribute.Key("tls.server.ja3s") + + // TLSServerNotAfterKey is the attribute Key conforming to the + // "tls.server.not_after" semantic conventions. It represents the date/Time + // indicating when server certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + TLSServerNotAfterKey = attribute.Key("tls.server.not_after") + + // TLSServerNotBeforeKey is the attribute Key conforming to the + // "tls.server.not_before" semantic conventions. It represents the + // date/Time indicating when server certificate is first considered valid. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") + + // TLSServerSubjectKey is the attribute Key conforming to the + // "tls.server.subject" semantic conventions. It represents the + // distinguished name of subject of the x.509 certificate presented by the + // server. 
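Editor's note: the `not_before`/`not_after` attributes are strings, and the examples above use UTC timestamps with a `Z` suffix, i.e. RFC 3339. A sketch of populating the server-certificate attributes from an `*x509.Certificate`; the `TLSServer*` helper functions fall past the end of this hunk but are assumed to mirror the client-side ones:

```go
package example

import (
	"crypto/x509"
	"time"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // import path assumed
)

// serverCertAttrs formats validity bounds as RFC 3339 UTC and renders the
// subject/issuer distinguished names via pkix.Name.String.
func serverCertAttrs(cert *x509.Certificate) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.TLSServerSubject(cert.Subject.String()),
		semconv.TLSServerIssuer(cert.Issuer.String()),
		semconv.TLSServerNotBefore(cert.NotBefore.UTC().Format(time.RFC3339)),
		semconv.TLSServerNotAfter(cert.NotAfter.UTC().Format(time.RFC3339)),
	}
}
```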
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com'
+	TLSServerSubjectKey = attribute.Key("tls.server.subject")
+)
+
+var (
+	// ssl
+	TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
+	// tls
+	TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
+)
+
+// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
+// semantic conventions. It represents the string indicating the
+// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used
+// during the current connection.
+func TLSCipher(val string) attribute.KeyValue {
+	return TLSCipherKey.String(val)
+}
+
+// TLSClientCertificate returns an attribute KeyValue conforming to the
+// "tls.client.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the client. This is usually
+// mutually-exclusive of `client.certificate_chain` since this value also
+// exists in that list.
+func TLSClientCertificate(val string) attribute.KeyValue {
+	return TLSClientCertificateKey.String(val)
+}
+
+// TLSClientCertificateChain returns an attribute KeyValue conforming to the
+// "tls.client.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the client. This is usually mutually-exclusive of `client.certificate` since
+// that value should be the first certificate in the chain.
+func TLSClientCertificateChain(val ...string) attribute.KeyValue {
+	return TLSClientCertificateChainKey.StringSlice(val)
+}
+
+// TLSClientHashMd5 returns an attribute KeyValue conforming to the
+// "tls.client.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashMd5(val string) attribute.KeyValue {
+	return TLSClientHashMd5Key.String(val)
+}
+
+// TLSClientHashSha1 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha1(val string) attribute.KeyValue {
+	return TLSClientHashSha1Key.String(val)
+}
+
+// TLSClientHashSha256 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha256(val string) attribute.KeyValue {
+	return TLSClientHashSha256Key.String(val)
+}
+
+// TLSClientIssuer returns an attribute KeyValue conforming to the
+// "tls.client.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the client.
+func TLSClientIssuer(val string) attribute.KeyValue {
+	return TLSClientIssuerKey.String(val)
+}
+// TLSClientJa3 returns an attribute KeyValue conforming to the
+// "tls.client.ja3" semantic conventions. It represents a hash that identifies
+// clients based on how they perform an SSL/TLS handshake.
+func TLSClientJa3(val string) attribute.KeyValue {
+	return TLSClientJa3Key.String(val)
+}
+
+// TLSClientNotAfter returns an attribute KeyValue conforming to the
+// "tls.client.not_after" semantic conventions. It represents the date/time
+// indicating when the client certificate is no longer considered valid.
+func TLSClientNotAfter(val string) attribute.KeyValue {
+	return TLSClientNotAfterKey.String(val)
+}
+
+// TLSClientNotBefore returns an attribute KeyValue conforming to the
+// "tls.client.not_before" semantic conventions. It represents the date/time
+// indicating when the client certificate is first considered valid.
+func TLSClientNotBefore(val string) attribute.KeyValue {
+	return TLSClientNotBeforeKey.String(val)
+}
+
+// TLSClientServerName returns an attribute KeyValue conforming to the
+// "tls.client.server_name" semantic conventions. It represents the server
+// name indication (SNI) sent by the client, telling the server which hostname
+// the client is attempting to connect to.
+func TLSClientServerName(val string) attribute.KeyValue {
+	return TLSClientServerNameKey.String(val)
+}
+
+// TLSClientSubject returns an attribute KeyValue conforming to the
+// "tls.client.subject" semantic conventions. It represents the distinguished
+// name of the subject of the x.509 certificate presented by the client.
+func TLSClientSubject(val string) attribute.KeyValue {
+	return TLSClientSubjectKey.String(val)
+}
+
+// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
+// "tls.client.supported_ciphers" semantic conventions. It represents the array
+// of ciphers offered by the client during the client hello.
+func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
+	return TLSClientSupportedCiphersKey.StringSlice(val)
+}
+
+// TLSCurve returns an attribute KeyValue conforming to the "tls.curve"
+// semantic conventions. It represents the string indicating the curve used for
+// the given cipher, when applicable.
+func TLSCurve(val string) attribute.KeyValue {
+	return TLSCurveKey.String(val)
+}
+
+// TLSEstablished returns an attribute KeyValue conforming to the
+// "tls.established" semantic conventions. It represents the boolean flag
+// indicating if the TLS negotiation was successful and transitioned to an
+// encrypted tunnel.
+func TLSEstablished(val bool) attribute.KeyValue {
+	return TLSEstablishedKey.Bool(val)
+}
+
+// TLSNextProtocol returns an attribute KeyValue conforming to the
+// "tls.next_protocol" semantic conventions. It represents the string
+// indicating the protocol being tunneled. Per the values in the [IANA
+// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+// this string should be lower case.
+func TLSNextProtocol(val string) attribute.KeyValue {
+	return TLSNextProtocolKey.String(val)
+}
+
+// TLSProtocolVersion returns an attribute KeyValue conforming to the
+// "tls.protocol.version" semantic conventions. It represents the numeric part
+// of the version parsed from the original string of the negotiated [SSL/TLS
+// protocol
+// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES).
+func TLSProtocolVersion(val string) attribute.KeyValue {
+	return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions.
+// It represents the boolean flag indicating if this TLS
+// connection was resumed from an existing TLS negotiation.
+func TLSResumed(val bool) attribute.KeyValue {
+	return TLSResumedKey.Bool(val)
+}
+
+// TLSServerCertificate returns an attribute KeyValue conforming to the
+// "tls.server.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the server. This is usually
+// mutually-exclusive of `server.certificate_chain` since this value also
+// exists in that list.
+func TLSServerCertificate(val string) attribute.KeyValue {
+	return TLSServerCertificateKey.String(val)
+}
+
+// TLSServerCertificateChain returns an attribute KeyValue conforming to the
+// "tls.server.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the server. This is usually mutually-exclusive of `server.certificate` since
+// that value should be the first certificate in the chain.
+func TLSServerCertificateChain(val ...string) attribute.KeyValue {
+	return TLSServerCertificateChainKey.StringSlice(val)
+}
+
+// TLSServerHashMd5 returns an attribute KeyValue conforming to the
+// "tls.server.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashMd5(val string) attribute.KeyValue {
+	return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+	return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+	return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the server.
+func TLSServerIssuer(val string) attribute.KeyValue {
+	return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
+// servers based on how they perform an SSL/TLS handshake.
+func TLSServerJa3s(val string) attribute.KeyValue {
+	return TLSServerJa3sKey.String(val)
+}
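+
+// Example: a minimal sketch of attaching a few of the TLS attributes from
+// this section to a span; tracer (a trace.Tracer) and ctx are assumed to
+// come from the caller's go.opentelemetry.io/otel setup, and the attribute
+// values shown are illustrative only:
+//
+//	_, span := tracer.Start(ctx, "tls.handshake")
+//	span.SetAttributes(
+//		TLSEstablished(true),
+//		TLSProtocolVersion("1.3"),
+//		TLSCipher("TLS_AES_128_GCM_SHA256"),
+//	)
+//	span.End()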
+
+// TLSServerNotAfter returns an attribute KeyValue conforming to the
+// "tls.server.not_after" semantic conventions. It represents the date/time
+// indicating when the server certificate is no longer considered valid.
+func TLSServerNotAfter(val string) attribute.KeyValue {
+	return TLSServerNotAfterKey.String(val)
+}
+
+// TLSServerNotBefore returns an attribute KeyValue conforming to the
+// "tls.server.not_before" semantic conventions. It represents the date/time
+// indicating when the server certificate is first considered valid.
+func TLSServerNotBefore(val string) attribute.KeyValue {
+	return TLSServerNotBeforeKey.String(val)
+}
+
+// TLSServerSubject returns an attribute KeyValue conforming to the
+// "tls.server.subject" semantic conventions. It represents the distinguished
+// name of the subject of the x.509 certificate presented by the server.
+func TLSServerSubject(val string) attribute.KeyValue {
+	return TLSServerSubjectKey.String(val)
+}
+
+// Attributes describing URL.
+const (
+	// URLFragmentKey is the attribute Key conforming to the "url.fragment"
+	// semantic conventions. It represents the [URI
+	// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'SemConv'
+	URLFragmentKey = attribute.Key("url.fragment")
+
+	// URLFullKey is the attribute Key conforming to the "url.full" semantic
+	// conventions. It represents the absolute URL describing a network
+	// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
+	// '//localhost'
+	// Note: For network calls, URL usually has
+	// `scheme://host[:port][path][?query][#fragment]` format, where the
+	// fragment is not transmitted over HTTP, but if it is known, it SHOULD be
+	// included nevertheless.
+	// `url.full` MUST NOT contain credentials passed via URL in form of
+	// `https://username:password@www.example.com/`. In such case username and
+	// password SHOULD be redacted and attribute's value SHOULD be
+	// `https://REDACTED:REDACTED@www.example.com/`.
+	// `url.full` SHOULD capture the absolute URL when it is available (or can
+	// be reconstructed) and SHOULD NOT be validated or modified except for
+	// sanitizing purposes.
+	URLFullKey = attribute.Key("url.full")
+
+	// URLPathKey is the attribute Key conforming to the "url.path" semantic
+	// conventions. It represents the [URI
+	// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/search'
+	URLPathKey = attribute.Key("url.path")
+
+	// URLQueryKey is the attribute Key conforming to the "url.query" semantic
+	// conventions. It represents the [URI
+	// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'q=OpenTelemetry'
+	// Note: Sensitive content provided in query string SHOULD be scrubbed when
+	// instrumentations can identify it.
+	URLQueryKey = attribute.Key("url.query")
+
+	// URLSchemeKey is the attribute Key conforming to the "url.scheme"
+	// semantic conventions. It represents the [URI
+	// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+	// identifying the used protocol.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'https', 'ftp', 'telnet'
+	URLSchemeKey = attribute.Key("url.scheme")
+)
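+
+// Example: a small sketch assembling URL attributes from the keys above; the
+// values are illustrative only:
+//
+//	attrs := []attribute.KeyValue{
+//		URLSchemeKey.String("https"),
+//		URLPathKey.String("/search"),
+//		URLQueryKey.String("q=OpenTelemetry"),
+//	}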
+
+// URLFragment returns an attribute KeyValue conforming to the
+// "url.fragment" semantic conventions. It represents the [URI
+// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component.
+func URLFragment(val string) attribute.KeyValue {
+	return URLFragmentKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full"
+// semantic conventions. It represents the absolute URL describing a network
+// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986).
+func URLFull(val string) attribute.KeyValue {
+	return URLFullKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path"
+// semantic conventions. It represents the [URI
+// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component.
+func URLPath(val string) attribute.KeyValue {
+	return URLPathKey.String(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query"
+// semantic conventions. It represents the [URI
+// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component.
+func URLQuery(val string) attribute.KeyValue {
+	return URLQueryKey.String(val)
+}
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI
+// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+// identifying the used protocol.
+func URLScheme(val string) attribute.KeyValue {
+	return URLSchemeKey.String(val)
+}
+
+// Describes user-agent attributes.
+const (
+	// UserAgentOriginalKey is the attribute Key conforming to the
+	// "user_agent.original" semantic conventions. It represents the value of
+	// the [HTTP
+	// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+	// header sent by the client.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
+	// iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+	// Version/14.1.2 Mobile/15E148 Safari/604.1'
+	UserAgentOriginalKey = attribute.Key("user_agent.original")
+)
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func UserAgentOriginal(val string) attribute.KeyValue {
+	return UserAgentOriginalKey.String(val)
+}
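+
+// Example: a sketch recording the User-Agent of an incoming request; r is
+// assumed to be an *http.Request and span an active trace.Span:
+//
+//	span.SetAttributes(UserAgentOriginal(r.UserAgent()))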
+
+// Session is defined as the period of time encompassing all activities
+// performed by the application and the actions executed by the end user.
+// Consequently, a Session is represented as a collection of Logs, Events, and
+// Spans emitted by the Client Application throughout the Session's duration.
+// Each Session is assigned a unique identifier, which is included as an
+// attribute in the Logs, Events, and Spans generated during the Session's
+// lifecycle.
+// When a session reaches end of life, typically due to user inactivity or
+// session timeout, a new session identifier will be assigned. The previous
+// session identifier may be provided by the instrumentation so that telemetry
+// backends can link the two sessions.
+const (
+	// SessionIDKey is the attribute Key conforming to the "session.id"
+	// semantic conventions. It represents a unique id to identify a session.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '00112233-4455-6677-8899-aabbccddeeff'
+	SessionIDKey = attribute.Key("session.id")
+
+	// SessionPreviousIDKey is the attribute Key conforming to the
+	// "session.previous_id" semantic conventions. It represents the previous
+	// `session.id` for this user, when known.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '00112233-4455-6677-8899-aabbccddeeff'
+	SessionPreviousIDKey = attribute.Key("session.previous_id")
+)
+
+// SessionID returns an attribute KeyValue conforming to the "session.id"
+// semantic conventions. It represents a unique id to identify a session.
+func SessionID(val string) attribute.KeyValue {
+	return SessionIDKey.String(val)
+}
+
+// SessionPreviousID returns an attribute KeyValue conforming to the
+// "session.previous_id" semantic conventions. It represents the previous
+// `session.id` for this user, when known.
+func SessionPreviousID(val string) attribute.KeyValue {
+	return SessionPreviousIDKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go
new file mode 100644
index 000000000..d27e8a8f8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the v1.24.0
+// version of the OpenTelemetry semantic conventions.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go
new file mode 100644
index 000000000..6c019aafc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go
@@ -0,0 +1,200 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This event represents an occurrence of a lifecycle transition on the iOS
+// platform.
+const (
+	// IosStateKey is the attribute Key conforming to the "ios.state" semantic
+	// conventions. It represents the state the application has transitioned
+	// into at the occurrence of the event.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Note: The iOS lifecycle states are defined in the [UIApplicationDelegate
+	// documentation](https://developer.apple.com/documentation/uikit/uiapplicationdelegate#1656902),
+	// and from which the `OS terminology` column values are derived.
+	IosStateKey = attribute.Key("ios.state")
+)
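+
+// Example: a sketch of recording such a lifecycle transition as a span
+// event, using one of the IosState* values defined below; span is assumed to
+// be an active trace.Span and trace the go.opentelemetry.io/otel/trace
+// package:
+//
+//	span.AddEvent("ios.lifecycle", trace.WithAttributes(IosStateBackground))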
+
+var (
+	// The app has become `active`. Associated with UIKit notification `applicationDidBecomeActive`
+	IosStateActive = IosStateKey.String("active")
+	// The app is now `inactive`. Associated with UIKit notification `applicationWillResignActive`
+	IosStateInactive = IosStateKey.String("inactive")
+	// The app is now in the background. This value is associated with UIKit notification `applicationDidEnterBackground`
+	IosStateBackground = IosStateKey.String("background")
+	// The app is now in the foreground. This value is associated with UIKit notification `applicationWillEnterForeground`
+	IosStateForeground = IosStateKey.String("foreground")
+	// The app is about to terminate. Associated with UIKit notification `applicationWillTerminate`
+	IosStateTerminate = IosStateKey.String("terminate")
+)
+
+// This event represents an occurrence of a lifecycle transition on the Android
+// platform.
+const (
+	// AndroidStateKey is the attribute Key conforming to the "android.state"
+	// semantic conventions. It represents the state the application has
+	// transitioned into at the occurrence of the event.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Note: The Android lifecycle states are defined in [Activity lifecycle
+	// callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
+	// and from which the `OS identifiers` are derived.
+	AndroidStateKey = attribute.Key("android.state")
+)
+
+var (
+	// Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time
+	AndroidStateCreated = AndroidStateKey.String("created")
+	// Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state
+	AndroidStateBackground = AndroidStateKey.String("background")
+	// Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states
+	AndroidStateForeground = AndroidStateKey.String("foreground")
+)
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+	// FeatureFlagKeyKey is the attribute Key conforming to the
+	// "feature_flag.key" semantic conventions. It represents the unique
+	// identifier of the feature flag.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'logo-color'
+	FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+	// FeatureFlagProviderNameKey is the attribute Key conforming to the
+	// "feature_flag.provider_name" semantic conventions. It represents the
+	// name of the service provider that performs the flag evaluation.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: experimental
+	// Examples: 'Flag Manager'
+	FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+	// FeatureFlagVariantKey is the attribute Key conforming to the
+	// "feature_flag.variant" semantic conventions. It SHOULD be a semantic
+	// identifier for a value. If one is unavailable, a stringified version of
+	// the value can be used.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: experimental
+	// Examples: 'red', 'true', 'on'
+	// Note: A semantic identifier, commonly referred to as a variant, provides
+	// a means
+	// for referring to a value without including the value itself. This can
+	// provide additional context for understanding the meaning behind a value.
+	// For example, the variant `red` may be used for the value `#c05543`.
+	//
+	// A stringified version of the value can be used in situations where a
+	// semantic identifier is unavailable.
+	// String representation of the value
+	// should be determined by the implementer.
+	FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+	return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+	return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It SHOULD be a semantic
+// identifier for a value. If one is unavailable, a stringified version of the
+// value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+	return FeatureFlagVariantKey.String(val)
+}
+
+// RPC received/sent message.
+const (
+	// MessageCompressedSizeKey is the attribute Key conforming to the
+	// "message.compressed_size" semantic conventions. It represents the
+	// compressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+	// MessageIDKey is the attribute Key conforming to the "message.id"
+	// semantic conventions. It MUST be calculated as two different counters
+	// starting from `1`, one for sent messages and one for received messages.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: This way we guarantee that the values will be consistent between
+	// different implementations.
+	MessageIDKey = attribute.Key("message.id")
+
+	// MessageTypeKey is the attribute Key conforming to the "message.type"
+	// semantic conventions. It represents whether this is a received or sent
+	// message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessageTypeKey = attribute.Key("message.type")
+
+	// MessageUncompressedSizeKey is the attribute Key conforming to the
+	// "message.uncompressed_size" semantic conventions. It represents the
+	// uncompressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+	// sent
+	MessageTypeSent = MessageTypeKey.String("SENT")
+	// received
+	MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+	return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It MUST be calculated as two different counters
+// starting from `1`, one for sent messages and one for received messages.
+func MessageID(val int) attribute.KeyValue {
+	return MessageIDKey.Int(val)
+}
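+
+// Example: a sketch annotating a sent RPC message as a span event; span is
+// assumed to be an active trace.Span, trace the go.opentelemetry.io/otel/trace
+// package, and n the per-direction message counter described above:
+//
+//	span.AddEvent("message", trace.WithAttributes(
+//		MessageTypeSent,
+//		MessageID(n),
+//	))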
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+	return MessageUncompressedSizeKey.Int(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go
new file mode 100644
index 000000000..7235bb51d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+const (
+	// ExceptionEventName is the name of the Span event representing an exception.
+	ExceptionEventName = "exception"
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go
new file mode 100644
index 000000000..a6b953f62
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go
@@ -0,0 +1,1071 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+const (
+
+	// DBClientConnectionsUsage is the metric conforming to the
+	// "db.client.connections.usage" semantic conventions. It represents the number
+	// of connections that are currently in state described by the `state`
+	// attribute.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	DBClientConnectionsUsageName = "db.client.connections.usage"
+	DBClientConnectionsUsageUnit = "{connection}"
+	DBClientConnectionsUsageDescription = "The number of connections that are currently in state described by the `state` attribute"
+
+	// DBClientConnectionsIdleMax is the metric conforming to the
+	// "db.client.connections.idle.max" semantic conventions. It represents the
+	// maximum number of idle open connections allowed.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	DBClientConnectionsIdleMaxName = "db.client.connections.idle.max"
+	DBClientConnectionsIdleMaxUnit = "{connection}"
+	DBClientConnectionsIdleMaxDescription = "The maximum number of idle open connections allowed"
+
+	// DBClientConnectionsIdleMin is the metric conforming to the
+	// "db.client.connections.idle.min" semantic conventions. It represents the
+	// minimum number of idle open connections allowed.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	DBClientConnectionsIdleMinName = "db.client.connections.idle.min"
+	DBClientConnectionsIdleMinUnit = "{connection}"
+	DBClientConnectionsIdleMinDescription = "The minimum number of idle open connections allowed"
+
+	// DBClientConnectionsMax is the metric conforming to the
+	// "db.client.connections.max" semantic conventions. It represents the maximum
+	// number of open connections allowed.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	DBClientConnectionsMaxName = "db.client.connections.max"
+	DBClientConnectionsMaxUnit = "{connection}"
+	DBClientConnectionsMaxDescription = "The maximum number of open connections allowed"
+
+	// DBClientConnectionsPendingRequests is the metric conforming to the
+	// "db.client.connections.pending_requests" semantic conventions. It represents
+	// the number of pending requests for an open connection, cumulative for the
+	// entire pool.
+	// Instrument: updowncounter
+	// Unit: {request}
+	// Stability: Experimental
+	DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests"
+	DBClientConnectionsPendingRequestsUnit = "{request}"
+	DBClientConnectionsPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool"
+
+	// DBClientConnectionsTimeouts is the metric conforming to the
+	// "db.client.connections.timeouts" semantic conventions. It represents the
+	// number of connection timeouts that have occurred trying to obtain a
+	// connection from the pool.
+	// Instrument: counter
+	// Unit: {timeout}
+	// Stability: Experimental
+	DBClientConnectionsTimeoutsName = "db.client.connections.timeouts"
+	DBClientConnectionsTimeoutsUnit = "{timeout}"
+	DBClientConnectionsTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool"
+
+	// DBClientConnectionsCreateTime is the metric conforming to the
+	// "db.client.connections.create_time" semantic conventions. It represents the
+	// time it took to create a new connection.
+	// Instrument: histogram
+	// Unit: ms
+	// Stability: Experimental
+	DBClientConnectionsCreateTimeName = "db.client.connections.create_time"
+	DBClientConnectionsCreateTimeUnit = "ms"
+	DBClientConnectionsCreateTimeDescription = "The time it took to create a new connection"
+
+	// DBClientConnectionsWaitTime is the metric conforming to the
+	// "db.client.connections.wait_time" semantic conventions. It represents the
+	// time it took to obtain an open connection from the pool.
+	// Instrument: histogram
+	// Unit: ms
+	// Stability: Experimental
+	DBClientConnectionsWaitTimeName = "db.client.connections.wait_time"
+	DBClientConnectionsWaitTimeUnit = "ms"
+	DBClientConnectionsWaitTimeDescription = "The time it took to obtain an open connection from the pool"
+
+	// DBClientConnectionsUseTime is the metric conforming to the
+	// "db.client.connections.use_time" semantic conventions. It represents the
+	// time between borrowing a connection and returning it to the pool.
+	// Instrument: histogram
+	// Unit: ms
+	// Stability: Experimental
+	DBClientConnectionsUseTimeName = "db.client.connections.use_time"
+	DBClientConnectionsUseTimeUnit = "ms"
+	DBClientConnectionsUseTimeDescription = "The time between borrowing a connection and returning it to the pool"
+
+	// AspnetcoreRoutingMatchAttempts is the metric conforming to the
+	// "aspnetcore.routing.match_attempts" semantic conventions. It represents the
+	// number of requests that were attempted to be matched to an endpoint.
+	// Instrument: counter
+	// Unit: {match_attempt}
+	// Stability: Experimental
+	AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts"
+	AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}"
+	AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint."
+
+	// AspnetcoreDiagnosticsExceptions is the metric conforming to the
+	// "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the
+	// number of exceptions caught by exception handling middleware.
+	// Instrument: counter
+	// Unit: {exception}
+	// Stability: Experimental
+	AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions"
+	AspnetcoreDiagnosticsExceptionsUnit = "{exception}"
+	AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware."
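+
+	// Example: a sketch of how these generated name/unit/description
+	// constants might be wired into an instrument; meter is an assumed
+	// metric.Meter from go.opentelemetry.io/otel/metric:
+	//
+	//	createTime, err := meter.Float64Histogram(
+	//		DBClientConnectionsCreateTimeName,
+	//		metric.WithUnit(DBClientConnectionsCreateTimeUnit),
+	//		metric.WithDescription(DBClientConnectionsCreateTimeDescription),
+	//	)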
+
+	// AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the
+	// "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It
+	// represents the number of requests that are currently active on the server
+	// that hold a rate limiting lease.
+	// Instrument: updowncounter
+	// Unit: {request}
+	// Stability: Experimental
+	AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases"
+	AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}"
+	AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease."
+
+	// AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the
+	// "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It
+	// represents the duration of rate limiting lease held by requests on the
+	// server.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration"
+	AspnetcoreRateLimitingRequestLeaseDurationUnit = "s"
+	AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server."
+
+	// AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the
+	// "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It
+	// represents the time the request spent in a queue waiting to acquire a rate
+	// limiting lease.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue"
+	AspnetcoreRateLimitingRequestTimeInQueueUnit = "s"
+	AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease."
+
+	// AspnetcoreRateLimitingQueuedRequests is the metric conforming to the
+	// "aspnetcore.rate_limiting.queued_requests" semantic conventions. It
+	// represents the number of requests that are currently queued, waiting to
+	// acquire a rate limiting lease.
+	// Instrument: updowncounter
+	// Unit: {request}
+	// Stability: Experimental
+	AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests"
+	AspnetcoreRateLimitingQueuedRequestsUnit = "{request}"
+	AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease."
+
+	// AspnetcoreRateLimitingRequests is the metric conforming to the
+	// "aspnetcore.rate_limiting.requests" semantic conventions. It represents the
+	// number of requests that tried to acquire a rate limiting lease.
+	// Instrument: counter
+	// Unit: {request}
+	// Stability: Experimental
+	AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests"
+	AspnetcoreRateLimitingRequestsUnit = "{request}"
+	AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease."
+
+	// DNSLookupDuration is the metric conforming to the "dns.lookup.duration"
+	// semantic conventions. It measures the time taken to perform a DNS lookup.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	DNSLookupDurationName = "dns.lookup.duration"
+	DNSLookupDurationUnit = "s"
+	DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup."
+
+	// HTTPClientOpenConnections is the metric conforming to the
+	// "http.client.open_connections" semantic conventions. It represents the
+	// number of outbound HTTP connections that are currently active or idle on the
+	// client.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	HTTPClientOpenConnectionsName = "http.client.open_connections"
+	HTTPClientOpenConnectionsUnit = "{connection}"
+	HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client."
+
+	// HTTPClientConnectionDuration is the metric conforming to the
+	// "http.client.connection.duration" semantic conventions. It represents the
+	// duration of the successfully established outbound HTTP connections.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	HTTPClientConnectionDurationName = "http.client.connection.duration"
+	HTTPClientConnectionDurationUnit = "s"
+	HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections."
+
+	// HTTPClientActiveRequests is the metric conforming to the
+	// "http.client.active_requests" semantic conventions. It represents the number
+	// of active HTTP requests.
+	// Instrument: updowncounter
+	// Unit: {request}
+	// Stability: Experimental
+	HTTPClientActiveRequestsName = "http.client.active_requests"
+	HTTPClientActiveRequestsUnit = "{request}"
+	HTTPClientActiveRequestsDescription = "Number of active HTTP requests."
+
+	// HTTPClientRequestTimeInQueue is the metric conforming to the
+	// "http.client.request.time_in_queue" semantic conventions. It represents the
+	// amount of time requests spent on a queue waiting for an available
+	// connection.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	HTTPClientRequestTimeInQueueName = "http.client.request.time_in_queue"
+	HTTPClientRequestTimeInQueueUnit = "s"
+	HTTPClientRequestTimeInQueueDescription = "The amount of time requests spent on a queue waiting for an available connection."
+
+	// KestrelActiveConnections is the metric conforming to the
+	// "kestrel.active_connections" semantic conventions. It represents the number
+	// of connections that are currently active on the server.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	KestrelActiveConnectionsName = "kestrel.active_connections"
+	KestrelActiveConnectionsUnit = "{connection}"
+	KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server."
+
+	// KestrelConnectionDuration is the metric conforming to the
+	// "kestrel.connection.duration" semantic conventions. It represents the
+	// duration of connections on the server.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	KestrelConnectionDurationName = "kestrel.connection.duration"
+	KestrelConnectionDurationUnit = "s"
+	KestrelConnectionDurationDescription = "The duration of connections on the server."
+
+	// KestrelRejectedConnections is the metric conforming to the
+	// "kestrel.rejected_connections" semantic conventions. It represents the
+	// number of connections rejected by the server.
+	// Instrument: counter
+	// Unit: {connection}
+	// Stability: Experimental
+	KestrelRejectedConnectionsName = "kestrel.rejected_connections"
+	KestrelRejectedConnectionsUnit = "{connection}"
+	KestrelRejectedConnectionsDescription = "Number of connections rejected by the server."
+
+	// KestrelQueuedConnections is the metric conforming to the
+	// "kestrel.queued_connections" semantic conventions. It represents the number
+	// of connections that are currently queued and are waiting to start.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	KestrelQueuedConnectionsName = "kestrel.queued_connections"
+	KestrelQueuedConnectionsUnit = "{connection}"
+	KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start."
+
+	// KestrelQueuedRequests is the metric conforming to the
+	// "kestrel.queued_requests" semantic conventions. It represents the number of
+	// HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are
+	// currently queued and are waiting to start.
+	// Instrument: updowncounter
+	// Unit: {request}
+	// Stability: Experimental
+	KestrelQueuedRequestsName = "kestrel.queued_requests"
+	KestrelQueuedRequestsUnit = "{request}"
+	KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start."
+
+	// KestrelUpgradedConnections is the metric conforming to the
+	// "kestrel.upgraded_connections" semantic conventions. It represents the
+	// number of connections that are currently upgraded (WebSockets).
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	KestrelUpgradedConnectionsName = "kestrel.upgraded_connections"
+	KestrelUpgradedConnectionsUnit = "{connection}"
+	KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets). ."
+
+	// KestrelTLSHandshakeDuration is the metric conforming to the
+	// "kestrel.tls_handshake.duration" semantic conventions. It represents the
+	// duration of TLS handshakes on the server.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration"
+	KestrelTLSHandshakeDurationUnit = "s"
+	KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server."
+
+	// KestrelActiveTLSHandshakes is the metric conforming to the
+	// "kestrel.active_tls_handshakes" semantic conventions. It represents the
+	// number of TLS handshakes that are currently in progress on the server.
+	// Instrument: updowncounter
+	// Unit: {handshake}
+	// Stability: Experimental
+	KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes"
+	KestrelActiveTLSHandshakesUnit = "{handshake}"
+	KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server."
+
+	// SignalrServerConnectionDuration is the metric conforming to the
+	// "signalr.server.connection.duration" semantic conventions. It represents the
+	// duration of connections on the server.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	SignalrServerConnectionDurationName = "signalr.server.connection.duration"
+	SignalrServerConnectionDurationUnit = "s"
+	SignalrServerConnectionDurationDescription = "The duration of connections on the server."
+
+	// SignalrServerActiveConnections is the metric conforming to the
+	// "signalr.server.active_connections" semantic conventions. It represents the
+	// number of connections that are currently active on the server.
+	// Instrument: updowncounter
+	// Unit: {connection}
+	// Stability: Experimental
+	SignalrServerActiveConnectionsName = "signalr.server.active_connections"
+	SignalrServerActiveConnectionsUnit = "{connection}"
+	SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server."
+
+	// FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration"
+	// semantic conventions. It measures the duration of the function's logic
+	// execution.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	FaaSInvokeDurationName = "faas.invoke_duration"
+	FaaSInvokeDurationUnit = "s"
+	FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution"
+
+	// FaaSInitDuration is the metric conforming to the "faas.init_duration"
+	// semantic conventions. It measures the duration of the function's
+	// initialization, such as a cold start.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	FaaSInitDurationName = "faas.init_duration"
+	FaaSInitDurationUnit = "s"
+	FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start"
+
+	// FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic
+	// conventions. It represents the number of invocation cold starts.
+	// Instrument: counter
+	// Unit: {coldstart}
+	// Stability: Experimental
+	FaaSColdstartsName = "faas.coldstarts"
+	FaaSColdstartsUnit = "{coldstart}"
+	FaaSColdstartsDescription = "Number of invocation cold starts"
+
+	// FaaSErrors is the metric conforming to the "faas.errors" semantic
+	// conventions. It represents the number of invocation errors.
+	// Instrument: counter
+	// Unit: {error}
+	// Stability: Experimental
+	FaaSErrorsName = "faas.errors"
+	FaaSErrorsUnit = "{error}"
+	FaaSErrorsDescription = "Number of invocation errors"
+
+	// FaaSInvocations is the metric conforming to the "faas.invocations" semantic
+	// conventions. It represents the number of successful invocations.
+	// Instrument: counter
+	// Unit: {invocation}
+	// Stability: Experimental
+	FaaSInvocationsName = "faas.invocations"
+	FaaSInvocationsUnit = "{invocation}"
+	FaaSInvocationsDescription = "Number of successful invocations"
+
+	// FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic
+	// conventions. It represents the number of invocation timeouts.
+	// Instrument: counter
+	// Unit: {timeout}
+	// Stability: Experimental
+	FaaSTimeoutsName = "faas.timeouts"
+	FaaSTimeoutsUnit = "{timeout}"
+	FaaSTimeoutsDescription = "Number of invocation timeouts"
+
+	// FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic
+	// conventions. It represents the distribution of max memory usage per
+	// invocation.
+	// Instrument: histogram
+	// Unit: By
+	// Stability: Experimental
+	FaaSMemUsageName = "faas.mem_usage"
+	FaaSMemUsageUnit = "By"
+	FaaSMemUsageDescription = "Distribution of max memory usage per invocation"
+
+	// FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic
+	// conventions. It represents the distribution of CPU usage per invocation.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Experimental
+	FaaSCPUUsageName = "faas.cpu_usage"
+	FaaSCPUUsageUnit = "s"
+	FaaSCPUUsageDescription = "Distribution of CPU usage per invocation"
+
+	// FaaSNetIo is the metric conforming to the "faas.net_io" semantic
+	// conventions. It represents the distribution of net I/O usage per invocation.
+	// Instrument: histogram
+	// Unit: By
+	// Stability: Experimental
+	FaaSNetIoName = "faas.net_io"
+	FaaSNetIoUnit = "By"
+	FaaSNetIoDescription = "Distribution of net I/O usage per invocation"
+
+	// HTTPServerRequestDuration is the metric conforming to the
+	// "http.server.request.duration" semantic conventions. It represents the
+	// duration of HTTP server requests.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Stable
+	HTTPServerRequestDurationName = "http.server.request.duration"
+	HTTPServerRequestDurationUnit = "s"
+	HTTPServerRequestDurationDescription = "Duration of HTTP server requests."
+
+	// HTTPServerActiveRequests is the metric conforming to the
+	// "http.server.active_requests" semantic conventions. It represents the number
+	// of active HTTP server requests.
+	// Instrument: updowncounter
+	// Unit: {request}
+	// Stability: Experimental
+	HTTPServerActiveRequestsName = "http.server.active_requests"
+	HTTPServerActiveRequestsUnit = "{request}"
+	HTTPServerActiveRequestsDescription = "Number of active HTTP server requests."
+
+	// HTTPServerRequestBodySize is the metric conforming to the
+	// "http.server.request.body.size" semantic conventions. It represents the size
+	// of HTTP server request bodies.
+	// Instrument: histogram
+	// Unit: By
+	// Stability: Experimental
+	HTTPServerRequestBodySizeName = "http.server.request.body.size"
+	HTTPServerRequestBodySizeUnit = "By"
+	HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies."
+
+	// HTTPServerResponseBodySize is the metric conforming to the
+	// "http.server.response.body.size" semantic conventions. It represents the
+	// size of HTTP server response bodies.
+	// Instrument: histogram
+	// Unit: By
+	// Stability: Experimental
+	HTTPServerResponseBodySizeName = "http.server.response.body.size"
+	HTTPServerResponseBodySizeUnit = "By"
+	HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies."
+
+	// HTTPClientRequestDuration is the metric conforming to the
+	// "http.client.request.duration" semantic conventions. It represents the
+	// duration of HTTP client requests.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Stable
+	HTTPClientRequestDurationName = "http.client.request.duration"
+	HTTPClientRequestDurationUnit = "s"
+	HTTPClientRequestDurationDescription = "Duration of HTTP client requests."
+
+	// HTTPClientRequestBodySize is the metric conforming to the
+	// "http.client.request.body.size" semantic conventions. It represents the size
+	// of HTTP client request bodies.
+	// Instrument: histogram
+	// Unit: By
+	// Stability: Experimental
+	HTTPClientRequestBodySizeName = "http.client.request.body.size"
+	HTTPClientRequestBodySizeUnit = "By"
+	HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies."
+
+	// HTTPClientResponseBodySize is the metric conforming to the
+	// "http.client.response.body.size" semantic conventions. It represents the
+	// size of HTTP client response bodies.
+	// Instrument: histogram
+	// Unit: By
+	// Stability: Experimental
+	HTTPClientResponseBodySizeName = "http.client.response.body.size"
+	HTTPClientResponseBodySizeUnit = "By"
+	HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies."
+
+	// JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic
+	// conventions. It represents the measure of initial memory requested.
+	// Instrument: updowncounter
+	// Unit: By
+	// Stability: Experimental
+	JvmMemoryInitName = "jvm.memory.init"
+	JvmMemoryInitUnit = "By"
+	JvmMemoryInitDescription = "Measure of initial memory requested."
+
+	// JvmSystemCPUUtilization is the metric conforming to the
+	// "jvm.system.cpu.utilization" semantic conventions. It represents the recent
+	// CPU utilization for the whole system as reported by the JVM.
+	// Instrument: gauge
+	// Unit: 1
+	// Stability: Experimental
+	JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization"
+	JvmSystemCPUUtilizationUnit = "1"
+	JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM."
+
+	// JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m"
+	// semantic conventions. It represents the average CPU load of the whole system
+	// for the last minute as reported by the JVM.
+	// Instrument: gauge
+	// Unit: {run_queue_item}
+	// Stability: Experimental
+	JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m"
+	JvmSystemCPULoad1mUnit = "{run_queue_item}"
+	JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM."
+
+	// JvmBufferMemoryUsage is the metric conforming to the
+	// "jvm.buffer.memory.usage" semantic conventions. It represents the measure of
+	// memory used by buffers.
+	// Instrument: updowncounter
+	// Unit: By
+	// Stability: Experimental
+	JvmBufferMemoryUsageName = "jvm.buffer.memory.usage"
+	JvmBufferMemoryUsageUnit = "By"
+	JvmBufferMemoryUsageDescription = "Measure of memory used by buffers."
+
+	// JvmBufferMemoryLimit is the metric conforming to the
+	// "jvm.buffer.memory.limit" semantic conventions. It represents the measure of
+	// total memory capacity of buffers.
+	// Instrument: updowncounter
+	// Unit: By
+	// Stability: Experimental
+	JvmBufferMemoryLimitName = "jvm.buffer.memory.limit"
+	JvmBufferMemoryLimitUnit = "By"
+	JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers."
+
+	// JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic
+	// conventions. It represents the number of buffers in the pool.
+	// Instrument: updowncounter
+	// Unit: {buffer}
+	// Stability: Experimental
+	JvmBufferCountName = "jvm.buffer.count"
+	JvmBufferCountUnit = "{buffer}"
+	JvmBufferCountDescription = "Number of buffers in the pool."
+
+	// JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic
+	// conventions. It represents the measure of memory used.
+	// Instrument: updowncounter
+	// Unit: By
+	// Stability: Stable
+	JvmMemoryUsedName = "jvm.memory.used"
+	JvmMemoryUsedUnit = "By"
+	JvmMemoryUsedDescription = "Measure of memory used."
+
+	// JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed"
+	// semantic conventions. It represents the measure of memory committed.
+	// Instrument: updowncounter
+	// Unit: By
+	// Stability: Stable
+	JvmMemoryCommittedName = "jvm.memory.committed"
+	JvmMemoryCommittedUnit = "By"
+	JvmMemoryCommittedDescription = "Measure of memory committed."
+
+	// JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic
+	// conventions. It represents the measure of max obtainable memory.
+	// Instrument: updowncounter
+	// Unit: By
+	// Stability: Stable
+	JvmMemoryLimitName = "jvm.memory.limit"
+	JvmMemoryLimitUnit = "By"
+	JvmMemoryLimitDescription = "Measure of max obtainable memory."
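+
+	// Example: a sketch pairing the jvm.memory.used constants above with an
+	// updowncounter; meter is again an assumed metric.Meter:
+	//
+	//	memUsed, err := meter.Int64UpDownCounter(
+	//		JvmMemoryUsedName,
+	//		metric.WithUnit(JvmMemoryUsedUnit),
+	//		metric.WithDescription(JvmMemoryUsedDescription),
+	//	)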
+
+	// JvmMemoryUsedAfterLastGc is the metric conforming to the
+	// "jvm.memory.used_after_last_gc" semantic conventions. It represents the
+	// measure of memory used, as measured after the most recent garbage collection
+	// event on this pool.
+	// Instrument: updowncounter
+	// Unit: By
+	// Stability: Stable
+	JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc"
+	JvmMemoryUsedAfterLastGcUnit = "By"
+	JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool."
+
+	// JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic
+	// conventions. It represents the duration of JVM garbage collection actions.
+	// Instrument: histogram
+	// Unit: s
+	// Stability: Stable
+	JvmGcDurationName = "jvm.gc.duration"
+	JvmGcDurationUnit = "s"
+	JvmGcDurationDescription = "Duration of JVM garbage collection actions."
+
+	// JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic
+	// conventions. It represents the number of executing platform threads.
+	// Instrument: updowncounter
+	// Unit: {thread}
+	// Stability: Stable
+	JvmThreadCountName = "jvm.thread.count"
+	JvmThreadCountUnit = "{thread}"
+	JvmThreadCountDescription = "Number of executing platform threads."
+
+	// JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic
+	// conventions. It represents the number of classes loaded since JVM start.
+	// Instrument: counter
+	// Unit: {class}
+	// Stability: Stable
+	JvmClassLoadedName = "jvm.class.loaded"
+	JvmClassLoadedUnit = "{class}"
+	JvmClassLoadedDescription = "Number of classes loaded since JVM start."
+
+	// JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded"
+	// semantic conventions. It represents the number of classes unloaded since JVM
+	// start.
+	// Instrument: counter
+	// Unit: {class}
+	// Stability: Stable
+	JvmClassUnloadedName = "jvm.class.unloaded"
+	JvmClassUnloadedUnit = "{class}"
+	JvmClassUnloadedDescription = "Number of classes unloaded since JVM start."
+
+	// JvmClassCount is the metric conforming to the "jvm.class.count" semantic
+	// conventions. It represents the number of classes currently loaded.
+	// Instrument: updowncounter
+	// Unit: {class}
+	// Stability: Stable
+	JvmClassCountName = "jvm.class.count"
+	JvmClassCountUnit = "{class}"
+	JvmClassCountDescription = "Number of classes currently loaded."
+
+	// JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic
+	// conventions. It represents the number of processors available to the Java
+	// virtual machine.
+	// Instrument: updowncounter
+	// Unit: {cpu}
+	// Stability: Stable
+	JvmCPUCountName = "jvm.cpu.count"
+	JvmCPUCountUnit = "{cpu}"
+	JvmCPUCountDescription = "Number of processors available to the Java virtual machine."
+
+	// JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic
+	// conventions. It represents the CPU time used by the process as reported by
+	// the JVM.
+	// Instrument: counter
+	// Unit: s
+	// Stability: Stable
+	JvmCPUTimeName = "jvm.cpu.time"
+	JvmCPUTimeUnit = "s"
+	JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM."
+
+	// JvmCPURecentUtilization is the metric conforming to the
+	// "jvm.cpu.recent_utilization" semantic conventions. It represents the recent
+	// CPU utilization for the process as reported by the JVM.
+ // Instrument: gauge + // Unit: 1 + // Stability: Stable + JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization" + JvmCPURecentUtilizationUnit = "1" + JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM." + + // MessagingPublishDuration is the metric conforming to the + // "messaging.publish.duration" semantic conventions. It measures the + // duration of the publish operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingPublishDurationName = "messaging.publish.duration" + MessagingPublishDurationUnit = "s" + MessagingPublishDurationDescription = "Measures the duration of publish operation." + + // MessagingReceiveDuration is the metric conforming to the + // "messaging.receive.duration" semantic conventions. It measures the + // duration of the receive operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingReceiveDurationName = "messaging.receive.duration" + MessagingReceiveDurationUnit = "s" + MessagingReceiveDurationDescription = "Measures the duration of receive operation." + + // MessagingDeliverDuration is the metric conforming to the + // "messaging.deliver.duration" semantic conventions. It measures the + // duration of the deliver operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingDeliverDurationName = "messaging.deliver.duration" + MessagingDeliverDurationUnit = "s" + MessagingDeliverDurationDescription = "Measures the duration of deliver operation." + + // MessagingPublishMessages is the metric conforming to the + // "messaging.publish.messages" semantic conventions. It measures the + // number of published messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingPublishMessagesName = "messaging.publish.messages" + MessagingPublishMessagesUnit = "{message}" + MessagingPublishMessagesDescription = "Measures the number of published messages." + + // MessagingReceiveMessages is the metric conforming to the + // "messaging.receive.messages" semantic conventions. It measures the + // number of received messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingReceiveMessagesName = "messaging.receive.messages" + MessagingReceiveMessagesUnit = "{message}" + MessagingReceiveMessagesDescription = "Measures the number of received messages." + + // MessagingDeliverMessages is the metric conforming to the + // "messaging.deliver.messages" semantic conventions. It measures the + // number of delivered messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingDeliverMessagesName = "messaging.deliver.messages" + MessagingDeliverMessagesUnit = "{message}" + MessagingDeliverMessagesDescription = "Measures the number of delivered messages." + + // RPCServerDuration is the metric conforming to the "rpc.server.duration" + // semantic conventions. It measures the duration of inbound + // RPC. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + RPCServerDurationName = "rpc.server.duration" + RPCServerDurationUnit = "ms" + RPCServerDurationDescription = "Measures the duration of inbound RPC." + + // RPCServerRequestSize is the metric conforming to the + // "rpc.server.request.size" semantic conventions. It measures + // the size of RPC request messages (uncompressed).
+ // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCServerRequestSizeName = "rpc.server.request.size" + RPCServerRequestSizeUnit = "By" + RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." + + // RPCServerResponseSize is the metric conforming to the + // "rpc.server.response.size" semantic conventions. It measures + // the size of RPC response messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCServerResponseSizeName = "rpc.server.response.size" + RPCServerResponseSizeUnit = "By" + RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." + + // RPCServerRequestsPerRPC is the metric conforming to the + // "rpc.server.requests_per_rpc" semantic conventions. It measures the + // number of messages received per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc" + RPCServerRequestsPerRPCUnit = "{count}" + RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC." + + // RPCServerResponsesPerRPC is the metric conforming to the + // "rpc.server.responses_per_rpc" semantic conventions. It measures the + // number of messages sent per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc" + RPCServerResponsesPerRPCUnit = "{count}" + RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC." + + // RPCClientDuration is the metric conforming to the "rpc.client.duration" + // semantic conventions. It measures the duration of outbound + // RPC. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + RPCClientDurationName = "rpc.client.duration" + RPCClientDurationUnit = "ms" + RPCClientDurationDescription = "Measures the duration of outbound RPC." + + // RPCClientRequestSize is the metric conforming to the + // "rpc.client.request.size" semantic conventions. It measures + // the size of RPC request messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCClientRequestSizeName = "rpc.client.request.size" + RPCClientRequestSizeUnit = "By" + RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." + + // RPCClientResponseSize is the metric conforming to the + // "rpc.client.response.size" semantic conventions. It measures + // the size of RPC response messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCClientResponseSizeName = "rpc.client.response.size" + RPCClientResponseSizeUnit = "By" + RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." + + // RPCClientRequestsPerRPC is the metric conforming to the + // "rpc.client.requests_per_rpc" semantic conventions. It measures the + // number of messages received per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc" + RPCClientRequestsPerRPCUnit = "{count}" + RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC." + + // RPCClientResponsesPerRPC is the metric conforming to the + // "rpc.client.responses_per_rpc" semantic conventions.
It measures the + // number of messages sent per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc" + RPCClientResponsesPerRPCUnit = "{count}" + RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC." + + // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic + // conventions. It represents the seconds each logical CPU spent on each mode. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemCPUTimeName = "system.cpu.time" + SystemCPUTimeUnit = "s" + SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode" + + // SystemCPUUtilization is the metric conforming to the + // "system.cpu.utilization" semantic conventions. It represents the difference + // in system.cpu.time since the last measurement, divided by the elapsed time + // and number of logical CPUs. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + SystemCPUUtilizationName = "system.cpu.utilization" + SystemCPUUtilizationUnit = "1" + SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs" + + // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency" + // semantic conventions. It reports the current frequency of the + // CPU in Hz. + // Instrument: gauge + // Unit: {Hz} + // Stability: Experimental + SystemCPUFrequencyName = "system.cpu.frequency" + SystemCPUFrequencyUnit = "{Hz}" + SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz" + + // SystemCPUPhysicalCount is the metric conforming to the + // "system.cpu.physical.count" semantic conventions. It reports + // the number of actual physical processor cores on the hardware. + // Instrument: updowncounter + // Unit: {cpu} + // Stability: Experimental + SystemCPUPhysicalCountName = "system.cpu.physical.count" + SystemCPUPhysicalCountUnit = "{cpu}" + SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware" + + // SystemCPULogicalCount is the metric conforming to the + // "system.cpu.logical.count" semantic conventions. It reports + // the number of logical (virtual) processor cores created by the operating + // system to manage multitasking. + // Instrument: updowncounter + // Unit: {cpu} + // Stability: Experimental + SystemCPULogicalCountName = "system.cpu.logical.count" + SystemCPULogicalCountUnit = "{cpu}" + SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking" + + // SystemMemoryUsage is the metric conforming to the "system.memory.usage" + // semantic conventions. It reports memory in use by state. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemoryUsageName = "system.memory.usage" + SystemMemoryUsageUnit = "By" + SystemMemoryUsageDescription = "Reports memory in use by state." + + // SystemMemoryLimit is the metric conforming to the "system.memory.limit" + // semantic conventions. It represents the total memory available in the + // system. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemoryLimitName = "system.memory.limit" + SystemMemoryLimitUnit = "By" + SystemMemoryLimitDescription = "Total memory available in the system."
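The system metrics follow the same pattern, with per-measurement attributes carrying the dimension (for example the CPU mode). A sketch for system.cpu.time, again against the global MeterProvider; the "system.cpu.state" attribute key and the sample value are assumptions for illustration:

package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	meter := otel.Meter("example/system-stats") // hypothetical scope name

	cpuTime, err := meter.Float64Counter(
		semconv.SystemCPUTimeName, // "system.cpu.time"
		metric.WithUnit(semconv.SystemCPUTimeUnit),
		metric.WithDescription(semconv.SystemCPUTimeDescription),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Each CPU mode is a separate attribute dimension; 1.25 stands in for a
	// real /proc/stat delta.
	cpuTime.Add(context.Background(), 1.25,
		metric.WithAttributes(attribute.String("system.cpu.state", "user")))
}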
+ + // SystemMemoryUtilization is the metric conforming to the + // "system.memory.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemMemoryUtilizationName = "system.memory.utilization" + SystemMemoryUtilizationUnit = "1" + + // SystemPagingUsage is the metric conforming to the "system.paging.usage" + // semantic conventions. It represents the Unix swap or Windows pagefile usage. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemPagingUsageName = "system.paging.usage" + SystemPagingUsageUnit = "By" + SystemPagingUsageDescription = "Unix swap or windows pagefile usage" + + // SystemPagingUtilization is the metric conforming to the + // "system.paging.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingUtilizationName = "system.paging.utilization" + SystemPagingUtilizationUnit = "1" + + // SystemPagingFaults is the metric conforming to the "system.paging.faults" + // semantic conventions. + // Instrument: counter + // Unit: {fault} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingFaultsName = "system.paging.faults" + SystemPagingFaultsUnit = "{fault}" + + // SystemPagingOperations is the metric conforming to the + // "system.paging.operations" semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingOperationsName = "system.paging.operations" + SystemPagingOperationsUnit = "{operation}" + + // SystemDiskIo is the metric conforming to the "system.disk.io" semantic + // conventions. + // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskIoName = "system.disk.io" + SystemDiskIoUnit = "By" + + // SystemDiskOperations is the metric conforming to the + // "system.disk.operations" semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskOperationsName = "system.disk.operations" + SystemDiskOperationsUnit = "{operation}" + + // SystemDiskIoTime is the metric conforming to the "system.disk.io_time" + // semantic conventions. It represents the time disk spent activated. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskIoTimeName = "system.disk.io_time" + SystemDiskIoTimeUnit = "s" + SystemDiskIoTimeDescription = "Time disk spent activated" + + // SystemDiskOperationTime is the metric conforming to the + // "system.disk.operation_time" semantic conventions. It represents the sum of + // the time each operation took to complete.
+ // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskOperationTimeName = "system.disk.operation_time" + SystemDiskOperationTimeUnit = "s" + SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete" + + // SystemDiskMerged is the metric conforming to the "system.disk.merged" + // semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskMergedName = "system.disk.merged" + SystemDiskMergedUnit = "{operation}" + + // SystemFilesystemUsage is the metric conforming to the + // "system.filesystem.usage" semantic conventions. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUsageName = "system.filesystem.usage" + SystemFilesystemUsageUnit = "By" + + // SystemFilesystemUtilization is the metric conforming to the + // "system.filesystem.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUtilizationName = "system.filesystem.utilization" + SystemFilesystemUtilizationUnit = "1" + + // SystemNetworkDropped is the metric conforming to the + // "system.network.dropped" semantic conventions. It represents the count of + // packets that are dropped or discarded even though there was no error. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + SystemNetworkDroppedName = "system.network.dropped" + SystemNetworkDroppedUnit = "{packet}" + SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error" + + // SystemNetworkPackets is the metric conforming to the + // "system.network.packets" semantic conventions. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkPacketsName = "system.network.packets" + SystemNetworkPacketsUnit = "{packet}" + + // SystemNetworkErrors is the metric conforming to the "system.network.errors" + // semantic conventions. It represents the count of network errors detected. + // Instrument: counter + // Unit: {error} + // Stability: Experimental + SystemNetworkErrorsName = "system.network.errors" + SystemNetworkErrorsUnit = "{error}" + SystemNetworkErrorsDescription = "Count of network errors detected" + + // SystemNetworkIo is the metric conforming to the "system.network.io" semantic + // conventions. + // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkIoName = "system.network.io" + SystemNetworkIoUnit = "By" + + // SystemNetworkConnections is the metric conforming to the + // "system.network.connections" semantic conventions. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. 
+ SystemNetworkConnectionsName = "system.network.connections" + SystemNetworkConnectionsUnit = "{connection}" + + // SystemProcessesCount is the metric conforming to the + // "system.processes.count" semantic conventions. It represents the total + // number of processes in each state. + // Instrument: updowncounter + // Unit: {process} + // Stability: Experimental + SystemProcessesCountName = "system.processes.count" + SystemProcessesCountUnit = "{process}" + SystemProcessesCountDescription = "Total number of processes in each state" + + // SystemProcessesCreated is the metric conforming to the + // "system.processes.created" semantic conventions. It represents the total + // number of processes created over uptime of the host. + // Instrument: counter + // Unit: {process} + // Stability: Experimental + SystemProcessesCreatedName = "system.processes.created" + SystemProcessesCreatedUnit = "{process}" + SystemProcessesCreatedDescription = "Total number of processes created over uptime of the host" + + // SystemLinuxMemoryAvailable is the metric conforming to the + // "system.linux.memory.available" semantic conventions. It represents an + // estimate of how much memory is available for starting new applications, + // without causing swapping. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemLinuxMemoryAvailableName = "system.linux.memory.available" + SystemLinuxMemoryAvailableUnit = "By" + SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go new file mode 100644 index 000000000..d66bbe9c2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go @@ -0,0 +1,2545 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" + +import "go.opentelemetry.io/otel/attribute" + +// A cloud environment (e.g. GCP, Azure, AWS). +const ( + // CloudAccountIDKey is the attribute Key conforming to the + // "cloud.account.id" semantic conventions. It represents the cloud account + // ID the resource is assigned to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. It represents the zone + // where the resource is running; cloud regions often have multiple, + // isolated locations known as zones to increase availability. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and Google + // Cloud. + CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + + // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" + // semantic conventions. It represents the cloud platform in use. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform") + + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for + // example [Alibaba Cloud + // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure + // regions](https://azure.microsoft.com/global-infrastructure/geographies/), + // [Google Cloud regions](https://cloud.google.com/about/locations), or + // [Tencent Cloud + // regions](https://www.tencentcloud.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + + // CloudResourceIDKey is the attribute Key conforming to the + // "cloud.resource_id" semantic conventions. It represents the cloud + // provider-specific native identifier of the monitored cloud resource + // (e.g. an + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // on AWS, a [fully qualified resource + // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) + // on Azure, a [full resource + // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) + // on GCP) + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', + // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', + // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' + // Note: On some cloud providers, it may not be possible to determine the + // full ID at startup, + // so it may be necessary to set `cloud.resource_id` as a span attribute + // instead. + // + // The exact value to use for `cloud.resource_id` depends on the cloud + // provider. + // The following well-known definitions MUST be used if you set this + // attribute and they apply: + // + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias + // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + // with the resolved function version, as the same runtime instance may + // be invokable with + // multiple different aliases. + // * **GCP:** The [URI of the + // resource](https://cloud.google.com/iam/docs/full-resource-names) + // * **Azure:** The [Fully Qualified Resource + // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) + // of the invoked function, + // *not* the function app, having the form + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider.
+ CloudResourceIDKey = attribute.Key("cloud.resource_id") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Bare Metal Solution (BMS) + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + CloudProviderHeroku = CloudProviderKey.String("heroku") + // IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + 
CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +// CloudAccountID returns an attribute KeyValue conforming to the +// "cloud.account.id" semantic conventions. It represents the cloud account ID +// the resource is assigned to. +func CloudAccountID(val string) attribute.KeyValue { + return CloudAccountIDKey.String(val) +} + +// CloudAvailabilityZone returns an attribute KeyValue conforming to the +// "cloud.availability_zone" semantic conventions. It represents the zone where +// the resource is running; cloud regions often have multiple, isolated +// locations known as zones to increase availability. +func CloudAvailabilityZone(val string) attribute.KeyValue { + return CloudAvailabilityZoneKey.String(val) +} + +// CloudRegion returns an attribute KeyValue conforming to the +// "cloud.region" semantic conventions. It represents the geographical region +// the resource is running in. +func CloudRegion(val string) attribute.KeyValue { + return CloudRegionKey.String(val) +} + +// CloudResourceID returns an attribute KeyValue conforming to the +// "cloud.resource_id" semantic conventions. It represents the cloud +// provider-specific native identifier of the monitored cloud resource (e.g. an +// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) +// on AWS, a [fully qualified resource +// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on +// Azure, a [full resource +// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) +// on GCP) +func CloudResourceID(val string) attribute.KeyValue { + return CloudResourceIDKey.String(val) +} + +// A container instance. +const ( + // ContainerCommandKey is the attribute Key conforming to the + // "container.command" semantic conventions. It represents the command used + // to run the container (i.e. the command name). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcontribcol' + // Note: If using embedded credentials or sensitive data, it is recommended + // to remove them to prevent potential leakage. + ContainerCommandKey = attribute.Key("container.command") + + // ContainerCommandArgsKey is the attribute Key conforming to the + // "container.command_args" semantic conventions. It represents all the + // command arguments (including the command/executable itself) run by the + // container. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcontribcol, --config, config.yaml' + ContainerCommandArgsKey = attribute.Key("container.command_args") + + // ContainerCommandLineKey is the attribute Key conforming to the + // "container.command_line" semantic conventions. It represents the full + // command run by the container as a single string. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcontribcol --config config.yaml' + ContainerCommandLineKey = attribute.Key("container.command_line") + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + + // ContainerImageIDKey is the attribute Key conforming to the + // "container.image.id" semantic conventions. It represents the runtime + // specific image identifier. Usually a hash algorithm followed by a UUID. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' + // Note: Docker defines a sha256 of the image id; `container.image.id` + // corresponds to the `Image` field from the Docker container inspect + // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) + // endpoint. + // K8S defines a link to the container registry repository with digest + // `"imageID": "registry.azurecr.io + // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. + // The ID is assigned by the container runtime and can vary in different + // environments. Consider using `oci.manifest.digest` if it is important to + // identify the same image in different environments/runtimes. + ContainerImageIDKey = attribute.Key("container.image.id") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of + // the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageRepoDigestsKey is the attribute Key conforming to the + // "container.image.repo_digests" semantic conventions. It represents the + // repo digests of the container image as provided by the container + // runtime. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb', + // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578' + // Note: + // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect) + // and + // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238) + // report those under the `RepoDigests` field. + ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") + + // ContainerImageTagsKey is the attribute Key conforming to the + // "container.image.tags" semantic conventions. It represents the container + // image tags. An example can be found in [Docker Image + // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). + // Should be only the `<tag>` section of the full name for example from + // `registry.example.com/my-org/my-image:<tag>`. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'v1.27.1', '3.5.7-0' + ContainerImageTagsKey = attribute.Key("container.image.tags") + + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") +) + +// ContainerCommand returns an attribute KeyValue conforming to the +// "container.command" semantic conventions. It represents the command used to +// run the container (i.e. the command name). +func ContainerCommand(val string) attribute.KeyValue { + return ContainerCommandKey.String(val) +} + +// ContainerCommandArgs returns an attribute KeyValue conforming to the +// "container.command_args" semantic conventions. It represents all the +// command arguments (including the command/executable itself) run by the +// container. +func ContainerCommandArgs(val ...string) attribute.KeyValue { + return ContainerCommandArgsKey.StringSlice(val) +} + +// ContainerCommandLine returns an attribute KeyValue conforming to the +// "container.command_line" semantic conventions. It represents the full +// command run by the container as a single string. +func ContainerCommandLine(val string) attribute.KeyValue { + return ContainerCommandLineKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the +// "container.id" semantic conventions. It represents the container ID. Usually +// a UUID, as for example used to [identify Docker +// containers](https://docs.docker.com/engine/reference/run/#container-identification). +// The UUID might be abbreviated. +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerImageID returns an attribute KeyValue conforming to the +// "container.image.id" semantic conventions. It represents the runtime +// specific image identifier. Usually a hash algorithm followed by a UUID. +func ContainerImageID(val string) attribute.KeyValue { + return ContainerImageIDKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageRepoDigests returns an attribute KeyValue conforming to the +// "container.image.repo_digests" semantic conventions. It represents the repo +// digests of the container image as provided by the container runtime. +func ContainerImageRepoDigests(val ...string) attribute.KeyValue { + return ContainerImageRepoDigestsKey.StringSlice(val) +} + +// ContainerImageTags returns an attribute KeyValue conforming to the +// "container.image.tags" semantic conventions. It represents the container +// image tags. An example can be found in [Docker Image +// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). +// Should be only the `<tag>` section of the full name for example from +// `registry.example.com/my-org/my-image:<tag>`.
+func ContainerImageTags(val ...string) attribute.KeyValue { + return ContainerImageTagsKey.StringSlice(val) +} + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// Describes device attributes. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply; ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine-readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human-readable version of + // the device model rather than a machine-readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// A host is defined as a computing instance. For example, physical servers, +// virtual machines, switches or disk arrays. +const ( + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is + // running on. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + HostArchKey = attribute.Key("host.arch") + + // HostCPUCacheL2SizeKey is the attribute Key conforming to the + // "host.cpu.cache.l2.size" semantic conventions. It represents the amount + // of level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 12288000 + HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") + + // HostCPUFamilyKey is the attribute Key conforming to the + // "host.cpu.family" semantic conventions. It represents the family or + // generation of the CPU. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '6', 'PA-RISC 1.1e' + HostCPUFamilyKey = attribute.Key("host.cpu.family") + + // HostCPUModelIDKey is the attribute Key conforming to the + // "host.cpu.model.id" semantic conventions. It represents the model + // identifier. It provides more granular information about the CPU, + // distinguishing it from other CPUs within the same family. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '6', '9000/778/B180L' + HostCPUModelIDKey = attribute.Key("host.cpu.model.id") + + // HostCPUModelNameKey is the attribute Key conforming to the + // "host.cpu.model.name" semantic conventions. It represents the model + // designation of the processor. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz' + HostCPUModelNameKey = attribute.Key("host.cpu.model.name") + + // HostCPUSteppingKey is the attribute Key conforming to the + // "host.cpu.stepping" semantic conventions. It represents the stepping or + // core revisions.
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1 + HostCPUSteppingKey = attribute.Key("host.cpu.stepping") + + // HostCPUVendorIDKey is the attribute Key conforming to the + // "host.cpu.vendor.id" semantic conventions. It represents the processor + // manufacturer identifier. A maximum 12-character string. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'GenuineIntel' + // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor + // ID string in EBX, EDX and ECX registers. Writing these to memory in this + // order results in a 12-character string. + HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") + + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be + // the instance_id assigned by the cloud provider. For non-containerized + // systems, this should be the `machine-id`. See the table below for the + // sources to use to determine the `machine-id` based on operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + HostIDKey = attribute.Key("host.id") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the VM image ID or host OS image ID. + // For Cloud, this value is from the provider. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageNameKey is the attribute Key conforming to the + // "host.image.name" semantic conventions. It represents the name of the VM + // image or OS install the host was instantiated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version + // string of the VM image or host OS as defined in [Version + // Attributes](/docs/resource/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") + + // HostIPKey is the attribute Key conforming to the "host.ip" semantic + // conventions. It represents the available IP addresses of the host, + // excluding loopback interfaces. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 + // addresses MUST be specified in the [RFC + // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. + HostIPKey = attribute.Key("host.ip") + + // HostMacKey is the attribute Key conforming to the "host.mac" semantic + // conventions. It represents the available MAC addresses of the host, + // excluding loopback interfaces.
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal + // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): + // as hyphen-separated octets in uppercase hexadecimal form from most to + // least significant. + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or +// generation of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model +// identifier. It provides more granular information about the CPU, +// distinguishing it from other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. +func HostCPUModelName(val string) attribute.KeyValue { + return HostCPUModelNameKey.String(val) +} + +// HostCPUStepping returns an attribute KeyValue conforming to the +// "host.cpu.stepping" semantic conventions. It represents the stepping or core +// revisions. +func HostCPUStepping(val int) attribute.KeyValue { + return HostCPUSteppingKey.Int(val) +} + +// HostCPUVendorID returns an attribute KeyValue conforming to the +// "host.cpu.vendor.id" semantic conventions. It represents the processor +// manufacturer identifier. A maximum 12-character string. 
+func HostCPUVendorID(val string) attribute.KeyValue { + return HostCPUVendorIDKey.String(val) +} + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized systems, +// this should be the `machine-id`. See the table below for the sources to use +// to determine the `machine-id` based on operating system. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the +// "host.image.id" semantic conventions. It represents the VM image ID or host +// OS image ID. For Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM +// image or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string +// of the VM image or host OS as defined in [Version +// Attributes](/docs/resource/README.md#version-attributes). +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic +// conventions. It represents the available IP addresses of the host, excluding +// loopback interfaces. +func HostIP(val ...string) attribute.KeyValue { + return HostIPKey.StringSlice(val) +} + +// HostMac returns an attribute KeyValue conforming to the "host.mac" +// semantic conventions. It represents the available MAC addresses of the host, +// excluding loopback interfaces. +func HostMac(val ...string) attribute.KeyValue { + return HostMacKey.StringSlice(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" +// semantic conventions. It represents the name of the host. On Unix systems, +// it may contain what the hostname command returns, or the fully qualified +// hostname, or another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" +// semantic conventions. It represents the type of host. For Cloud, this must +// be the machine type. +func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// Kubernetes resource attributes. +const ( + // K8SClusterNameKey is the attribute Key conforming to the + // "k8s.cluster.name" semantic conventions. It represents the name of the + // cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") + + // K8SClusterUIDKey is the attribute Key conforming to the + // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for + // the cluster, set to the UID of the `kube-system` namespace. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' + // Note: K8S doesn't have support for obtaining a cluster ID.
If this is + // ever + // added, we will recommend collecting the `k8s.cluster.uid` through the + // official APIs. In the meantime, we are able to use the `uid` of the + // `kube-system` namespace as a proxy for cluster ID. Read on for the + // rationale. + // + // Every object created in a K8S cluster is assigned a distinct UID. The + // `kube-system` namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the `uid` of the `kube-system` + // namespace is a reasonable proxy for the K8S ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // [ISO/IEC 9834-8 and ITU-T + // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html), + // which states: + // + // > If generated according to one of the mechanisms defined in Rec. + // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // different from all other UUIDs generated before 3603 A.D., or is + // extremely likely to be different (depending on the mechanism chosen). + // + // Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") + + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from the Pod specification; it must be unique within a Pod. The + // container runtime usually uses a different globally unique name + // (`container.name`). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") + + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") + + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") + + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") + + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") + + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") + + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// K8SClusterUID returns an attribute KeyValue conforming to the +// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the +// cluster, set to the UID of the `kube-system` namespace. +func K8SClusterUID(val string) attribute.KeyValue { + return K8SClusterUIDKey.String(val) +} + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from the Pod specification; it must be unique within a Pod. The +// container runtime usually uses a different, globally unique name +// (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. 
+func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// An OCI image manifest. +const ( + // OciManifestDigestKey is the attribute Key conforming to the + // "oci.manifest.digest" semantic conventions. 
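(Editor's aside: every constructor in this family has the same shape, so one hedged sketch covers them all — the returned KeyValues are typically collected into an SDK resource. The import paths assume the usual OpenTelemetry Go layout; the v1.24.0 semconv version is an assumption about which release this vendored file tracks:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	// Combine host.* and k8s.* attributes into a single resource; the
	// string values reuse the spec's own examples where available.
	res := resource.NewWithAttributes(
		semconv.SchemaURL, // schema URL constant exported by the semconv package
		semconv.HostName("node-1"),
		semconv.K8SClusterName("opentelemetry-cluster"),
		semconv.K8SClusterUID("218fc5a9-a5f1-4b54-aa05-46717d0ab26d"),
		semconv.K8SNamespaceName("default"),
		semconv.K8SPodName("opentelemetry-pod-autoconf"),
		semconv.K8SContainerName("redis"),
		semconv.K8SContainerRestartCount(0),
	)
	fmt.Println(res.Attributes())
}

)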
It represents the digest of + // the OCI image manifest. For container images specifically, this is the + // digest by which the container image is known. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4' + // Note: Follows [OCI Image Manifest + // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md), + // and specifically the [Digest + // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests). + // An example can be found in [Example Image + // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest). + OciManifestDigestKey = attribute.Key("oci.manifest.digest") +) + +// OciManifestDigest returns an attribute KeyValue conforming to the +// "oci.manifest.digest" semantic conventions. It represents the digest of the +// OCI image manifest. For container images specifically, this is the digest by +// which the container image is known. +func OciManifestDigest(val string) attribute.KeyValue { + return OciManifestDigestKey.String(val) +} + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSBuildIDKey is the attribute Key conforming to the "os.build_id" + // semantic conventions. It represents the unique identifier for a + // particular build or compilation of the operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' + OSBuildIDKey = attribute.Key("os.build_id") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human-readable (not intended to + // be parsed) OS version information, as reported by e.g. the `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human-readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + OSTypeKey = attribute.Key("os.type") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](/docs/resource/README.md#version-attributes). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" +// semantic conventions. It represents the unique identifier for a particular +// build or compilation of the operating system. +func OSBuildID(val string) attribute.KeyValue { + return OSBuildIDKey.String(val) +} + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. It represents the human-readable (not +// intended to be parsed) OS version information, as reported by e.g. the `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human-readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](/docs/resource/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// An operating system process. +const ( + // ProcessCommandKey is the attribute Key conforming to the + // "process.command" semantic conventions. It represents the command used + // to launch the process (i.e. the command name). On Linux based systems, + // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can + // be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents all the + // command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, + // this would be the full argv vector passed to `main`. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'cmd/otelcol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. 
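(Editor's aside: one detail worth flagging here is that `os.type` is an Enum attribute, so it is set with one of the predefined vars from the block above rather than with a constructor, while the string-valued keys each get a helper. A minimal hedged sketch, with the semconv import version again an assumption:

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// osAttrs mixes the enum member with the string-valued constructors.
func osAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.OSTypeLinux, // enum member: already a complete KeyValue
		semconv.OSName("Ubuntu"),
		semconv.OSVersion("18.04.1"),
		semconv.OSDescription("Ubuntu 18.04.1 LTS"),
		semconv.OSBuildID("22621"),
	}
}

)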
It represents the full + // command used to launch the process as a single string. On Windows, can + // be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full + // path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PPID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") + + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") +) + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be +// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to +// the first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents all the +// command arguments (including the command/executable itself) as received by +// the process. On Linux-based systems (and some other Unixoid systems +// supporting procfs), can be set according to the list of null-delimited +// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, +// this would be the full argv vector passed to `main`. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string. +// On Windows, can be set to the result of `GetCommandLineW`. Do not set this +// if you have to assemble it just for monitoring; use `process.command_args` +// instead. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of +// the process executable. On Linux based systems, can be set to the `Name` in +// `proc/[pid]/status`. On Windows, can be set to the base name of +// `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. It represents the full path +// to the process executable. On Linux based systems, can be set to the target +// of `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the +// "process.owner" semantic conventions. It represents the username of the user +// that owns the process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PPID). 
+func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// The Android platform on which the Android application is running. +const ( + // AndroidOSAPILevelKey is the attribute Key conforming to the + // "android.os.api_level" semantic conventions. It uniquely + // identifies the framework API revision offered by a version + // (`os.version`) of the Android operating system. More information can be + // found + // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '33', '32' + AndroidOSAPILevelKey = attribute.Key("android.os.api_level") +) + +// AndroidOSAPILevel returns an attribute KeyValue conforming to the +// "android.os.api_level" semantic conventions. It uniquely +// identifies the framework API revision offered by a version (`os.version`) of +// the Android operating system. More information can be found +// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). +func AndroidOSAPILevel(val string) attribute.KeyValue { + return AndroidOSAPILevelKey.String(val) +} + +// The web browser in which the application represented by the resource is +// running. The `browser.*` attributes MUST be used only for resources that +// represent applications running in a web browser (regardless of whether +// running on a mobile or desktop device). +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. 
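(Editor's aside: most of the process.* values can be captured directly from the running process with the standard library. Note how `process.command_args` is populated from os.Args rather than re-assembled into a single command line, matching the guidance above. A hedged sketch, with the semconv import version an assumption:

import (
	"os"
	"os/user"
	"path/filepath"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// processAttrs describes the current process using the conventions above.
func processAttrs() ([]attribute.KeyValue, error) {
	exe, err := os.Executable()
	if err != nil {
		return nil, err
	}
	u, err := user.Current()
	if err != nil {
		return nil, err
	}
	return []attribute.KeyValue{
		semconv.ProcessPID(os.Getpid()),
		semconv.ProcessParentPID(os.Getppid()),
		semconv.ProcessExecutablePath(exe),
		semconv.ProcessExecutableName(filepath.Base(exe)),
		semconv.ProcessCommandArgs(os.Args...), // full argv, per the convention
		semconv.ProcessOwner(u.Username),
	}, nil
}

)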
It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an + // [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the task + // definition family this task definition is a member of. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for this task definition. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS +// task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). 
+func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the task +// definition family this task definition is a member of. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// this task definition. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Resources specific to Amazon Web Services. +const ( + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers that each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. 
It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") +) + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// Resource used by Google Cloud Run. +const ( + // GCPCloudRunJobExecutionKey is the attribute Key conforming to the + // "gcp.cloud_run.job.execution" semantic conventions. It represents the + // name of the Cloud Run + // [execution](https://cloud.google.com/run/docs/managing/job-executions) + // being run for the Job, as set by the + // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") + + // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the + // "gcp.cloud_run.job.task_index" semantic conventions. It represents the + // index for a task within an execution as provided by the + // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1 + GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") +) + +// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.execution" semantic conventions. It represents the name +// of the Cloud Run +// [execution](https://cloud.google.com/run/docs/managing/job-executions) being +// run for the Job, as set by the +// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobExecution(val string) attribute.KeyValue { + return GCPCloudRunJobExecutionKey.String(val) +} + +// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.task_index" semantic conventions. 
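(Editor's aside: the ARN-valued AWS helpers are plain string setters, while the log-group and log-stream helpers are variadic because an application may write to several groups — the sidecar case called out above. A hedged sketch; the illustrative values are the spec's own examples and the semconv import version is an assumption:

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// ecsAttrs describes a Fargate task and the log groups it writes to.
func ecsAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.AWSECSLaunchtypeFargate, // enum member from the var block above
		semconv.AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
		semconv.AWSECSTaskFamily("opentelemetry-family"),
		semconv.AWSECSTaskRevision("8"),
		// Variadic: a multi-container task may log to several groups.
		semconv.AWSLogGroupNames("/aws/lambda/my-function", "opentelemetry-service"),
	}
}

)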
It represents the index +// for a task within an execution as provided by the +// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { + return GCPCloudRunJobTaskIndexKey.Int(val) +} + +// Resources used by Google Compute Engine (GCE). +const ( + // GCPGceInstanceHostnameKey is the attribute Key conforming to the + // "gcp.gce.instance.hostname" semantic conventions. It represents the + // hostname of a GCE instance. This is the full value of the default or + // [custom + // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-host1234.example.com', + // 'sample-vm.us-west1-b.c.my-project.internal' + GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") + + // GCPGceInstanceNameKey is the attribute Key conforming to the + // "gcp.gce.instance.name" semantic conventions. It represents the instance + // name of a GCE instance. This is the value provided by `host.name`, the + // visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the [default internal + // DNS + // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-1', 'my-vm-name' + GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") +) + +// GCPGceInstanceHostname returns an attribute KeyValue conforming to the +// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname +// of a GCE instance. This is the full value of the default or [custom +// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). +func GCPGceInstanceHostname(val string) attribute.KeyValue { + return GCPGceInstanceHostnameKey.String(val) +} + +// GCPGceInstanceName returns an attribute KeyValue conforming to the +// "gcp.gce.instance.name" semantic conventions. It represents the instance +// name of a GCE instance. This is the value provided by `host.name`, the +// visible name of the instance in the Cloud Console UI, and the prefix for the +// default hostname of the instance as defined by the [default internal DNS +// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). +func GCPGceInstanceName(val string) attribute.KeyValue { + return GCPGceInstanceNameKey.String(val) +} + +// Heroku dyno metadata +const ( + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + HerokuAppIDKey = attribute.Key("heroku.app.id") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit + // hash for the current release + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. 
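(Editor's aside: both Cloud Run job attributes map one-to-one onto the container-contract environment variables named in their notes, so populating them is mechanical. A hedged sketch, with the surrounding wiring left to the application:

import (
	"os"
	"strconv"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// cloudRunJobAttrs reads the documented CLOUD_RUN_* env vars, if present.
func cloudRunJobAttrs() []attribute.KeyValue {
	var attrs []attribute.KeyValue
	if exec := os.Getenv("CLOUD_RUN_EXECUTION"); exec != "" {
		attrs = append(attrs, semconv.GCPCloudRunJobExecution(exec))
	}
	if idx, err := strconv.Atoi(os.Getenv("CLOUD_RUN_TASK_INDEX")); err == nil {
		attrs = append(attrs, semconv.GCPCloudRunJobTaskIndex(idx))
	}
	return attrs
}

)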
It represents + // the time and date the release was created + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2022-10-23T18:00:42Z' + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") +) + +// HerokuAppID returns an attribute KeyValue conforming to the +// "heroku.app.id" semantic conventions. It represents the unique identifier +// for the application +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming +// to the "heroku.release.creation_timestamp" semantic conventions. It +// represents the time and date the release was created +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// The software deployment. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'staging', 'production' + // Note: `deployment.environment` does not affect the uniqueness + // constraints defined through + // the `service.namespace`, `service.name` and `service.instance.id` + // resource attributes. + // This implies that resources carrying the following attribute + // combinations MUST be + // considered to be identifying the same service: + // + // * `service.name=frontend`, `deployment.environment=production` + // * `service.name=frontend`, `deployment.environment=staging`. + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment environment](https://wikipedia.org/wiki/Deployment_environment) +// (aka deployment tier). +func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// A serverless instance. +const ( + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, which will potentially be reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function converted to Bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. 
On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must + // be multiplied by 1,048,576). + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") + + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this + // runtime instance executes. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The + // following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud + // providers/products: + // + // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `cloud.resource_id` attribute). + FaaSNameKey = attribute.Key("faas.name") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run (Services):** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") +) + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. It represents the execution +// environment ID as a string, which will potentially be reused for other +// invocations to the same function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes. 
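(Editor's aside: the faas.max_memory note hides a small unit conversion — AWS_LAMBDA_FUNCTION_MEMORY_SIZE reports MiB while the attribute wants bytes, hence the multiply by 1,048,576. A hedged sketch for Lambda, using the standard Lambda runtime env vars (the FUNCTION_NAME/FUNCTION_VERSION variables are not defined in this file) and the constructors that appear just below:

import (
	"os"
	"strconv"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// lambdaFaaSAttrs derives faas.* attributes from the Lambda environment.
func lambdaFaaSAttrs() []attribute.KeyValue {
	var attrs []attribute.KeyValue
	if name := os.Getenv("AWS_LAMBDA_FUNCTION_NAME"); name != "" {
		attrs = append(attrs, semconv.FaaSName(name))
	}
	if ver := os.Getenv("AWS_LAMBDA_FUNCTION_VERSION"); ver != "" {
		attrs = append(attrs, semconv.FaaSVersion(ver))
	}
	if mib, err := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE")); err == nil {
		attrs = append(attrs, semconv.FaaSMaxMemory(mib*1048576)) // MiB -> bytes
	}
	return attrs
}

)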
+func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// A service instance. +const ( + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, + // the value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. The format is not defined by these + // conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2.0.0', 'a01dbef8a' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. The format is not defined by these +// conventions. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// A service instance. +const ( + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-k8s-pod-deployment-1', + // '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to distinguish instances of the same + // service that exist at the same time (e.g. instances of a horizontally + // scaled service). It is preferable for the ID to be persistent and stay + // the same for the lifetime of the service instance, however it is + // acceptable that the ID is ephemeral and changes during important + // lifetime events for the service (e.g. service restarts). If the service + // has no inherent unique ID that can be used as the value of this + // attribute it is recommended to generate a random Version 1 or Version 4 + // RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. 
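(Editor's aside: the `unknown_service` fallback in the service.name note is a normative MUST for SDKs; spelled out, the rule is just this, sketched here under the same assumed imports:

import (
	"os"
	"path/filepath"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// fallbackServiceName implements the documented default: `unknown_service:`
// plus the executable name, or bare `unknown_service` when that is unknown.
func fallbackServiceName() attribute.KeyValue {
	exe, err := os.Executable()
	if err != nil || exe == "" {
		return semconv.ServiceName("unknown_service")
	}
	return semconv.ServiceName("unknown_service:" + filepath.Base(exe))
}

)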
It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") +) + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'opentelemetry' + // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute + // to `opentelemetry`. + // If another SDK, like a fork or a vendor-provided implementation, is + // used, this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module + // name of this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this + // case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. 
+ // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetryDistroNameKey is the attribute Key conforming to the + // "telemetry.distro.name" semantic conventions. It represents the name of + // the auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'parts-unlimited-java' + // Note: Official auto instrumentation agents and distributions SHOULD set + // the `telemetry.distro.name` attribute to + // a string starting with `opentelemetry-`, e.g. + // `opentelemetry-java-instrumentation`. + TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") + + // TelemetryDistroVersionKey is the attribute Key conforming to the + // "telemetry.distro.version" semantic conventions. It represents the + // version string of the auto instrumentation agent or distribution, if + // used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.2.3' + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") +) + +// TelemetryDistroName returns an attribute KeyValue conforming to the +// "telemetry.distro.name" semantic conventions. It represents the name of the +// auto instrumentation agent or distribution, if used. +func TelemetryDistroName(val string) attribute.KeyValue { + return TelemetryDistroNameKey.String(val) +} + +// TelemetryDistroVersion returns an attribute KeyValue conforming to the +// "telemetry.distro.version" semantic conventions. It represents the version +// string of the auto instrumentation agent or distribution, if used. 
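The service and telemetry helpers above are typically consumed when constructing an SDK resource. A minimal sketch, assuming the standard `go.opentelemetry.io/otel/sdk/resource` package at a version compatible with this semconv release; the attribute values are the documented examples:

package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// newServiceResource identifies this service instance. The telemetry.sdk.*
// attributes are normally contributed by the SDK itself (resource.Default),
// so only the service.* identity is declared by hand here.
func newServiceResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL, // ties the attributes to schema version 1.24.0
		semconv.ServiceName("shoppingcart"),
		semconv.ServiceVersion("2.0.0"),
		semconv.ServiceNamespace("Shop"),
		semconv.ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
	)
}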
+func TelemetryDistroVersion(val string) attribute.KeyValue { + return TelemetryDistroVersionKey.String(val) +} + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") + + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") +) + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OTelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.0.0' + OTelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. 
It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry +// Scope's concepts. +const ( + // OTelLibraryNameKey is the attribute Key conforming to the + // "otel.library.name" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'io.opentelemetry.contrib.mongodb' + // Deprecated: use the `otel.scope.name` attribute. + OTelLibraryNameKey = attribute.Key("otel.library.name") + + // OTelLibraryVersionKey is the attribute Key conforming to the + // "otel.library.version" semantic conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '1.0.0' + // Deprecated: use the `otel.scope.version` attribute. + OTelLibraryVersionKey = attribute.Key("otel.library.version") +) + +// OTelLibraryName returns an attribute KeyValue conforming to the +// "otel.library.name" semantic conventions. +// +// Deprecated: use the `otel.scope.name` attribute. +func OTelLibraryName(val string) attribute.KeyValue { + return OTelLibraryNameKey.String(val) +} + +// OTelLibraryVersion returns an attribute KeyValue conforming to the +// "otel.library.version" semantic conventions. +// +// Deprecated: use the `otel.scope.version` attribute. +func OTelLibraryVersion(val string) attribute.KeyValue { + return OTelLibraryVersionKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go new file mode 100644 index 000000000..fe80b1731 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.24.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go new file mode 100644 index 000000000..c1718234e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go @@ -0,0 +1,1323 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" + +import "go.opentelemetry.io/otel/attribute" + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](/docs/resource/README.md#service) of the remote + // service. SHOULD be equal to the actual `service.name` resource attribute + // of the remote service if any. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. 
It represents the +// [`service.name`](/docs/resource/README.md#service) of the remote service. +// SHOULD be equal to the actual `service.name` resource attribute of the +// remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// These attributes may be used for any operation with an authenticated and/or +// authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. +func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). +func EnduserScope(val string) attribute.KeyValue { + return EnduserScopeKey.String(val) +} + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // CodeColumnKey is the attribute Key conforming to the "code.column" + // semantic conventions. 
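The `peer.service` and `enduser.*` helpers above produce span-level attributes. A minimal sketch, assuming the `go.opentelemetry.io/otel` trace API; the tracer name and attribute values are illustrative:

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// handleLogin shows span-level use of peer.service and enduser.* attributes.
func handleLogin(ctx context.Context) {
	ctx, span := otel.Tracer("example/auth").Start(ctx, "login")
	defer span.End()

	span.SetAttributes(
		semconv.PeerService("AuthTokenCache"), // remote service.name, if known
		semconv.EnduserID("username"),
		semconv.EnduserRole("admin"),
		semconv.EnduserScope("read:message, write:files"),
	)
}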
It represents the column number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeStacktraceKey is the attribute Key conforming to the + // "code.stacktrace" semantic conventions. It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'at + // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + CodeStacktraceKey = attribute.Key("code.stacktrace") +) + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. 
It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). +func CodeFunction(val string) attribute.KeyValue { + return CodeFunctionKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeNamespace returns an attribute KeyValue conforming to the +// "code.namespace" semantic conventions. It represents the "namespace" within +// which `code.function` is defined. Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. +func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeStacktrace returns an attribute KeyValue conforming to the +// "code.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func CodeStacktrace(val string) attribute.KeyValue { + return CodeStacktraceKey.String(val) +} + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed + // to OS thread ID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" + // semantic conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" +// semantic conventions. It represents the current "managed" thread ID (as +// opposed to OS thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// Span attributes used by AWS Lambda (in addition to general `faas` +// attributes). +const ( + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `cloud.resource_id` if an alias is + // involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. 
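The `code.*` helpers above describe the unit of code a span covers. A short sketch, assuming an already-started `trace.Span`; the location values are the documented examples:

package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// codeLocation bundles the code.* attributes for the unit of code a span
// covers; the values here are illustrative.
func codeLocation() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.CodeNamespace("com.example.MyHTTPService"),
		semconv.CodeFunction("serveRequest"),
		semconv.CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
		semconv.CodeLineNumber(42),
		semconv.CodeColumn(16),
	}
}

// annotate attaches the location to an existing span.
func annotate(span trace.Span) {
	span.SetAttributes(codeLocation()...)
}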
It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for CloudEvents. CloudEvents is a specification on how to define +// event data in a standard way. These attributes can be attached to spans when +// performing operations with CloudEvents, regardless of the protocol being +// used. +const ( + // CloudeventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the + // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) + // uniquely identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudeventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the + // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) + // identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'https://github.com/cloudevents', + // '/cloudevents/spec/pull/123', 'my-service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudeventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents + // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) + // which the event uses. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudeventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. It represents the + // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) + // of the event in the context of the event producer (identified by + // source). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") + + // CloudeventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the + // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) + // contains a value describing the type of event related to the originating + // occurrence. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.github.pull_request.opened', + // 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") +) + +// CloudeventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the +// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) +// uniquely identifies the event. 
+func CloudeventsEventID(val string) attribute.KeyValue { + return CloudeventsEventIDKey.String(val) +} + +// CloudeventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the +// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) +// identifies the context in which an event happened. +func CloudeventsEventSource(val string) attribute.KeyValue { + return CloudeventsEventSourceKey.String(val) +} + +// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to +// the "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents +// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) +// which the event uses. +func CloudeventsEventSpecVersion(val string) attribute.KeyValue { + return CloudeventsEventSpecVersionKey.String(val) +} + +// CloudeventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the +// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) +// of the event in the context of the event producer (identified by source). +func CloudeventsEventSubject(val string) attribute.KeyValue { + return CloudeventsEventSubjectKey.String(val) +} + +// CloudeventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the +// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) +// contains a value describing the type of event related to the originating +// occurrence. +func CloudeventsEventType(val string) attribute.KeyValue { + return CloudeventsEventTypeKey.String(val) +} + +// Semantic conventions for the OpenTracing Shim +const ( + // OpentracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the + // parent-child Reference type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The causal relationship between a child Span and a parent Span. + OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span doesn't depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // OTelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. 
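A sketch of attaching the CloudEvents attributes above to a processing span, assuming an existing `trace.Span`; the event fields are illustrative:

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// recordCloudEvent annotates a processing span with the CloudEvents
// attributes defined above.
func recordCloudEvent(span trace.Span) {
	span.SetAttributes(
		semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
		semconv.CloudeventsEventSource("https://github.com/cloudevents"),
		semconv.CloudeventsEventSpecVersion("1.0"),
		semconv.CloudeventsEventType("com.example.object.deleted.v2"),
	)
}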
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'resource not found' + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// This semantic convention describes an instance of a function that runs +// without provisioning or managing of servers (also known as serverless +// functions or Function as a Service (FaaS)) with spans. +const ( + // FaaSInvocationIDKey is the attribute Key conforming to the + // "faas.invocation_id" semantic conventions. It represents the invocation + // ID of the current function invocation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSInvocationIDKey = attribute.Key("faas.invocation_id") +) + +// FaaSInvocationID returns an attribute KeyValue conforming to the +// "faas.invocation_id" semantic conventions. It represents the invocation ID +// of the current function invocation. +func FaaSInvocationID(val string) attribute.KeyValue { + return FaaSInvocationIDKey.String(val) +} + +// Semantic Convention for FaaS triggered as a response to some data source +// operation such as a database or filesystem read/write. +const ( + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name + // of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in + // Cosmos DB to the database name. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or + // S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It describes the type of + // the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Required + // Stability: experimental + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string + // containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") + + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") +) + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). 
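A sketch of marking a FaaS span as datasource-triggered with the helpers above, assuming an existing `trace.Span`; note that the enum member `FaaSDocumentOperationInsert` is already an `attribute.KeyValue` and is used directly:

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// recordDatasourceTrigger marks a FaaS span as triggered by an object being
// created in a bucket; the values are the documented examples.
func recordDatasourceTrigger(span trace.Span) {
	span.SetAttributes(
		semconv.FaaSDocumentCollection("myBucketName"),
		semconv.FaaSDocumentName("myFile.txt"),
		semconv.FaaSDocumentOperationInsert, // enum value, used as-is
		semconv.FaaSDocumentTime("2020-01-23T13:47:06Z"),
	)
}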
+func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// Contains additional attributes for incoming FaaS spans. +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// The `aws` conventions apply to operations using the AWS SDK. They map +// request or response parameters in AWS SDK API calls to attributes on a Span. +// The conventions have been collected over time based on feedback from AWS +// users of tracing and will continue to evolve as new interesting conventions +// are found. +// Some descriptions are also provided for populating general OpenTelemetry +// semantic conventions based on these APIs. +const ( + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. It represents the AWS request ID as returned in + // the response headers `x-amz-request-id` or `x-amz-requestid`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes that exist for multiple DynamoDB request types. +const ( + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
+ // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") +) + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. 
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// DynamoDB.CreateTable +const ( + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") +) + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. 
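A sketch of recording common DynamoDB request parameters with the helpers above, assuming an existing `trace.Span`; the parameter values are the documented examples:

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateQuery records common DynamoDB request parameters on a span.
func annotateQuery(span trace.Span) {
	span.SetAttributes(
		semconv.AWSDynamoDBTableNames("Users"), // variadic, one KeyValue for the slice
		semconv.AWSDynamoDBIndexName("name_to_group"),
		semconv.AWSDynamoDBConsistentRead(true),
		semconv.AWSDynamoDBLimit(10),
		semconv.AWSDynamoDBProjection("Title, Price, Color"),
	)
}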
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// DynamoDB.ListTables +const ( + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the + // number of items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") +) + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the +// number of items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// DynamoDB.Query +const ( + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// DynamoDB.Scan +const ( + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") + + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter.
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") +) + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. +func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// DynamoDB.UpdateTable +const ( + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in the + // `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") +) + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming +// to the "aws.dynamodb.attribute_definitions" semantic conventions. It +// represents the JSON-serialized value of each item in the +// `AttributeDefinitions` request field.
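A companion sketch for a segmented `Scan` call, again assuming an existing `trace.Span`; the segment values are the documented examples, while the response counts come from the caller:

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateScan records a segmented Scan request and its response counts.
func annotateScan(span trace.Span, count, scanned int) {
	span.SetAttributes(
		semconv.AWSDynamoDBSegment(10),
		semconv.AWSDynamoDBTotalSegments(100),
		semconv.AWSDynamoDBCount(count),        // Count response parameter
		semconv.AWSDynamoDBScannedCount(scanned), // ScannedCount response parameter
	)
}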
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. +func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// Attributes that exist for S3 request types. +const ( + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request + // refers to. Corresponds to the `--bucket` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'some-bucket-name' + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e. that require the bucket name as a mandatory + // parameter. + // This applies to almost all S3 operations except `list-buckets`. + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source + // object (in the form `bucket`/`key`) for the copy operation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' + // Note: The `delete` attribute is only applicable to the + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `key` attribute is applicable to all object-related S3 + // operations, i.e. that require the object key as a mandatory parameter. + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation. +func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// Semantic conventions to apply when instrumenting the GraphQL implementation. +// They map GraphQL operations to attributes on a Span. +const ( + // GraphqlDocumentKey is the attribute Key conforming to the + // "graphql.document" semantic conventions. It represents the GraphQL + // document being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + GraphqlDocumentKey = attribute.Key("graphql.document") + + // GraphqlOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of + // the operation being executed. 
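The `aws.s3.*` helpers defined above compose the same way as the DynamoDB ones. A hedged sketch of annotating an `upload-part` client span, using the same illustrative semconv import as earlier; the example values echo the `Examples:` strings in the doc comments, and `span` is assumed to come from a prior `tracer.Start` call:

```go
package instrumentation

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // illustrative version
	"go.opentelemetry.io/otel/trace"
)

// annotateUploadPart sets the aws.s3.* attributes that the notes above mark
// as applicable to the upload-part operation.
func annotateUploadPart(span trace.Span, bucket, key, uploadID string, partNumber int) {
	span.SetAttributes(
		semconv.AWSS3Bucket(bucket),         // "some-bucket-name"
		semconv.AWSS3Key(key),               // "someFile.yml"
		semconv.AWSS3UploadID(uploadID),     // "dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"
		semconv.AWSS3PartNumber(partNumber), // positive integer, 1 to 10,000
	)
}
```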
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'findBookByID' + GraphqlOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphqlOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of + // the operation being executed. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'query', 'mutation', 'subscription' + GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go index caf7249de..6836c6547 100644 --- a/vendor/go.opentelemetry.io/otel/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/trace/README.md b/vendor/go.opentelemetry.io/otel/trace/README.md new file mode 100644 index 000000000..58ccaba69 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/README.md @@ -0,0 +1,3 @@ +# Trace API + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/trace) diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index 3aadc66cf..273d58e00 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
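That closes out the semconv additions in this file. The GraphQL helpers follow the same pattern, with the operation type exposed as enum members rather than free-form strings. A small sketch under the same assumptions as the examples above:

```go
package instrumentation

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // illustrative version
	"go.opentelemetry.io/otel/trace"
)

// annotateGraphQLQuery records the graphql.* attributes for a query
// operation; GraphqlOperationTypeQuery is one of the enum values above.
func annotateGraphQLQuery(span trace.Span, document, operationName string) {
	span.SetAttributes(
		semconv.GraphqlDocument(document), // may need sanitizing by the caller
		semconv.GraphqlOperationName(operationName),
		semconv.GraphqlOperationTypeQuery,
	)
}
```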
-// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go index 76f9a083c..5650a174b 100644 --- a/vendor/go.opentelemetry.io/otel/trace/context.go +++ b/vendor/go.opentelemetry.io/otel/trace/context.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" @@ -47,12 +36,12 @@ func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) conte // performs no operations is returned. func SpanFromContext(ctx context.Context) Span { if ctx == nil { - return noopSpan{} + return noopSpanInstance } if span, ok := ctx.Value(currentSpanKey).(Span); ok { return span } - return noopSpan{} + return noopSpanInstance } // SpanContextFromContext returns the current Span's SpanContext. diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go index 440f3d756..d661c5d10 100644 --- a/vendor/go.opentelemetry.io/otel/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 /* Package trace provides an implementation of the tracing part of the diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/README.md b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md new file mode 100644 index 000000000..7754a239e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md @@ -0,0 +1,3 @@ +# Trace Embedded + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/embedded) diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go index 898db5a75..3e359a00b 100644 --- a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go +++ b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
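The context.go hunk above swaps the composite literal `noopSpan{}` for a shared `noopSpanInstance`, so neither fallback path in `SpanFromContext` constructs a fresh value. The caller-visible contract is unchanged: you always get a usable `Span`. A sketch against the public trace API only:

```go
package traceusage

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

// recordCacheMiss needs no nil check: if ctx carries no span, or no SDK is
// installed, SpanFromContext returns the shared no-op span and the calls
// below are harmless no-ops.
func recordCacheMiss(ctx context.Context, key string) {
	span := trace.SpanFromContext(ctx)
	span.AddEvent("cache miss",
		trace.WithAttributes(attribute.String("cache.key", key)))
}
```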
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package embedded provides interfaces embedded within the [OpenTelemetry // trace API]. diff --git a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go index 88fcb8161..c00221e7b 100644 --- a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go +++ b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go index c125491ca..ca20e9997 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" @@ -52,7 +41,7 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption span := SpanFromContext(ctx) if _, ok := span.(nonRecordingSpan); !ok { // span is likely already a noopSpan, but let's be sure - span = noopSpan{} + span = noopSpanInstance } return ContextWithSpan(ctx, span), span } @@ -60,7 +49,7 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption // noopSpan is an implementation of Span that performs no operations. type noopSpan struct{ embedded.Span } -var _ Span = noopSpan{} +var noopSpanInstance Span = noopSpan{} // SpanContext returns an empty span context. func (noopSpan) SpanContext() SpanContext { return SpanContext{} } @@ -86,6 +75,9 @@ func (noopSpan) RecordError(error, ...EventOption) {} // AddEvent does nothing. func (noopSpan) AddEvent(string, ...EventOption) {} +// AddLink does nothing. +func (noopSpan) AddLink(Link) {} + // SetName does nothing. 
func (noopSpan) SetName(string) {} diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index 26a4b2260..28877d4ab 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" @@ -361,6 +350,12 @@ type Span interface { // AddEvent adds an event with the provided name and options. AddEvent(name string, options ...EventOption) + // AddLink adds a link. + // Adding links at span creation using WithLinks is preferred to calling AddLink + // later, for contexts that are available during span creation, because head + // sampling decisions can only consider information present during span creation. + AddLink(link Link) + // IsRecording returns the recording state of the Span. It will return // true if the Span is active and events can be recorded. IsRecording() bool diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index db936ba5b..20b5cf243 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -1,16 +1,5 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh index dbb61a422..e57bf57fc 100644 --- a/vendor/go.opentelemetry.io/otel/verify_examples.sh +++ b/vendor/go.opentelemetry.io/otel/verify_examples.sh @@ -1,18 +1,7 @@ #!/bin/bash # Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
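The notable API change in the trace.go hunk above is the new `AddLink` method on `Span`, whose doc comment steers callers toward `WithLinks` at start time so head samplers can see the link. A sketch of both paths; `tracer` and the two `SpanContext` values are assumed to come from elsewhere:

```go
package traceusage

import (
	"context"

	"go.opentelemetry.io/otel/trace"
)

// consume links the new span to the producer's trace at creation (preferred,
// per the doc comment), then attaches a second link that only became known
// mid-span via the new AddLink method.
func consume(ctx context.Context, tracer trace.Tracer, producerSC, lateSC trace.SpanContext) {
	ctx, span := tracer.Start(ctx, "consume",
		trace.WithLinks(trace.Link{SpanContext: producerSC}))
	defer span.End()

	_ = ctx // would flow into downstream calls
	span.AddLink(trace.Link{SpanContext: lateSC})
}
```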
+# SPDX-License-Identifier: Apache-2.0 set -euo pipefail diff --git a/vendor/go.opentelemetry.io/otel/verify_readmes.sh b/vendor/go.opentelemetry.io/otel/verify_readmes.sh new file mode 100644 index 000000000..1e87855ee --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/verify_readmes.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +dirs=$(find . -type d -not -path "*/internal*" -not -path "*/test*" -not -path "*/example*" -not -path "*/.*" | sort) + +missingReadme=false +for dir in $dirs; do + if [ ! -f "$dir/README.md" ]; then + echo "couldn't find README.md for $dir" + missingReadme=true + fi +done + +if [ "$missingReadme" = true ] ; then + echo "Error: some READMEs couldn't be found." + exit 1 +fi diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index 7b2993a1f..ab2896052 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -1,20 +1,9 @@ // Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.24.0" + return "1.28.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 1b556e678..241cfc82a 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -1,20 +1,9 @@ # Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
+# SPDX-License-Identifier: Apache-2.0 module-sets: stable-v1: - version: v1.24.0 + version: v1.28.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -40,17 +29,21 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.46.0 + version: v0.50.0 modules: - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.0.1-alpha + version: v0.4.0 modules: - go.opentelemetry.io/otel/log + - go.opentelemetry.io/otel/sdk/log + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp + - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.7 + version: v0.0.8 modules: - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE index 6a66aea5e..2a7cf70da 100644 --- a/vendor/golang.org/x/exp/LICENSE +++ b/vendor/golang.org/x/exp/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index 4ed22bd76..4ebeb61c1 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -100,18 +100,12 @@ func credsNewAuth(ctx context.Context, settings *DialSettings) (*google.Credenti aud = settings.DefaultAudience } - tokenURL, oauth2Client, err := GetOAuth2Configuration(ctx, settings) - if err != nil { - return nil, err - } creds, err := credentials.DetectDefault(&credentials.DetectOptions{ Scopes: scopes, Audience: aud, CredentialsFile: settings.CredentialsFile, CredentialsJSON: settings.CredentialsJSON, UseSelfSignedJWT: useSelfSignedJWT, - TokenURL: tokenURL, - Client: oauth2Client, }) if err != nil { return nil, err diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index 7a6798036..e4e4e6bf7 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. 
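Backtracking to the google.golang.org/api creds hunk above: `credsNewAuth` no longer threads a `TokenURL`/`Client` pair into detection, leaving only the fields shown. A hedged sketch of the surviving call shape against `cloud.google.com/go/auth/credentials`; the scope value is illustrative, and the follow-up `Token` call is only one plausible way to exercise the result:

```go
package internal

import (
	"context"

	"cloud.google.com/go/auth/credentials"
)

// detect mirrors the DetectDefault call left after this change; only fields
// that appear in the hunk above are set.
func detect(ctx context.Context) error {
	creds, err := credentials.DetectDefault(&credentials.DetectOptions{
		Scopes:           []string{"https://www.googleapis.com/auth/cloud-platform"},
		UseSelfSignedJWT: false,
	})
	if err != nil {
		return err
	}
	_, err = creds.Token(ctx) // force one token fetch to confirm detection
	return err
}
```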
-const Version = "0.191.0" +const Version = "0.193.0" diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index 78e9eb1c7..5ee9276a0 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -43,7 +43,7 @@ "location": "me-central2" } ], - "etag": "\"323732353932323032353837333633313231\"", + "etag": "\"34373939373134303235393739323331393435\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -560,7 +560,7 @@ "buckets": { "methods": { "delete": { - "description": "Permanently deletes an empty bucket.", + "description": "Deletes an empty bucket. Deletions are permanent unless soft delete is enabled on the bucket.", "httpMethod": "DELETE", "id": "storage.buckets.delete", "parameterOrder": [ @@ -612,6 +612,12 @@ "required": true, "type": "string" }, + "generation": { + "description": "If present, specifies the generation of the bucket. This is required if softDeleted is true.", + "format": "int64", + "location": "query", + "type": "string" + }, "ifMetagenerationMatch": { "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", "format": "int64", @@ -637,6 +643,11 @@ "location": "query", "type": "string" }, + "softDeleted": { + "description": "If true, return the soft-deleted version of this bucket. The default is false. For more information, see [Soft Delete](https://cloud.google.com/storage/docs/soft-delete).", + "location": "query", + "type": "boolean" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -860,6 +871,11 @@ "location": "query", "type": "string" }, + "softDeleted": { + "description": "If true, only soft-deleted bucket versions will be returned. The default is false. For more information, see [Soft Delete](https://cloud.google.com/storage/docs/soft-delete).", + "location": "query", + "type": "boolean" + }, "userProject": { "description": "The project to be billed for this request.", "location": "query", @@ -1013,6 +1029,41 @@ "https://www.googleapis.com/auth/devstorage.full_control" ] }, + "restore": { + "description": "Restores a soft-deleted bucket.", + "httpMethod": "POST", + "id": "storage.buckets.restore", + "parameterOrder": [ + "bucket", + "generation" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "Generation of a bucket.", + "format": "int64", + "location": "query", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/restore", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, "setIamPolicy": { "description": "Updates an IAM policy for the specified bucket.", "httpMethod": "PUT", @@ -2779,7 +2830,7 @@ "type": "string" }, "softDeleted": { - "description": "If true, only soft-deleted object versions will be listed. The default is false. 
For more information, see Soft Delete.", + "description": "If true, only soft-deleted object versions will be listed. The default is false. For more information, see [Soft Delete](https://cloud.google.com/storage/docs/soft-delete).", "location": "query", "type": "boolean" }, @@ -3041,7 +3092,7 @@ "type": "string" }, "softDeleted": { - "description": "If true, only soft-deleted object versions will be listed. The default is false. For more information, see Soft Delete.", + "description": "If true, only soft-deleted object versions will be listed. The default is false. For more information, see [Soft Delete](https://cloud.google.com/storage/docs/soft-delete).", "location": "query", "type": "boolean" }, @@ -3056,7 +3107,7 @@ "type": "string" }, "versions": { - "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", + "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see [Object Versioning](https://cloud.google.com/storage/docs/object-versioning).", "location": "query", "type": "boolean" } @@ -3235,7 +3286,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -3704,7 +3755,7 @@ "type": "string" }, "versions": { - "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", + "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see [Object Versioning](https://cloud.google.com/storage/docs/object-versioning).", "location": "query", "type": "boolean" } @@ -4006,7 +4057,7 @@ ] }, "update": { - "description": "Updates the state of an HMAC key. See the HMAC Key resource descriptor for valid states.", + "description": "Updates the state of an HMAC key. See the [HMAC Key resource descriptor](https://cloud.google.com/storage/docs/json_api/v1/projects/hmacKeys/update#request-body) for valid states.", "httpMethod": "PUT", "id": "storage.projects.hmacKeys.update", "parameterOrder": [ @@ -4085,7 +4136,7 @@ } } }, - "revision": "20240706", + "revision": "20240809", "rootUrl": "https://storage.googleapis.com/", "schemas": { "AnywhereCache": { @@ -4293,6 +4344,11 @@ "description": "HTTP 1.1 Entity tag for the bucket.", "type": "string" }, + "generation": { + "description": "The generation of this bucket.", + "format": "int64", + "type": "string" + }, "hierarchicalNamespace": { "description": "The bucket's hierarchical namespace configuration.", "properties": { @@ -4404,7 +4460,7 @@ "type": "object" }, "lifecycle": { - "description": "The bucket's lifecycle configuration. See lifecycle management for more information.", + "description": "The bucket's lifecycle configuration. 
See [Lifecycle Management](https://cloud.google.com/storage/docs/lifecycle) for more information.", "properties": { "rule": { "description": "A lifecycle management rule, which is made of an action to take and the condition(s) under which the action will be taken.", @@ -4503,7 +4559,7 @@ "type": "object" }, "location": { - "description": "The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. See the developer's guide for the authoritative list.", + "description": "The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. See the [Developer's Guide](https://cloud.google.com/storage/docs/locations) for the authoritative list.", "type": "string" }, "locationType": { @@ -4591,6 +4647,10 @@ "description": "The Recovery Point Objective (RPO) of this bucket. Set to ASYNC_TURBO to turn on Turbo Replication on a bucket.", "type": "string" }, + "satisfiesPZI": { + "description": "Reserved for future use.", + "type": "boolean" + }, "satisfiesPZS": { "description": "Reserved for future use.", "type": "boolean" @@ -4616,7 +4676,7 @@ "type": "object" }, "storageClass": { - "description": "The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see storage classes.", + "description": "The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see [Storage Classes](https://cloud.google.com/storage/docs/storage-classes).", "type": "string" }, "timeCreated": { @@ -4640,7 +4700,7 @@ "type": "object" }, "website": { - "description": "The bucket's website configuration, controlling how the service behaves when accessing bucket contents as a web site. See the Static Website Examples for more information.", + "description": "The bucket's website configuration, controlling how the service behaves when accessing bucket contents as a web site. See the [Static Website Examples](https://cloud.google.com/storage/docs/static-website) for more information.", "properties": { "mainPageSuffix": { "description": "If the requested object path is missing, the service will ensure the path has a trailing '/', append this suffix, and attempt to retrieve the resulting object. This allows the creation of index.html objects to represent directory pages.", @@ -5435,7 +5495,7 @@ "type": "string" }, "crc32c": { - "description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. For more information about using the CRC32c checksum, see Hashes and ETags: Best Practices.", + "description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. 
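The `crc32c` description here is precise enough to implement directly: CRC32c is the Castagnoli polynomial (the one RFC 4960, Appendix B specifies), and the field carries the big-endian bytes of the checksum, base64-encoded. A small sketch using only Go's standard library:

```go
package checksum

import (
	"encoding/base64"
	"encoding/binary"
	"hash/crc32"
)

// gcsCRC32C returns the checksum in the form the field describes: CRC32c
// (Castagnoli polynomial), serialized big-endian, then base64-encoded.
func gcsCRC32C(data []byte) string {
	sum := crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli))
	var be [4]byte
	binary.BigEndian.PutUint32(be[:], sum)
	return base64.StdEncoding.EncodeToString(be[:])
}
```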
For more information about using the CRC32c checksum, see [Data Validation and Change Detection](https://cloud.google.com/storage/docs/data-validation).", "type": "string" }, "customTime": { @@ -5489,7 +5549,7 @@ "type": "string" }, "md5Hash": { - "description": "MD5 hash of the data; encoded using base64. For more information about using the MD5 hash, see Hashes and ETags: Best Practices.", + "description": "MD5 hash of the data; encoded using base64. For more information about using the MD5 hash, see [Data Validation and Change Detection](https://cloud.google.com/storage/docs/data-validation).", "type": "string" }, "mediaLink": { diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 27504b0aa..8a16d7734 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -134,6 +134,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) opts = append(opts, internaloption.WithDefaultEndpointTemplate(basePathTemplate)) opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) + opts = append(opts, internaloption.EnableNewAuthLibrary()) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -460,6 +461,8 @@ type Bucket struct { Encryption *BucketEncryption `json:"encryption,omitempty"` // Etag: HTTP 1.1 Entity tag for the bucket. Etag string `json:"etag,omitempty"` + // Generation: The generation of this bucket. + Generation int64 `json:"generation,omitempty,string"` // HierarchicalNamespace: The bucket's hierarchical namespace configuration. HierarchicalNamespace *BucketHierarchicalNamespace `json:"hierarchicalNamespace,omitempty"` // IamConfiguration: The bucket's IAM configuration. @@ -475,12 +478,13 @@ type Bucket struct { Kind string `json:"kind,omitempty"` // Labels: User-provided labels, in key/value pairs. Labels map[string]string `json:"labels,omitempty"` - // Lifecycle: The bucket's lifecycle configuration. See lifecycle management - // for more information. + // Lifecycle: The bucket's lifecycle configuration. See Lifecycle Management + // (https://cloud.google.com/storage/docs/lifecycle) for more information. Lifecycle *BucketLifecycle `json:"lifecycle,omitempty"` // Location: The location of the bucket. Object data for objects in the bucket // resides in physical storage within this region. Defaults to US. See the - // developer's guide for the authoritative list. + // Developer's Guide (https://cloud.google.com/storage/docs/locations) for the + // authoritative list. Location string `json:"location,omitempty"` // LocationType: The type of the bucket location. LocationType string `json:"locationType,omitempty"` @@ -511,6 +515,8 @@ type Bucket struct { // Rpo: The Recovery Point Objective (RPO) of this bucket. Set to ASYNC_TURBO // to turn on Turbo Replication on a bucket. Rpo string `json:"rpo,omitempty"` + // SatisfiesPZI: Reserved for future use. + SatisfiesPZI bool `json:"satisfiesPZI,omitempty"` // SatisfiesPZS: Reserved for future use. SatisfiesPZS bool `json:"satisfiesPZS,omitempty"` // SelfLink: The URI of this bucket. @@ -525,7 +531,8 @@ type Bucket struct { // storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, // COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If this value is not // specified when the bucket is created, it will default to STANDARD. 
For more - // information, see storage classes. + // information, see Storage Classes + // (https://cloud.google.com/storage/docs/storage-classes). StorageClass string `json:"storageClass,omitempty"` // TimeCreated: The creation time of the bucket in RFC 3339 format. TimeCreated string `json:"timeCreated,omitempty"` @@ -535,7 +542,8 @@ type Bucket struct { Versioning *BucketVersioning `json:"versioning,omitempty"` // Website: The bucket's website configuration, controlling how the service // behaves when accessing bucket contents as a web site. See the Static Website - // Examples for more information. + // Examples (https://cloud.google.com/storage/docs/static-website) for more + // information. Website *BucketWebsite `json:"website,omitempty"` // ServerResponse contains the HTTP response code and headers from the server. @@ -886,8 +894,9 @@ func (s BucketIpFilterVpcNetworkSources) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } -// BucketLifecycle: The bucket's lifecycle configuration. See lifecycle -// management for more information. +// BucketLifecycle: The bucket's lifecycle configuration. See Lifecycle +// Management (https://cloud.google.com/storage/docs/lifecycle) for more +// information. type BucketLifecycle struct { // Rule: A lifecycle management rule, which is made of an action to take and // the condition(s) under which the action will be taken. @@ -1199,7 +1208,8 @@ func (s BucketVersioning) MarshalJSON() ([]byte, error) { // BucketWebsite: The bucket's website configuration, controlling how the // service behaves when accessing bucket contents as a web site. See the Static -// Website Examples for more information. +// Website Examples (https://cloud.google.com/storage/docs/static-website) for +// more information. type BucketWebsite struct { // MainPageSuffix: If the requested object path is missing, the service will // ensure the path has a trailing '/', append this suffix, and attempt to @@ -2166,7 +2176,8 @@ type Object struct { ContentType string `json:"contentType,omitempty"` // Crc32c: CRC32c checksum, as described in RFC 4960, Appendix B; encoded using // base64 in big-endian byte order. For more information about using the CRC32c - // checksum, see Hashes and ETags: Best Practices. + // checksum, see Data Validation and Change Detection + // (https://cloud.google.com/storage/docs/data-validation). Crc32c string `json:"crc32c,omitempty"` // CustomTime: A timestamp in RFC 3339 format specified by the user for an // object. @@ -2204,7 +2215,8 @@ type Object struct { // request to fail with status code 400 - Bad Request. KmsKeyName string `json:"kmsKeyName,omitempty"` // Md5Hash: MD5 hash of the data; encoded using base64. For more information - // about using the MD5 hash, see Hashes and ETags: Best Practices. + // about using the MD5 hash, see Data Validation and Change Detection + // (https://cloud.google.com/storage/docs/data-validation). Md5Hash string `json:"md5Hash,omitempty"` // MediaLink: Media download link. MediaLink string `json:"mediaLink,omitempty"` @@ -4173,7 +4185,8 @@ type BucketsDeleteCall struct { header_ http.Header } -// Delete: Permanently deletes an empty bucket. +// Delete: Deletes an empty bucket. Deletions are permanent unless soft delete +// is enabled on the bucket. // // - bucket: Name of a bucket. 
func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall { @@ -4277,6 +4290,13 @@ func (r *BucketsService) Get(bucket string) *BucketsGetCall { return c } +// Generation sets the optional parameter "generation": If present, specifies +// the generation of the bucket. This is required if softDeleted is true. +func (c *BucketsGetCall) Generation(generation int64) *BucketsGetCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + // IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": // Makes the return of the bucket metadata conditional on whether the bucket's // current metageneration matches the given value. @@ -4306,6 +4326,15 @@ func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall { return c } +// SoftDeleted sets the optional parameter "softDeleted": If true, return the +// soft-deleted version of this bucket. The default is false. For more +// information, see Soft Delete +// (https://cloud.google.com/storage/docs/soft-delete). +func (c *BucketsGetCall) SoftDeleted(softDeleted bool) *BucketsGetCall { + c.urlParams_.Set("softDeleted", fmt.Sprint(softDeleted)) + return c +} + // UserProject sets the optional parameter "userProject": The project to be // billed for this request. Required for Requester Pays buckets. func (c *BucketsGetCall) UserProject(userProject string) *BucketsGetCall { @@ -4874,6 +4903,15 @@ func (c *BucketsListCall) Projection(projection string) *BucketsListCall { return c } +// SoftDeleted sets the optional parameter "softDeleted": If true, only +// soft-deleted bucket versions will be returned. The default is false. For +// more information, see Soft Delete +// (https://cloud.google.com/storage/docs/soft-delete). +func (c *BucketsListCall) SoftDeleted(softDeleted bool) *BucketsListCall { + c.urlParams_.Set("softDeleted", fmt.Sprint(softDeleted)) + return c +} + // UserProject sets the optional parameter "userProject": The project to be // billed for this request. func (c *BucketsListCall) UserProject(userProject string) *BucketsListCall { @@ -5288,6 +5326,87 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { return ret, nil } +type BucketsRestoreCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Restore: Restores a soft-deleted bucket. +// +// - bucket: Name of a bucket. +// - generation: Generation of a bucket. +func (r *BucketsService) Restore(bucket string, generation int64) *BucketsRestoreCall { + c := &BucketsRestoreCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *BucketsRestoreCall) UserProject(userProject string) *BucketsRestoreCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *BucketsRestoreCall) Fields(s ...googleapi.Field) *BucketsRestoreCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
+func (c *BucketsRestoreCall) Context(ctx context.Context) *BucketsRestoreCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *BucketsRestoreCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsRestoreCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/restore") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.restore" call. +func (c *BucketsRestoreCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil +} + type BucketsSetIamPolicyCall struct { s *Service bucket string @@ -9840,7 +9959,8 @@ func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall { // SoftDeleted sets the optional parameter "softDeleted": If true, only // soft-deleted object versions will be listed. The default is false. For more -// information, see Soft Delete. +// information, see Soft Delete +// (https://cloud.google.com/storage/docs/soft-delete). func (c *ObjectsGetCall) SoftDeleted(softDeleted bool) *ObjectsGetCall { c.urlParams_.Set("softDeleted", fmt.Sprint(softDeleted)) return c @@ -10503,7 +10623,8 @@ func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall { // SoftDeleted sets the optional parameter "softDeleted": If true, only // soft-deleted object versions will be listed. The default is false. For more -// information, see Soft Delete. +// information, see Soft Delete +// (https://cloud.google.com/storage/docs/soft-delete). func (c *ObjectsListCall) SoftDeleted(softDeleted bool) *ObjectsListCall { c.urlParams_.Set("softDeleted", fmt.Sprint(softDeleted)) return c @@ -10527,7 +10648,8 @@ func (c *ObjectsListCall) UserProject(userProject string) *ObjectsListCall { // Versions sets the optional parameter "versions": If true, lists all versions // of an object as distinct results. The default is false. For more -// information, see Object Versioning. +// information, see Object Versioning +// (https://cloud.google.com/storage/docs/object-versioning). func (c *ObjectsListCall) Versions(versions bool) *ObjectsListCall { c.urlParams_.Set("versions", fmt.Sprint(versions)) return c @@ -10865,7 +10987,8 @@ type ObjectsRestoreCall struct { // - bucket: Name of the bucket in which the object resides. // - generation: Selects a specific revision of this object. // - object: Name of the object. For information about how to URL encode object -// names to be path safe, see Encoding URI Path Parts. +// names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). 
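Together, the generated surface above now covers a full soft-delete round trip: `BucketsGetCall.SoftDeleted`/`Generation` to inspect a deleted generation, and the new `BucketsRestoreCall` to bring it back. A hedged usage sketch against the storage/v1 package, using only calls added or extended in this diff; the bucket name and generation are placeholders:

```go
package storageusage

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

// restoreBucket fetches a soft-deleted bucket generation and restores it.
func restoreBucket(ctx context.Context, name string, generation int64) error {
	svc, err := storage.NewService(ctx)
	if err != nil {
		return err
	}
	// Generation is required when SoftDeleted(true) is set, per the
	// parameter description above.
	b, err := svc.Buckets.Get(name).SoftDeleted(true).Generation(generation).Context(ctx).Do()
	if err != nil {
		return err
	}
	// BucketsRestoreCall.Do returns only an error.
	return svc.Buckets.Restore(name, b.Generation).Context(ctx).Do()
}
```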
func (r *ObjectsService) Restore(bucket string, object string, generation int64) *ObjectsRestoreCall { c := &ObjectsRestoreCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket @@ -11858,7 +11981,8 @@ func (c *ObjectsWatchAllCall) UserProject(userProject string) *ObjectsWatchAllCa // Versions sets the optional parameter "versions": If true, lists all versions // of an object as distinct results. The default is false. For more -// information, see Object Versioning. +// information, see Object Versioning +// (https://cloud.google.com/storage/docs/object-versioning). func (c *ObjectsWatchAllCall) Versions(versions bool) *ObjectsWatchAllCall { c.urlParams_.Set("versions", fmt.Sprint(versions)) return c @@ -12774,7 +12898,9 @@ type ProjectsHmacKeysUpdateCall struct { } // Update: Updates the state of an HMAC key. See the HMAC Key resource -// descriptor for valid states. +// descriptor +// (https://cloud.google.com/storage/docs/json_api/v1/projects/hmacKeys/update#request-body) +// for valid states. // // - accessId: Name of the HMAC key being updated. // - projectId: Project ID owning the service account of the updated key. diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index ab0fbb79b..b572707c6 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -10,7 +10,7 @@ RPC framework that puts mobile and HTTP/2 first. For more information see the ## Prerequisites -- **[Go][]**: any one of the **three latest major** [releases][go-releases]. +- **[Go][]**: any one of the **two latest major** [releases][go-releases]. ## Installation diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index bdf93dbfe..0adc98866 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -19,7 +19,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 +// protoc-gen-go v1.34.1 // protoc v4.25.2 // source: grpc/lb/v1/load_balancer.proto diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go index c57857ac0..57a792a7b 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go @@ -19,7 +19,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.4.0 // - protoc v4.25.2 // source: grpc/lb/v1/load_balancer.proto diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go index 8942c3131..96a57c8c7 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go @@ -21,14 +21,14 @@ package grpclb import ( "encoding/json" - "google.golang.org/grpc" + "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/serviceconfig" ) const ( roundRobinName = roundrobin.Name - pickFirstName = grpc.PickFirstBalancerName + pickFirstName = pickfirst.Name ) type grpclbServiceConfig struct { diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go index 20c5f2ec3..671bc663f 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go @@ -19,13 +19,13 @@ package grpclb import ( + "math/rand" "sync" "sync/atomic" "google.golang.org/grpc/balancer" lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/status" ) @@ -112,7 +112,7 @@ type rrPicker struct { func newRRPicker(readySCs []balancer.SubConn) *rrPicker { return &rrPicker{ subConns: readySCs, - subConnsNext: grpcrand.Intn(len(readySCs)), + subConnsNext: rand.Intn(len(readySCs)), } } @@ -147,7 +147,7 @@ func newLBPicker(serverList []*lbpb.Server, readySCs []balancer.SubConn, stats * return &lbPicker{ serverList: serverList, subConns: readySCs, - subConnsNext: grpcrand.Intn(len(readySCs)), + subConnsNext: rand.Intn(len(readySCs)), stats: stats, } } diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go similarity index 89% rename from vendor/google.golang.org/grpc/pickfirst.go rename to vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index 885362661..07527603f 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -16,26 +16,36 @@ * */ -package grpc +// Package pickfirst contains the pick_first load balancing policy. +package pickfirst import ( "encoding/json" "errors" "fmt" + "math/rand" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" internalgrpclog "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) +func init() { + balancer.Register(pickfirstBuilder{}) + internal.ShuffleAddressListForTesting = func(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } +} + +var logger = grpclog.Component("pick-first-lb") + const ( - // PickFirstBalancerName is the name of the pick_first balancer. - PickFirstBalancerName = "pick_first" - logPrefix = "[pick-first-lb %p] " + // Name is the name of the pick_first balancer. 
+ Name = "pick_first" + logPrefix = "[pick-first-lb %p] " ) type pickfirstBuilder struct{} @@ -47,7 +57,7 @@ func (pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) } func (pickfirstBuilder) Name() string { - return PickFirstBalancerName + return Name } type pfConfig struct { @@ -93,6 +103,12 @@ func (b *pickfirstBalancer) ResolverError(err error) { }) } +type Shuffler interface { + ShuffleAddressListForTesting(n int, swap func(i, j int)) +} + +func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } + func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { // The resolver reported an empty address list. Treat it like an error by @@ -124,7 +140,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // within each endpoint. - A61 if cfg.ShuffleAddressList { endpoints = append([]resolver.Endpoint{}, endpoints...) - grpcrand.Shuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + internal.ShuffleAddressListForTesting.(func(int, func(int, int)))(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) } // "Flatten the list by concatenating the ordered list of addresses for each @@ -145,7 +161,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState addrs = state.ResolverState.Addresses if cfg.ShuffleAddressList { addrs = append([]resolver.Address{}, addrs...) - grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) } } diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index f7031ad22..260255d31 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,12 +22,12 @@ package roundrobin import ( + "math/rand" "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/grpcrand" ) // Name is the name of round_robin balancer. @@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { // Start at a random index, as the same RR balancer rebuilds a new // picker when SubConn states change, and we don't want to apply excess // load to the first server in the list. - next: uint32(grpcrand.Intn(len(scs))), + next: uint32(rand.Intn(len(scs))), } } diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index af39b8a4c..4161fdf47 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -198,6 +198,10 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { ccb.cc.mu.Lock() defer ccb.cc.mu.Unlock() + if ccb.cc.conns == nil { + // The CC has been closed; ignore this update. 
+ return + } ccb.mu.Lock() if ccb.closed { diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 1afb1e84a..63c639e4f 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 +// protoc-gen-go v1.34.1 // protoc v4.25.2 // source: grpc/binlog/v1/binarylog.proto diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 2359f94b8..423be7b43 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal" @@ -72,6 +73,8 @@ var ( // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default // service config. invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" + // PickFirstBalancerName is the name of the pick_first balancer. + PickFirstBalancerName = pickfirst.Name ) // The following errors are returned from Dial and DialContext @@ -152,6 +155,16 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) for _, opt := range opts { opt.apply(&cc.dopts) } + + // Determine the resolver to use. + if err := cc.initParsedTargetAndResolverBuilder(); err != nil { + return nil, err + } + + for _, opt := range globalPerTargetDialOptions { + opt.DialOptionForTarget(cc.parsedTarget.URL).apply(&cc.dopts) + } + chainUnaryClientInterceptors(cc) chainStreamClientInterceptors(cc) @@ -160,7 +173,7 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) } if cc.dopts.defaultServiceConfigRawJSON != nil { - scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) + scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON, cc.dopts.maxCallAttempts) if scpr.Err != nil { return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err) } @@ -168,24 +181,15 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) } cc.mkp = cc.dopts.copts.KeepaliveParams - // Register ClientConn with channelz. + if err = cc.initAuthority(); err != nil { + return nil, err + } + + // Register ClientConn with channelz. Note that this is only done after + // channel creation cannot fail. cc.channelzRegistration(target) - - // TODO: Ideally it should be impossible to error from this function after - // channelz registration. This will require removing some channelz logs - // from the following functions that can error. Errors can be returned to - // the user, and successful logs can be emitted here, after the checks have - // passed and channelz is subsequently registered. - - // Determine the resolver to use. 
- if err := cc.parseTargetAndFindResolver(); err != nil { - channelz.RemoveEntry(cc.channelz.ID) - return nil, err - } - if err = cc.determineAuthority(); err != nil { - channelz.RemoveEntry(cc.channelz.ID) - return nil, err - } + channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", cc.parsedTarget) + channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority) cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) @@ -587,11 +591,11 @@ type ClientConn struct { // The following are initialized at dial time, and are read-only after that. target string // User's dial target. - parsedTarget resolver.Target // See parseTargetAndFindResolver(). - authority string // See determineAuthority(). + parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder(). + authority string // See initAuthority(). dopts dialOptions // Default and user specified dial options. channelz *channelz.Channel // Channelz object. - resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). + resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). idlenessMgr *idle.Manager // The following provide their own synchronization, and therefore don't @@ -692,8 +696,7 @@ func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { var emptyServiceConfig *ServiceConfig func init() { - balancer.Register(pickfirstBuilder{}) - cfg := parseServiceConfig("{}") + cfg := parseServiceConfig("{}", defaultMaxCallAttempts) if cfg.Err != nil { panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) } @@ -1673,22 +1676,19 @@ func (cc *ClientConn) connectionError() error { return cc.lastConnectionError } -// parseTargetAndFindResolver parses the user's dial target and stores the -// parsed target in `cc.parsedTarget`. +// initParsedTargetAndResolverBuilder parses the user's dial target and stores +// the parsed target in `cc.parsedTarget`. // // The resolver to use is determined based on the scheme in the parsed target // and the same is stored in `cc.resolverBuilder`. // // Doesn't grab cc.mu as this method is expected to be called only at Dial time. 
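// --- Illustrative sketch, not part of this diff: how a parsed target's scheme
// selects a resolver.Builder, which initParsedTargetAndResolverBuilder now does
// before channelz registration so that registration is no longer followed by a
// possible failure. The "example" scheme and builder are hypothetical.
package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

type exampleBuilder struct{}

func (exampleBuilder) Build(resolver.Target, resolver.ClientConn, resolver.BuildOptions) (resolver.Resolver, error) {
	return nil, fmt.Errorf("sketch only")
}

func (exampleBuilder) Scheme() string { return "example" }

func main() {
	resolver.Register(exampleBuilder{})
	// A target such as "example:///my-service" now has its builder looked up
	// inside grpc.NewClient before the channel is registered with channelz.
	fmt.Println(resolver.Get("example") != nil) // true
}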
-func (cc *ClientConn) parseTargetAndFindResolver() error { - channelz.Infof(logger, cc.channelz, "original dial target is: %q", cc.target) +func (cc *ClientConn) initParsedTargetAndResolverBuilder() error { + logger.Infof("original dial target is: %q", cc.target) var rb resolver.Builder parsedTarget, err := parseTarget(cc.target) - if err != nil { - channelz.Infof(logger, cc.channelz, "dial target %q parse failed: %v", cc.target, err) - } else { - channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", parsedTarget) + if err == nil { rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget @@ -1707,15 +1707,12 @@ func (cc *ClientConn) parseTargetAndFindResolver() error { defScheme = resolver.GetDefaultScheme() } - channelz.Infof(logger, cc.channelz, "fallback to scheme %q", defScheme) canonicalTarget := defScheme + ":///" + cc.target parsedTarget, err = parseTarget(canonicalTarget) if err != nil { - channelz.Infof(logger, cc.channelz, "dial target %q parse failed: %v", canonicalTarget, err) return err } - channelz.Infof(logger, cc.channelz, "parsed dial target is: %+v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb == nil { return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) @@ -1805,7 +1802,7 @@ func encodeAuthority(authority string) string { // credentials do not match the authority configured through the dial option. // // Doesn't grab cc.mu as this method is expected to be called only at Dial time. -func (cc *ClientConn) determineAuthority() error { +func (cc *ClientConn) initAuthority() error { dopts := cc.dopts // Historically, we had two options for users to specify the serverName or // authority for a channel. One was through the transport credentials @@ -1838,6 +1835,5 @@ func (cc *ClientConn) determineAuthority() error { } else { cc.authority = encodeAuthority(endpoint) } - channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority) return nil } diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go index fe4488a95..38cb5cf0d 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 +// protoc-gen-go v1.34.1 // protoc v4.25.2 // source: grpc/gcp/altscontext.proto diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index adbad6b2f..55fc7f65f 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.33.0 +// protoc-gen-go v1.34.1 // protoc v4.25.2 // source: grpc/gcp/handshaker.proto diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go index d1af55260..358074b64 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.4.0 // - protoc v4.25.2 // source: grpc/gcp/handshaker.proto diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go index d65ffe6e7..18cc9cfbd 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.33.0 +// protoc-gen-go v1.34.1 // protoc v4.25.2 // source: grpc/gcp/transport_security_common.proto diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 5dafd34ed..411435854 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -27,9 +27,13 @@ import ( "net/url" "os" + "google.golang.org/grpc/grpclog" credinternal "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/envconfig" ) +var logger = grpclog.Component("credentials") + // TLSInfo contains the auth information for a TLS authenticated connection. // It implements the AuthInfo interface. type TLSInfo struct { @@ -112,6 +116,22 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon conn.Close() return nil, nil, ctx.Err() } + + // The negotiated protocol can be either of the following: + // 1. h2: When the server supports ALPN. Only HTTP/2 can be negotiated since + // it is the only protocol advertised by the client during the handshake. + // The tls library ensures that the server chooses a protocol advertised + // by the client. + // 2. "" (empty string): If the server doesn't support ALPN. ALPN is a requirement + // for using HTTP/2 over TLS. We can terminate the connection immediately. + np := conn.ConnectionState().NegotiatedProtocol + if np == "" { + if envconfig.EnforceALPNEnabled { + conn.Close() + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + } + logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName) + } tlsInfo := TLSInfo{ State: conn.ConnectionState(), CommonAuthInfo: CommonAuthInfo{ @@ -131,8 +151,20 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) conn.Close() return nil, nil, err } + cs := conn.ConnectionState() + // The negotiated application protocol can be empty only if the client doesn't + // support ALPN. In such cases, we can close the connection since ALPN is required + // for using HTTP/2 over TLS. 
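// --- Illustrative sketch, not part of this diff: a server-side tls.Config that
// advertises HTTP/2 via ALPN, which is what the checks above expect the peer to
// have negotiated. Certificate material is assumed to be supplied by the caller.
package tlsexample

import "crypto/tls"

func serverTLSConfig(cert tls.Certificate) *tls.Config {
	return &tls.Config{
		Certificates: []tls.Certificate{cert},
		// Advertising "h2" lets the handshake populate NegotiatedProtocol; if
		// it stays empty and GRPC_ENFORCE_ALPN_ENABLED is "true", the
		// handshake fails with "missing selected ALPN property".
		NextProtos: []string{"h2"},
	}
}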
+ if cs.NegotiatedProtocol == "" { + if envconfig.EnforceALPNEnabled { + conn.Close() + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + } else if logger.V(2) { + logger.Info("Allowing TLS connection from client with ALPN disabled. TLS connections with ALPN disabled will be disallowed in future grpc-go releases") + } + } tlsInfo := TLSInfo{ - State: conn.ConnectionState(), + State: cs, CommonAuthInfo: CommonAuthInfo{ SecurityLevel: PrivacyAndIntegrity, }, diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 00273702b..f5453d48a 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -21,6 +21,7 @@ package grpc import ( "context" "net" + "net/url" "time" "google.golang.org/grpc/backoff" @@ -36,6 +37,11 @@ import ( "google.golang.org/grpc/stats" ) +const ( + // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#limits-on-retries-and-hedges + defaultMaxCallAttempts = 5 +) + func init() { internal.AddGlobalDialOptions = func(opt ...DialOption) { globalDialOptions = append(globalDialOptions, opt...) @@ -43,6 +49,14 @@ func init() { internal.ClearGlobalDialOptions = func() { globalDialOptions = nil } + internal.AddGlobalPerTargetDialOptions = func(opt any) { + if ptdo, ok := opt.(perTargetDialOption); ok { + globalPerTargetDialOptions = append(globalPerTargetDialOptions, ptdo) + } + } + internal.ClearGlobalPerTargetDialOptions = func() { + globalPerTargetDialOptions = nil + } internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption internal.DisableGlobalDialOptions = newDisableGlobalDialOptions @@ -80,6 +94,7 @@ type dialOptions struct { idleTimeout time.Duration recvBufferPool SharedBufferPool defaultScheme string + maxCallAttempts int } // DialOption configures how we set up the connection. @@ -89,6 +104,19 @@ type DialOption interface { var globalDialOptions []DialOption +// perTargetDialOption takes a parsed target and returns a dial option to apply. +// +// This gets called after NewClient() parses the target, and allows per target +// configuration set through a returned DialOption. The DialOption will not take +// effect if specifies a resolver builder, as that Dial Option is factored in +// while parsing target. +type perTargetDialOption interface { + // DialOption returns a Dial Option to apply. + DialOptionForTarget(parsedTarget url.URL) DialOption +} + +var globalPerTargetDialOptions []perTargetDialOption + // EmptyDialOption does not alter the dial configuration. It can be embedded in // another structure to build custom dial options. // @@ -655,6 +683,7 @@ func defaultDialOptions() dialOptions { idleTimeout: 30 * time.Minute, recvBufferPool: nopBufferPool{}, defaultScheme: "dns", + maxCallAttempts: defaultMaxCallAttempts, } } @@ -712,6 +741,23 @@ func WithIdleTimeout(d time.Duration) DialOption { }) } +// WithMaxCallAttempts returns a DialOption that configures the maximum number +// of attempts per call (including retries and hedging) using the channel. +// Service owners may specify a higher value for these parameters, but higher +// values will be treated as equal to the maximum value by the client +// implementation. This mitigates security concerns related to the service +// config being transferred to the client via DNS. +// +// A value of 5 will be used if this dial option is not set or n < 2. 
+func WithMaxCallAttempts(n int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + if n < 2 { + n = defaultMaxCallAttempts + } + o.maxCallAttempts = n + }) +} + // WithRecvBufferPool returns a DialOption that configures the ClientConn // to use the provided shared buffer pool for parsing incoming messages. Depending // on the application's workload, this could result in reduced memory allocation. diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go index fed1c011a..b15cf482d 100644 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -25,10 +25,10 @@ package backoff import ( "context" "errors" + "math/rand" "time" grpcbackoff "google.golang.org/grpc/backoff" - "google.golang.org/grpc/internal/grpcrand" ) // Strategy defines the methodology for backing off after a grpc connection @@ -67,7 +67,7 @@ func (bc Exponential) Backoff(retries int) time.Duration { } // Randomize backoff delays so that if a cluster of requests start at // the same time, they won't operate in lockstep. - backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1) + backoff *= 1 + bc.Config.Jitter*(rand.Float64()*2-1) if backoff < 0 { return 0 } diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 9c915d9e4..d90648713 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -40,6 +40,12 @@ var ( // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS // handshakes that can be performed. ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) + // EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled + // should be rejected. The HTTP/2 protocol requires ALPN to be enabled, this + // option is present for backward compatibility. This option may be overridden + // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true" + // or "false". + EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go deleted file mode 100644 index 0126d6b51..000000000 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ /dev/null @@ -1,100 +0,0 @@ -//go:build !go1.21 - -// TODO: when this file is deleted (after Go 1.20 support is dropped), delete -// all of grpcrand and call the rand package directly. - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcrand implements math/rand functions in a concurrent-safe way -// with a global random source, independent of math/rand's global source. 
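// --- Illustrative sketch, not part of this diff: grpc-go now requires Go 1.21,
// where the top-level math/rand functions are concurrency-safe and auto-seeded
// (since Go 1.20), so direct calls replace this wrapper throughout the diff.
package randexample

import "math/rand"

// pickStartIndex mirrors how the balancers above now choose a random starting
// subchannel without grpcrand's extra mutex.
func pickStartIndex(n int) int {
	return rand.Intn(n)
}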
-package grpcrand - -import ( - "math/rand" - "sync" - "time" -) - -var ( - r = rand.New(rand.NewSource(time.Now().UnixNano())) - mu sync.Mutex -) - -// Int implements rand.Int on the grpcrand global source. -func Int() int { - mu.Lock() - defer mu.Unlock() - return r.Int() -} - -// Int63n implements rand.Int63n on the grpcrand global source. -func Int63n(n int64) int64 { - mu.Lock() - defer mu.Unlock() - return r.Int63n(n) -} - -// Intn implements rand.Intn on the grpcrand global source. -func Intn(n int) int { - mu.Lock() - defer mu.Unlock() - return r.Intn(n) -} - -// Int31n implements rand.Int31n on the grpcrand global source. -func Int31n(n int32) int32 { - mu.Lock() - defer mu.Unlock() - return r.Int31n(n) -} - -// Float64 implements rand.Float64 on the grpcrand global source. -func Float64() float64 { - mu.Lock() - defer mu.Unlock() - return r.Float64() -} - -// Uint64 implements rand.Uint64 on the grpcrand global source. -func Uint64() uint64 { - mu.Lock() - defer mu.Unlock() - return r.Uint64() -} - -// Uint32 implements rand.Uint32 on the grpcrand global source. -func Uint32() uint32 { - mu.Lock() - defer mu.Unlock() - return r.Uint32() -} - -// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. -func ExpFloat64() float64 { - mu.Lock() - defer mu.Unlock() - return r.ExpFloat64() -} - -// Shuffle implements rand.Shuffle on the grpcrand global source. -var Shuffle = func(n int, f func(int, int)) { - mu.Lock() - defer mu.Unlock() - r.Shuffle(n, f) -} diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go deleted file mode 100644 index c37299af1..000000000 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go +++ /dev/null @@ -1,73 +0,0 @@ -//go:build go1.21 - -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcrand implements math/rand functions in a concurrent-safe way -// with a global random source, independent of math/rand's global source. -package grpcrand - -import "math/rand" - -// This implementation will be used for Go version 1.21 or newer. -// For older versions, the original implementation with mutex will be used. - -// Int implements rand.Int on the grpcrand global source. -func Int() int { - return rand.Int() -} - -// Int63n implements rand.Int63n on the grpcrand global source. -func Int63n(n int64) int64 { - return rand.Int63n(n) -} - -// Intn implements rand.Intn on the grpcrand global source. -func Intn(n int) int { - return rand.Intn(n) -} - -// Int31n implements rand.Int31n on the grpcrand global source. -func Int31n(n int32) int32 { - return rand.Int31n(n) -} - -// Float64 implements rand.Float64 on the grpcrand global source. -func Float64() float64 { - return rand.Float64() -} - -// Uint64 implements rand.Uint64 on the grpcrand global source. -func Uint64() uint64 { - return rand.Uint64() -} - -// Uint32 implements rand.Uint32 on the grpcrand global source. 
-func Uint32() uint32 { - return rand.Uint32() -} - -// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. -func ExpFloat64() float64 { - return rand.ExpFloat64() -} - -// Shuffle implements rand.Shuffle on the grpcrand global source. -var Shuffle = func(n int, f func(int, int)) { - rand.Shuffle(n, f) -} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 48d24bdb4..5d6653986 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -106,6 +106,14 @@ var ( // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. ClearGlobalDialOptions func() + + // AddGlobalPerTargetDialOptions adds a PerTargetDialOption that will be + // configured for newly created ClientConns. + AddGlobalPerTargetDialOptions any // func (opt any) + // ClearGlobalPerTargetDialOptions clears the slice of global late apply + // dial options. + ClearGlobalPerTargetDialOptions func() + // JoinDialOptions combines the dial options passed as arguments into a // single dial option. JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption @@ -126,7 +134,8 @@ var ( // deleted or changed. BinaryLogger any // func(binarylog.Logger) grpc.ServerOption - // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn + // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a + // provided grpc.ClientConn. SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber) // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using @@ -184,25 +193,25 @@ var ( ChannelzTurnOffForTesting func() - // TriggerXDSResourceNameNotFoundForTesting triggers the resource-not-found - // error for a given resource type and name. This is usually triggered when - // the associated watch timer fires. For testing purposes, having this - // function makes events more predictable than relying on timer events. - TriggerXDSResourceNameNotFoundForTesting any // func(func(xdsresource.Type, string), string, string) error + // TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to + // invoke resource-not-found error for the given resource type and name. + TriggerXDSResourceNotFoundForTesting any // func(xdsclient.XDSClient, xdsresource.Type, string) error - // TriggerXDSResourceNameNotFoundClient invokes the testing xDS Client - // singleton to invoke resource not found for a resource type name and - // resource name. - TriggerXDSResourceNameNotFoundClient any // func(string, string) error - - // FromOutgoingContextRaw returns the un-merged, intermediary contents of metadata.rawMD. + // FromOutgoingContextRaw returns the un-merged, intermediary contents of + // metadata.rawMD. FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool) - // UserSetDefaultScheme is set to true if the user has overridden the default resolver scheme. + // UserSetDefaultScheme is set to true if the user has overridden the + // default resolver scheme. UserSetDefaultScheme bool = false + + // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n + // is the number of elements. swap swaps the elements with indexes i and j. + ShuffleAddressListForTesting any // func(n int, swap func(i, j int)) ) -// HealthChecker defines the signature of the client-side LB channel health checking function. 
+// HealthChecker defines the signature of the client-side LB channel health +// checking function. // // The implementation is expected to create a health checking RPC stream by // calling newStream(), watch for the health status of serviceName, and report diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index f3f52a59a..4552db16b 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -24,6 +24,7 @@ import ( "context" "encoding/json" "fmt" + "math/rand" "net" "os" "strconv" @@ -35,7 +36,6 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/resolver/dns/internal" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" @@ -63,6 +63,8 @@ var ( func init() { resolver.Register(NewBuilder()) internal.TimeAfterFunc = time.After + internal.TimeNowFunc = time.Now + internal.TimeUntilFunc = time.Until internal.NewNetResolver = newNetResolver internal.AddressDialer = addressDialer } @@ -209,12 +211,12 @@ func (d *dnsResolver) watcher() { err = d.cc.UpdateState(*state) } - var waitTime time.Duration + var nextResolutionTime time.Time if err == nil { // Success resolving, wait for the next ResolveNow. However, also wait 30 // seconds at the very least to prevent constantly re-resolving. backoffIndex = 1 - waitTime = MinResolutionInterval + nextResolutionTime = internal.TimeNowFunc().Add(MinResolutionInterval) select { case <-d.ctx.Done(): return @@ -223,13 +225,13 @@ func (d *dnsResolver) watcher() { } else { // Poll on an error found in DNS Resolver or an error received from // ClientConn. - waitTime = backoff.DefaultExponential.Backoff(backoffIndex) + nextResolutionTime = internal.TimeNowFunc().Add(backoff.DefaultExponential.Backoff(backoffIndex)) backoffIndex++ } select { case <-d.ctx.Done(): return - case <-internal.TimeAfterFunc(waitTime): + case <-internal.TimeAfterFunc(internal.TimeUntilFunc(nextResolutionTime)): } } } @@ -423,7 +425,7 @@ func chosenByPercentage(a *int) bool { if a == nil { return true } - return grpcrand.Intn(100)+1 <= *a + return rand.Intn(100)+1 <= *a } func canaryingSC(js string) string { diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go index a7ecaf8d5..c0eae4f5f 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go @@ -51,11 +51,22 @@ var ( // The following vars are overridden from tests. var ( // TimeAfterFunc is used by the DNS resolver to wait for the given duration - // to elapse. In non-test code, this is implemented by time.After. In test + // to elapse. In non-test code, this is implemented by time.After. In test // code, this can be used to control the amount of time the resolver is // blocked waiting for the duration to elapse. TimeAfterFunc func(time.Duration) <-chan time.Time + // TimeNowFunc is used by the DNS resolver to get the current time. + // In non-test code, this is implemented by time.Now. In test code, + // this can be used to control the current time for the resolver. 
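// --- Illustrative sketch, not part of this diff: the injectable-clock pattern
// these hooks enable — compute an absolute next-resolution time with a
// replaceable "now", then wait for the remaining duration. Shown as a
// standalone analogue because the real TimeNowFunc/TimeUntilFunc live in an
// internal package that only grpc-go's own tests can import.
package clockexample

import "time"

var (
	timeNow   = time.Now   // tests may replace these two, mirroring
	timeUntil = time.Until // TimeNowFunc and TimeUntilFunc above
)

// waitForNextResolution sleeps until `interval` after the injected "now",
// or until done is closed, whichever comes first.
func waitForNextResolution(interval time.Duration, done <-chan struct{}) {
	next := timeNow().Add(interval)
	select {
	case <-done:
	case <-time.After(timeUntil(next)):
	}
}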
+ TimeNowFunc func() time.Time + + // TimeUntilFunc is used by the DNS resolver to calculate the remaining + // wait time for re-resolution. In non-test code, this is implemented by + // time.Until. In test code, this can be used to control the remaining + // time for resolver to wait for re-resolution. + TimeUntilFunc func(time.Time) time.Duration + // NewNetResolver returns the net.Resolver instance for the given target. NewNetResolver func(string) (NetResolver, error) diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index cab0e2d3d..b7091165b 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -25,6 +25,7 @@ import ( "fmt" "io" "math" + "math/rand" "net" "net/http" "strconv" @@ -43,7 +44,6 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -1440,7 +1440,7 @@ func getJitter(v time.Duration) time.Duration { } // Generate a jitter between +/- 10% of the value. r := int64(v / 10) - j := grpcrand.Int63n(2*r) - r + j := rand.Int63n(2*r) - r return time.Duration(j) } diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 56e8aba78..bdaa2130e 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -22,7 +22,7 @@ import ( "context" "fmt" "io" - "sync" + "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" @@ -33,35 +33,43 @@ import ( "google.golang.org/grpc/status" ) +// pickerGeneration stores a picker and a channel used to signal that a picker +// newer than this one is available. +type pickerGeneration struct { + // picker is the picker produced by the LB policy. May be nil if a picker + // has never been produced. + picker balancer.Picker + // blockingCh is closed when the picker has been invalidated because there + // is a new one available. + blockingCh chan struct{} +} + // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick // actions and unblock when there's a picker update. type pickerWrapper struct { - mu sync.Mutex - done bool - blockingCh chan struct{} - picker balancer.Picker + // If pickerGen holds a nil pointer, the pickerWrapper is closed. + pickerGen atomic.Pointer[pickerGeneration] statsHandlers []stats.Handler // to record blocking picker calls } func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { - return &pickerWrapper{ - blockingCh: make(chan struct{}), + pw := &pickerWrapper{ statsHandlers: statsHandlers, } + pw.pickerGen.Store(&pickerGeneration{ + blockingCh: make(chan struct{}), + }) + return pw } -// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. +// updatePicker is called by UpdateState calls from the LB policy. It +// unblocks all blocked pick. func (pw *pickerWrapper) updatePicker(p balancer.Picker) { - pw.mu.Lock() - if pw.done { - pw.mu.Unlock() - return - } - pw.picker = p - // pw.blockingCh should never be nil. 
- close(pw.blockingCh) - pw.blockingCh = make(chan struct{}) - pw.mu.Unlock() + old := pw.pickerGen.Swap(&pickerGeneration{ + picker: p, + blockingCh: make(chan struct{}), + }) + close(old.blockingCh) } // doneChannelzWrapper performs the following: @@ -98,20 +106,17 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. var lastPickErr error for { - pw.mu.Lock() - if pw.done { - pw.mu.Unlock() + pg := pw.pickerGen.Load() + if pg == nil { return nil, balancer.PickResult{}, ErrClientConnClosing } - - if pw.picker == nil { - ch = pw.blockingCh + if pg.picker == nil { + ch = pg.blockingCh } - if ch == pw.blockingCh { + if ch == pg.blockingCh { // This could happen when either: // - pw.picker is nil (the previous if condition), or - // - has called pick on the current picker. - pw.mu.Unlock() + // - we have already called pick on the current picker. select { case <-ctx.Done(): var errStr string @@ -145,9 +150,8 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } } - ch = pw.blockingCh - p := pw.picker - pw.mu.Unlock() + ch = pg.blockingCh + p := pg.picker pickResult, err := p.Pick(info) if err != nil { @@ -197,24 +201,15 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } func (pw *pickerWrapper) close() { - pw.mu.Lock() - defer pw.mu.Unlock() - if pw.done { - return - } - pw.done = true - close(pw.blockingCh) + old := pw.pickerGen.Swap(nil) + close(old.blockingCh) } // reset clears the pickerWrapper and prepares it for being used again when idle // mode is exited. func (pw *pickerWrapper) reset() { - pw.mu.Lock() - defer pw.mu.Unlock() - if pw.done { - return - } - pw.blockingCh = make(chan struct{}) + old := pw.pickerGen.Swap(&pickerGeneration{blockingCh: make(chan struct{})}) + close(old.blockingCh) } // dropError is a wrapper error that indicates the LB policy wishes to drop the diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go index 9dcc9780f..c5fb45236 100644 --- a/vendor/google.golang.org/grpc/resolver_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -171,7 +171,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { // ParseServiceConfig is called by resolver implementations to parse a JSON // representation of the service config. 
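// --- Illustrative sketch, not part of this diff: a service config whose
// retryPolicy asks for 10 attempts. With the change below, parsing clamps
// MaxAttempts to the channel's configured maximum (5 by default, or the
// value given to WithMaxCallAttempts) instead of a hard-coded 5. The
// service name is hypothetical.
package retryexample

const retryServiceConfig = `{
  "methodConfig": [{
    "name": [{"service": "echo.Echo"}],
    "retryPolicy": {
      "maxAttempts": 10,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`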
func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { - return parseServiceConfig(scJSON) + return parseServiceConfig(scJSON, ccr.cc.dopts.maxCallAttempts) } // addChannelzTraceEvent adds a channelz trace event containing the new diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 9da8fc802..2671c5ef6 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -26,6 +26,7 @@ import ( "time" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/gracefulswitch" @@ -163,9 +164,11 @@ type jsonSC struct { } func init() { - internal.ParseServiceConfig = parseServiceConfig + internal.ParseServiceConfig = func(js string) *serviceconfig.ParseResult { + return parseServiceConfig(js, defaultMaxCallAttempts) + } } -func parseServiceConfig(js string) *serviceconfig.ParseResult { +func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult { if len(js) == 0 { return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} } @@ -183,12 +186,12 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { } c := rsc.LoadBalancingConfig if c == nil { - name := PickFirstBalancerName + name := pickfirst.Name if rsc.LoadBalancingPolicy != nil { name = *rsc.LoadBalancingPolicy } if balancer.Get(name) == nil { - name = PickFirstBalancerName + name = pickfirst.Name } cfg := []map[string]any{{name: struct{}{}}} strCfg, err := json.Marshal(cfg) @@ -218,7 +221,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { WaitForReady: m.WaitForReady, Timeout: (*time.Duration)(m.Timeout), } - if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { + if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy, maxAttempts); err != nil { logger.Warningf("grpc: unmarshalling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } @@ -264,7 +267,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { return &serviceconfig.ParseResult{Config: &sc} } -func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPolicy, err error) { +func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalserviceconfig.RetryPolicy, err error) { if jrp == nil { return nil, nil } @@ -278,17 +281,16 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol return nil, nil } + if jrp.MaxAttempts < maxAttempts { + maxAttempts = jrp.MaxAttempts + } rp := &internalserviceconfig.RetryPolicy{ - MaxAttempts: jrp.MaxAttempts, + MaxAttempts: maxAttempts, InitialBackoff: time.Duration(jrp.InitialBackoff), MaxBackoff: time.Duration(jrp.MaxBackoff), BackoffMultiplier: jrp.BackoffMultiplier, RetryableStatusCodes: make(map[codes.Code]bool), } - if rp.MaxAttempts > 5 { - // TODO(retry): Make the max maxAttempts configurable. 
- rp.MaxAttempts = 5 - } for _, code := range jrp.RetryableStatusCodes { rp.RetryableStatusCodes[code] = true } diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index b54563e81..8051ef5b5 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -23,6 +23,7 @@ import ( "errors" "io" "math" + "math/rand" "strconv" "sync" "time" @@ -34,7 +35,6 @@ import ( "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" iresolver "google.golang.org/grpc/internal/resolver" @@ -699,7 +699,7 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { if max := float64(rp.MaxBackoff); cur > max { cur = max } - dur = time.Duration(grpcrand.Int63n(int64(cur))) + dur = time.Duration(rand.Int63n(int64(cur))) cs.numRetriesSincePushback++ } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index a0b782890..bafaef99b 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.64.1" +const Version = "1.65.0" diff --git a/vendor/modules.txt b/vendor/modules.txt index 1b8383e06..50d38e4ff 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,11 +1,11 @@ -# cloud.google.com/go v0.115.0 +# cloud.google.com/go v0.115.1 ## explicit; go 1.20 cloud.google.com/go/internal cloud.google.com/go/internal/optional cloud.google.com/go/internal/trace cloud.google.com/go/internal/version -# cloud.google.com/go/auth v0.8.0 -## explicit; go 1.20 +# cloud.google.com/go/auth v0.9.0 +## explicit; go 1.21 cloud.google.com/go/auth cloud.google.com/go/auth/credentials cloud.google.com/go/auth/credentials/internal/externalaccount @@ -20,11 +20,11 @@ cloud.google.com/go/auth/internal/credsfile cloud.google.com/go/auth/internal/jwt cloud.google.com/go/auth/internal/transport cloud.google.com/go/auth/internal/transport/cert -# cloud.google.com/go/auth/oauth2adapt v0.2.3 +# cloud.google.com/go/auth/oauth2adapt v0.2.4 ## explicit; go 1.20 cloud.google.com/go/auth/oauth2adapt -# cloud.google.com/go/compute v1.27.5 -## explicit; go 1.20 +# cloud.google.com/go/compute v1.28.0 +## explicit; go 1.21 cloud.google.com/go/compute/apiv1 cloud.google.com/go/compute/apiv1/computepb cloud.google.com/go/compute/internal @@ -362,7 +362,7 @@ github.com/beorn7/perks/quantile # github.com/cenkalti/backoff/v4 v4.2.1 ## explicit; go 1.18 github.com/cenkalti/backoff/v4 -# github.com/cespare/xxhash/v2 v2.2.0 +# github.com/cespare/xxhash/v2 v2.3.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 # github.com/containerd/cgroups/v3 v3.0.3 @@ -375,10 +375,10 @@ github.com/containerd/errdefs ## explicit; go 1.19 github.com/containerd/stargz-snapshotter/estargz github.com/containerd/stargz-snapshotter/estargz/errorutil -# github.com/containers/common v0.60.0 +# github.com/containers/common v0.60.2 ## explicit; go 1.21.0 github.com/containers/common/pkg/retry -# github.com/containers/image/v5 v5.32.0 +# github.com/containers/image/v5 v5.32.2 ## explicit; go 1.21.0 github.com/containers/image/v5/copy github.com/containers/image/v5/directory @@ -677,7 +677,7 @@ github.com/golang-jwt/jwt/v4 # github.com/golang-jwt/jwt/v5 v5.2.1 ## explicit; go 1.18 github.com/golang-jwt/jwt/v5 -# 
github.com/golang/glog v1.2.0 +# github.com/golang/glog v1.2.1 ## explicit; go 1.19 github.com/golang/glog github.com/golang/glog/internal/logsink @@ -740,6 +740,7 @@ github.com/googleapis/gax-go/v2/apierror github.com/googleapis/gax-go/v2/apierror/internal/proto github.com/googleapis/gax-go/v2/callctx github.com/googleapis/gax-go/v2/internal +github.com/googleapis/gax-go/v2/iterator # github.com/gophercloud/gophercloud v1.14.0 ## explicit; go 1.14 github.com/gophercloud/gophercloud @@ -872,7 +873,7 @@ github.com/mattn/go-colorable # github.com/mattn/go-isatty v0.0.20 ## explicit; go 1.15 github.com/mattn/go-isatty -# github.com/mattn/go-runewidth v0.0.15 +# github.com/mattn/go-runewidth v0.0.16 ## explicit; go 1.9 github.com/mattn/go-runewidth # github.com/mattn/go-sqlite3 v1.14.22 @@ -943,7 +944,7 @@ github.com/oracle/oci-go-sdk/v54/identity github.com/oracle/oci-go-sdk/v54/objectstorage github.com/oracle/oci-go-sdk/v54/objectstorage/transfer github.com/oracle/oci-go-sdk/v54/workrequests -# github.com/osbuild/images v0.77.0 +# github.com/osbuild/images v0.79.0 ## explicit; go 1.21.0 github.com/osbuild/images/internal/common github.com/osbuild/images/internal/environment @@ -1107,13 +1108,13 @@ github.com/valyala/fasttemplate github.com/vbatts/tar-split/archive/tar github.com/vbatts/tar-split/tar/asm github.com/vbatts/tar-split/tar/storage -# github.com/vbauerster/mpb/v8 v8.7.4 +# github.com/vbauerster/mpb/v8 v8.7.5 ## explicit; go 1.17 github.com/vbauerster/mpb/v8 github.com/vbauerster/mpb/v8/cwriter github.com/vbauerster/mpb/v8/decor github.com/vbauerster/mpb/v8/internal -# github.com/vmware/govmomi v0.39.0 +# github.com/vmware/govmomi v0.42.0 ## explicit; go 1.21 github.com/vmware/govmomi github.com/vmware/govmomi/cns @@ -1193,16 +1194,17 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 -## explicit; go 1.20 +# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 +## explicit; go 1.21 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 -## explicit; go 1.20 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 +## explicit; go 1.21 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/otel v1.24.0 -## explicit; go 1.20 +# go.opentelemetry.io/otel v1.28.0 +## explicit; go 1.21 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute go.opentelemetry.io/otel/baggage @@ -1214,13 +1216,14 @@ go.opentelemetry.io/otel/internal/global go.opentelemetry.io/otel/propagation go.opentelemetry.io/otel/semconv/v1.17.0 go.opentelemetry.io/otel/semconv/v1.20.0 -# go.opentelemetry.io/otel/metric v1.24.0 -## explicit; go 1.20 +go.opentelemetry.io/otel/semconv/v1.24.0 +# go.opentelemetry.io/otel/metric v1.28.0 +## explicit; go 1.21 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/trace v1.24.0 -## explicit; go 1.20 +# go.opentelemetry.io/otel/trace v1.28.0 +## explicit; go 1.21 go.opentelemetry.io/otel/trace 
go.opentelemetry.io/otel/trace/embedded # golang.org/x/crypto v0.26.0 @@ -1249,7 +1252,7 @@ golang.org/x/crypto/pkcs12/internal/rc2 golang.org/x/crypto/salsa20/salsa golang.org/x/crypto/scrypt golang.org/x/crypto/sha3 -# golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 +# golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/maps @@ -1327,8 +1330,8 @@ golang.org/x/tools/internal/gocommand golang.org/x/tools/internal/gopathwalk golang.org/x/tools/internal/imports golang.org/x/tools/internal/stdlib -# google.golang.org/api v0.191.0 -## explicit; go 1.20 +# google.golang.org/api v0.193.0 +## explicit; go 1.21 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport google.golang.org/api/iamcredentials/v1 @@ -1345,22 +1348,22 @@ google.golang.org/api/transport google.golang.org/api/transport/grpc google.golang.org/api/transport/http google.golang.org/api/transport/http/internal/propagation -# google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf -## explicit; go 1.20 +# google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142 +## explicit; go 1.21 google.golang.org/genproto/googleapis/cloud/extendedops google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr -# google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f -## explicit; go 1.20 +# google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 +## explicit; go 1.21 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf -## explicit; go 1.20 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 +## explicit; go 1.21 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.64.1 -## explicit; go 1.19 +# google.golang.org/grpc v1.65.0 +## explicit; go 1.21 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -1369,6 +1372,7 @@ google.golang.org/grpc/balancer/base google.golang.org/grpc/balancer/grpclb google.golang.org/grpc/balancer/grpclb/grpc_lb_v1 google.golang.org/grpc/balancer/grpclb/state +google.golang.org/grpc/balancer/pickfirst google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 google.golang.org/grpc/channelz @@ -1399,7 +1403,6 @@ google.golang.org/grpc/internal/credentials google.golang.org/grpc/internal/envconfig google.golang.org/grpc/internal/googlecloud google.golang.org/grpc/internal/grpclog -google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil google.golang.org/grpc/internal/idle
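// --- Illustrative sketch, not part of this diff: the lock-free "generation"
// pattern that picker_wrapper.go adopts above — atomically swap in a new
// generation and close the old generation's channel to wake any waiters —
// reduced to a standalone program.
package main

import (
	"fmt"
	"sync/atomic"
)

type generation[T any] struct {
	value   T
	invalid chan struct{} // closed once a newer generation exists
}

type genBox[T any] struct{ p atomic.Pointer[generation[T]] }

func (b *genBox[T]) update(v T) {
	old := b.p.Swap(&generation[T]{value: v, invalid: make(chan struct{})})
	if old != nil {
		close(old.invalid) // unblock anyone waiting on the previous generation
	}
}

func main() {
	var b genBox[int]
	b.update(1)
	g := b.p.Load()
	b.update(2)
	<-g.invalid                   // returns immediately: generation 1 is stale
	fmt.Println(b.p.Load().value) // prints 2
}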