build(deps): bump the go-deps group with 7 updates

Bumps the go-deps group with 7 updates:

| Package | From | To |
| --- | --- | --- |
| [github.com/Azure/azure-sdk-for-go/sdk/storage/azblob](https://github.com/Azure/azure-sdk-for-go) | `1.1.0` | `1.2.0` |
| [github.com/google/go-cmp](https://github.com/google/go-cmp) | `0.5.9` | `0.6.0` |
| [github.com/labstack/echo/v4](https://github.com/labstack/echo) | `4.11.1` | `4.11.2` |
| [github.com/openshift-online/ocm-sdk-go](https://github.com/openshift-online/ocm-sdk-go) | `0.1.371` | `0.1.373` |
| [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) | `1.16.0` | `1.17.0` |
| [golang.org/x/sync](https://github.com/golang/sync) | `0.3.0` | `0.4.0` |
| [google.golang.org/api](https://github.com/googleapis/google-api-go-client) | `0.145.0` | `0.146.0` |


Updates `github.com/Azure/azure-sdk-for-go/sdk/storage/azblob` from 1.1.0 to 1.2.0
- [Release notes](https://github.com/Azure/azure-sdk-for-go/releases)
- [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/main/documentation/release.md)
- [Commits](https://github.com/Azure/azure-sdk-for-go/compare/v1.1...v1.2)

Updates `github.com/google/go-cmp` from 0.5.9 to 0.6.0
- [Release notes](https://github.com/google/go-cmp/releases)
- [Commits](https://github.com/google/go-cmp/compare/v0.5.9...v0.6.0)

Updates `github.com/labstack/echo/v4` from 4.11.1 to 4.11.2
- [Release notes](https://github.com/labstack/echo/releases)
- [Changelog](https://github.com/labstack/echo/blob/master/CHANGELOG.md)
- [Commits](https://github.com/labstack/echo/compare/v4.11.1...v4.11.2)

Updates `github.com/openshift-online/ocm-sdk-go` from 0.1.371 to 0.1.373
- [Release notes](https://github.com/openshift-online/ocm-sdk-go/releases)
- [Changelog](https://github.com/openshift-online/ocm-sdk-go/blob/main/CHANGES.md)
- [Commits](https://github.com/openshift-online/ocm-sdk-go/compare/v0.1.371...v0.1.373)

Updates `github.com/prometheus/client_golang` from 1.16.0 to 1.17.0
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.16.0...v1.17.0)

Updates `golang.org/x/sync` from 0.3.0 to 0.4.0
- [Commits](https://github.com/golang/sync/compare/v0.3.0...v0.4.0)

Updates `google.golang.org/api` from 0.145.0 to 0.146.0
- [Release notes](https://github.com/googleapis/google-api-go-client/releases)
- [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md)
- [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.145.0...v0.146.0)

---
updated-dependencies:
- dependency-name: github.com/Azure/azure-sdk-for-go/sdk/storage/azblob
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: go-deps
- dependency-name: github.com/google/go-cmp
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: go-deps
- dependency-name: github.com/labstack/echo/v4
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: go-deps
- dependency-name: github.com/openshift-online/ocm-sdk-go
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: go-deps
- dependency-name: github.com/prometheus/client_golang
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: go-deps
- dependency-name: golang.org/x/sync
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: go-deps
- dependency-name: google.golang.org/api
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: go-deps
...

Signed-off-by: dependabot[bot] <support@github.com>
Author: dependabot[bot] (committed by Tomáš Hozza)
Date: 2023-10-12 04:43:21 +00:00
Parent: 0a255df1ca
Commit: d4af58c9f5
136 changed files with 2587 additions and 1394 deletions

go.mod (22 changed lines)

@ -8,7 +8,7 @@ require (
cloud.google.com/go/compute v1.23.0
cloud.google.com/go/storage v1.33.0
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0
github.com/Azure/go-autorest/autorest v0.11.29
github.com/Azure/go-autorest/autorest/azure/auth v0.5.12
github.com/BurntSushi/toml v1.3.2
@ -19,7 +19,7 @@ require (
github.com/getkin/kin-openapi v0.93.0
github.com/gobwas/glob v0.2.3
github.com/golang-jwt/jwt/v4 v4.5.0
github.com/google/go-cmp v0.5.9
github.com/google/go-cmp v0.6.0
github.com/google/uuid v1.3.1
github.com/gophercloud/gophercloud v1.7.0
github.com/hashicorp/go-retryablehttp v0.7.4
@ -27,12 +27,12 @@ require (
github.com/jackc/pgx/v4 v4.18.1
github.com/julienschmidt/httprouter v1.3.0
github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b
github.com/labstack/echo/v4 v4.11.1
github.com/labstack/echo/v4 v4.11.2
github.com/labstack/gommon v0.4.0
github.com/openshift-online/ocm-sdk-go v0.1.371
github.com/openshift-online/ocm-sdk-go v0.1.373
github.com/oracle/oci-go-sdk/v54 v54.0.0
github.com/osbuild/images v0.11.0
github.com/prometheus/client_golang v1.16.0
github.com/prometheus/client_golang v1.17.0
github.com/segmentio/ksuid v1.0.4
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.7.0
@ -41,16 +41,16 @@ require (
github.com/vmware/govmomi v0.32.0
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63
golang.org/x/oauth2 v0.13.0
golang.org/x/sync v0.3.0
golang.org/x/sync v0.4.0
golang.org/x/sys v0.13.0
google.golang.org/api v0.145.0
google.golang.org/api v0.146.0
)
require (
cloud.google.com/go v0.110.7 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
cloud.google.com/go/iam v1.1.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.22 // indirect
@ -142,9 +142,9 @@ require (
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/proglottis/gpgme v0.1.3 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.10.1 // indirect
github.com/prometheus/procfs v0.11.1 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect
github.com/sigstore/fulcio v1.4.0 // indirect
@ -166,7 +166,7 @@ require (
go.opencensus.io v0.24.0 // indirect
golang.org/x/crypto v0.14.0 // indirect
golang.org/x/mod v0.12.0 // indirect
golang.org/x/net v0.16.0 // indirect
golang.org/x/net v0.17.0 // indirect
golang.org/x/term v0.13.0 // indirect
golang.org/x/text v0.13.0 // indirect
golang.org/x/time v0.3.0 // indirect

go.sum (44 changed lines)

@ -44,14 +44,14 @@ dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1 h1:SEy2xmstIphdPwNBUi7uhvjyjhVKISfwjfOJmuy7kg4=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 h1:8q4SaHjFsClSvuVne0ID/5Ka8u3fcIHyqkLjcFpNRHQ=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0 h1:Ma67P/GGprNwsslzEH6+Kb8nybI8jpDTm4Wmzu2ReK8=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0 h1:nVocQV40OQne5613EeLayJiRAJuKlBGy+m22qWG+WRg=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0/go.mod h1:7QJP7dr2wznCMeqIrhMgWGf7XpAQnVrJqDm9nvV3Cu4=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 h1:gggzg0SUMs6SQbEw+3LoSsYf9YMjkupeAnHMX8O9mmY=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0/go.mod h1:+6KLcKIVgxoBDMqMO/Nvy7bZ9a0nbU3I1DtFQK3YvB4=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=
@ -320,8 +320,8 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-containerregistry v0.16.1 h1:rUEt426sR6nyrL3gt+18ibRcvYpKYdpsa5ZW7MA08dQ=
github.com/google/go-containerregistry v0.16.1/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@ -481,8 +481,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg=
github.com/labstack/echo/v4 v4.11.1 h1:dEpLU2FLg4UVmvCGPuk/APjlH6GDpbEPti61srUUUs4=
github.com/labstack/echo/v4 v4.11.1/go.mod h1:YuYRTSM3CHs2ybfrL8Px48bO6BAnYIN4l8wSTMP6BDQ=
github.com/labstack/echo/v4 v4.11.2 h1:T+cTLQxWCDfqDEoydYm5kCobjmHwOwcv4OJAPHilmdE=
github.com/labstack/echo/v4 v4.11.2/go.mod h1:UcGuQ8V6ZNRmSweBIJkPvGfwCMIlFmiqrPqiEBfPYws=
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
github.com/labstack/gommon v0.4.0 h1:y7cvthEAEbU0yHOf4axH8ZG2NH8knB9iNSoTO8dyIk8=
github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
@ -574,8 +574,8 @@ github.com/opencontainers/runc v1.1.9 h1:XR0VIHTGce5eWPkaPesqTBrhW2yAcaraWfsEalN
github.com/opencontainers/runc v1.1.9/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50=
github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg=
github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/openshift-online/ocm-sdk-go v0.1.371 h1:9/VPu3YZG1XxSwno81UWbOGFlr1fJU4mQIlvVqZhhHs=
github.com/openshift-online/ocm-sdk-go v0.1.371/go.mod h1:KYOw8kAKAHyPrJcQoVR82CneQ4ofC02Na4cXXaTq4Nw=
github.com/openshift-online/ocm-sdk-go v0.1.373 h1:oYdLASWXVkcB8nezoiGsKY14sIE7YNtvBn1HCFDqpGc=
github.com/openshift-online/ocm-sdk-go v0.1.373/go.mod h1:KYOw8kAKAHyPrJcQoVR82CneQ4ofC02Na4cXXaTq4Nw=
github.com/oracle/oci-go-sdk/v54 v54.0.0 h1:CDLjeSejv2aDpElAJrhKpi6zvT/zhZCZuXchUUZ+LS4=
github.com/oracle/oci-go-sdk/v54 v54.0.0/go.mod h1:+t+yvcFGVp+3ZnztnyxqXfQDsMlq8U25faBLa+mqCMc=
github.com/osbuild/images v0.11.0 h1:oanmis+PPXm8A8yDyxmccyce9NuGav9Q9R6ZEqBfsm4=
@ -595,14 +595,14 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM=
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
@ -614,8 +614,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@ -849,8 +849,8 @@ golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.16.0 h1:7eBu7KsSvFDtSXUIDbh3aqlK4DPsZ1rByC8PFfBThos=
golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -872,8 +872,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ=
golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -1047,8 +1047,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.145.0 h1:kBjvf1A3/m30kUvnUX9jZJxTu3lJrpGFt5V/1YZrjwg=
google.golang.org/api v0.145.0/go.mod h1:OARJqIfoYjXJj4C1AiBSXYZt03qsoz8FQYU6fBEfrHM=
google.golang.org/api v0.146.0 h1:9aBYT4vQXt9dhCuLNfwfd3zpwu8atg0yPkjBymwSrOM=
google.golang.org/api v0.146.0/go.mod h1:OARJqIfoYjXJj4C1AiBSXYZt03qsoz8FQYU6fBEfrHM=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=


@ -1,11 +1,50 @@
# Release History
## 1.7.0 (2023-07-12)
### Features Added
* Added method `WithClientName()` to type `azcore.Client` to support shallow cloning of a client with a new name used for tracing.
### Breaking Changes
> These changes affect only code written against beta versions v1.7.0-beta.1 or v1.7.0-beta.2
* The beta features for CAE, tracing, and fakes have been omitted for this release.
## 1.7.0-beta.2 (2023-06-06)
### Breaking Changes
> These changes affect only code written against beta version v1.7.0-beta.1
* Method `SpanFromContext()` on type `tracing.Tracer` had the `bool` return value removed.
* This includes the field `SpanFromContext` in supporting type `tracing.TracerOptions`.
* Method `AddError()` has been removed from type `tracing.Span`.
* Method `Span.End()` now requires an argument of type `*tracing.SpanEndOptions`.
## 1.6.1 (2023-06-06)
### Bugs Fixed
* Fixed an issue in `azcore.NewClient()` and `arm.NewClient()` that could cause an incorrect module name to be used in telemetry.
### Other Changes
* This version contains all bug fixes from `v1.7.0-beta.1`
## 1.7.0-beta.1 (2023-05-24)
### Features Added
* Restored CAE support for ARM clients.
* Added supporting features to enable distributed tracing.
* Added func `runtime.StartSpan()` for use by SDKs to start spans.
* Added method `WithContext()` to `runtime.Request` to support shallow cloning with a new context.
* Added field `TracingNamespace` to `runtime.PipelineOptions`.
* Added field `Tracer` to `runtime.NewPollerOptions` and `runtime.NewPollerFromResumeTokenOptions` types.
* Added field `SpanFromContext` to `tracing.TracerOptions`.
* Added methods `Enabled()`, `SetAttributes()`, and `SpanFromContext()` to `tracing.Tracer`.
* Added supporting pipeline policies to include HTTP spans when creating clients.
* Added package `fake` to support generated fakes packages in SDKs.
* The package contains public surface area exposed by fake servers and supporting APIs intended only for use by the fake server implementations.
* Added an internal fake poller implementation.
### Bugs Fixed
* Retry policy always clones the underlying `*http.Request` before invoking the next policy.
* Added some non-standard error codes to the list of error codes for unregistered resource providers.
* Fixed an issue in `azcore.NewClient()` and `arm.NewClient()` that could cause an incorrect module name to be used in telemetry.
## 1.6.0 (2023-05-04)
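
The `WithClientName()` addition described above is what lets the vendored azblob code further down in this diff derive per-blob clients from a container client while reusing a single pipeline. A minimal usage sketch, assuming hypothetical client and module names (nothing below is taken from this PR):

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

func main() {
	// Spans emitted through this client are attributed to "mypkg.ContainerClient".
	parent, err := azcore.NewClient("mypkg.ContainerClient", "v0.1.0",
		runtime.PipelineOptions{}, &azcore.ClientOptions{})
	if err != nil {
		log.Fatal(err)
	}

	// Shallow copy for a sub-resource: the pipeline, module name, and module
	// version are reused; only the tracing client name changes.
	child := parent.WithClientName("mypkg.BlobClient")
	_ = child
}
```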


@ -73,6 +73,10 @@ type ClientOptions = policy.ClientOptions
type Client struct {
pl runtime.Pipeline
tr tracing.Tracer
// cached on the client to support shallow copying with new values
tp tracing.Provider
modVer string
}
// NewClient creates a new Client instance with the provided values.
@ -100,7 +104,13 @@ func NewClient(clientName, moduleVersion string, plOpts runtime.PipelineOptions,
pl := runtime.NewPipeline(mod, moduleVersion, plOpts, options)
tr := options.TracingProvider.NewTracer(client, moduleVersion)
return &Client{pl: pl, tr: tr}, nil
return &Client{
pl: pl,
tr: tr,
tp: options.TracingProvider,
modVer: moduleVersion,
}, nil
}
// Pipeline returns the pipeline for this client.
@ -112,3 +122,11 @@ func (c *Client) Pipeline() runtime.Pipeline {
func (c *Client) Tracer() tracing.Tracer {
return c.tr
}
// WithClientName returns a shallow copy of the Client with its tracing client name changed to clientName.
// Note that the values for module name and version will be preserved from the source Client.
// - clientName - the fully qualified name of the client ("package.Client"); this is used by the tracing provider when creating spans
func (c *Client) WithClientName(clientName string) *Client {
tr := c.tp.NewTracer(clientName, c.modVer)
return &Client{pl: c.pl, tr: tr, tp: c.tp, modVer: c.modVer}
}


@ -32,5 +32,5 @@ const (
Module = "azcore"
// Version is the semantic version (see http://semver.org) of this module.
Version = "v1.6.1"
Version = "v1.7.0"
)


@ -1,5 +1,29 @@
# Release History
## 1.2.0 (2023-10-11)
### Bugs Fixed
* Fixed null pointer exception when `SetImmutabilityPolicyOptions` is passed as `nil`.
## 1.2.0-beta.1 (2023-09-18)
### Features Added
* Added support for service versions 2020-12-06, 2021-02-12, 2021-04-10, 2021-06-08, 2021-08-06, 2021-10-04, 2021-12-02, 2022-11-02, 2023-01-03, 2023-05-03, and 2023-08-03.
* Added support for [Cold Tier](https://learn.microsoft.com/azure/storage/blobs/access-tiers-overview?tabs=azure-portal).
* Added `CopySourceTags` option for `UploadBlobFromURLOptions`.
* Added [FilterBlobs by Tags](https://learn.microsoft.com/rest/api/storageservices/find-blobs-by-tags-container) API for the container client.
* Added `System` option to `ListContainersInclude` to allow listing of system containers (i.e., $web).
* Updated the SAS Version to `2021-12-02` and added `Encryption Scope` to Account SAS, Service SAS, and User Delegation SAS
* Added `ArchiveStatusRehydratePendingToCold` value to `ArchiveStatus` enum.
* Content length limit for `AppendBlob.AppendBlock()` and `AppendBlob.AppendBlockFromURL()` raised from 4 MB to 100 MB.
### Bugs Fixed
* Fixed an issue where some requests failed with a mismatch in the string to sign.
* Fixed service SAS creation so that the expiry time or permissions can be omitted when a stored access policy is used. Fixes [#21229](https://github.com/Azure/azure-sdk-for-go/issues/21229).
### Other Changes
* Updating version of azcore to 1.6.0.
## 1.1.0 (2023-07-13)
### Features Added
@ -15,7 +39,6 @@
* Fixed time formatting for the conditional request headers. Fixes [#20475](https://github.com/Azure/azure-sdk-for-go/issues/20475).
* Fixed an issue where passing a blob tags map of length 0 would result in the x-ms-tags header to be sent to the service with an empty string as value.
* Fixed block size and number of blocks calculation in `UploadBuffer` and `UploadFile`. Fixes [#20735](https://github.com/Azure/azure-sdk-for-go/issues/20735).
### Other Changes
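
The 1.2.0 entries above add the Cold access tier (`AccessTierCold` appears in the blob constants hunk further down in this diff). A minimal sketch of moving an existing blob to the new tier, assuming the blob client's existing `SetTier` method and a hypothetical SAS URL (account, container, and blob names are placeholders, not from this PR):

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func main() {
	// Hypothetical blob SAS URL; not part of this PR.
	const blobURL = "https://myaccount.blob.core.windows.net/mycontainer/myblob?<sas>"

	client, err := blob.NewClientWithNoCredential(blobURL, nil)
	if err != nil {
		log.Fatal(err)
	}

	// AccessTierCold is one of the values introduced with azblob v1.2.0.
	if _, err := client.SetTier(context.TODO(), blob.AccessTierCold, nil); err != nil {
		log.Fatal(err)
	}
}
```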


@ -1,6 +1,6 @@
# Azure Blob Storage module for Go
> Service Version: 2020-10-02
> Service Version: 2023-08-03
Azure Blob Storage is Microsoft's object storage solution for the cloud. Blob
Storage is optimized for storing massive amounts of unstructured data - data that does not adhere to a particular data model or


@ -9,6 +9,7 @@ package appendblob
import (
"context"
"errors"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"io"
"os"
"time"
@ -35,12 +36,14 @@ type Client base.CompositeClient[generated.BlobClient, generated.AppendBlobClien
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
authPolicy := shared.NewStorageChallengePolicy(cred)
conOptions := shared.GetClientOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
pl := runtime.NewPipeline(exported.ModuleName,
exported.ModuleVersion, runtime.PipelineOptions{},
&conOptions.ClientOptions)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
return (*Client)(base.NewAppendBlobClient(blobURL, pl, nil)), nil
azClient, err := azcore.NewClient(shared.AppendBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
return (*Client)(base.NewAppendBlobClient(blobURL, azClient, nil)), nil
}
// NewClientWithNoCredential creates an instance of Client with the specified values.
@ -49,12 +52,13 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio
// - options - client options; pass nil to accept the default values
func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) {
conOptions := shared.GetClientOptions(options)
pl := runtime.NewPipeline(exported.ModuleName,
exported.ModuleVersion,
runtime.PipelineOptions{},
&conOptions.ClientOptions)
return (*Client)(base.NewAppendBlobClient(blobURL, pl, nil)), nil
azClient, err := azcore.NewClient(shared.AppendBlobClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
return (*Client)(base.NewAppendBlobClient(blobURL, azClient, nil)), nil
}
// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
@ -64,13 +68,14 @@ func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client,
func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) {
authPolicy := exported.NewSharedKeyCredPolicy(cred)
conOptions := shared.GetClientOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
pl := runtime.NewPipeline(exported.ModuleName,
exported.ModuleVersion,
runtime.PipelineOptions{},
&conOptions.ClientOptions)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
return (*Client)(base.NewAppendBlobClient(blobURL, pl, cred)), nil
azClient, err := azcore.NewClient(shared.AppendBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
return (*Client)(base.NewAppendBlobClient(blobURL, azClient, cred)), nil
}
// NewClientFromConnectionString creates an instance of Client with the specified values.
@ -130,7 +135,7 @@ func (ab *Client) WithSnapshot(snapshot string) (*Client, error) {
}
p.Snapshot = snapshot
return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().Pipeline(), ab.sharedKey())), nil
return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().InternalClient(), ab.sharedKey())), nil
}
// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id.
@ -142,7 +147,7 @@ func (ab *Client) WithVersionID(versionID string) (*Client, error) {
}
p.VersionID = versionID
return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().Pipeline(), ab.sharedKey())), nil
return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().InternalClient(), ab.sharedKey())), nil
}
// Create creates a 0-size append blob. Call AppendBlock to append data to an append blob.


@ -2,5 +2,5 @@
"AssetsRepo": "Azure/azure-sdk-assets",
"AssetsRepoPrefixPath": "go",
"TagPrefix": "go/storage/azblob",
"Tag": "go/storage/azblob_a772b9c866"
"Tag": "go/storage/azblob_818d8addd0"
}


@ -8,15 +8,16 @@ package blob
import (
"context"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
"io"
"os"
"sync"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
@ -37,10 +38,13 @@ type Client base.Client[generated.BlobClient]
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
authPolicy := shared.NewStorageChallengePolicy(cred)
conOptions := shared.GetClientOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
return (*Client)(base.NewBlobClient(blobURL, pl, &cred)), nil
azClient, err := azcore.NewClient(shared.BlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
return (*Client)(base.NewBlobClient(blobURL, azClient, &cred)), nil
}
// NewClientWithNoCredential creates an instance of Client with the specified values.
@ -49,9 +53,12 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio
// - options - client options; pass nil to accept the default values
func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) {
conOptions := shared.GetClientOptions(options)
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
return (*Client)(base.NewBlobClient(blobURL, pl, nil)), nil
azClient, err := azcore.NewClient(shared.BlobClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
return (*Client)(base.NewBlobClient(blobURL, azClient, nil)), nil
}
// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
@ -61,10 +68,13 @@ func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client,
func NewClientWithSharedKeyCredential(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) {
authPolicy := exported.NewSharedKeyCredPolicy(cred)
conOptions := shared.GetClientOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
return (*Client)(base.NewBlobClient(blobURL, pl, cred)), nil
azClient, err := azcore.NewClient(shared.BlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
return (*Client)(base.NewBlobClient(blobURL, azClient, cred)), nil
}
// NewClientFromConnectionString creates an instance of Client with the specified values.
@ -116,7 +126,7 @@ func (b *Client) WithSnapshot(snapshot string) (*Client, error) {
}
p.Snapshot = snapshot
return (*Client)(base.NewBlobClient(p.String(), b.generated().Pipeline(), b.credential())), nil
return (*Client)(base.NewBlobClient(p.String(), b.generated().InternalClient(), b.credential())), nil
}
// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id.
@ -128,7 +138,7 @@ func (b *Client) WithVersionID(versionID string) (*Client, error) {
}
p.VersionID = versionID
return (*Client)(base.NewBlobClient(p.String(), b.generated().Pipeline(), b.credential())), nil
return (*Client)(base.NewBlobClient(p.String(), b.generated().InternalClient(), b.credential())), nil
}
// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
@ -261,8 +271,8 @@ func (b *Client) SetLegalHold(ctx context.Context, legalHold bool, options *SetL
// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url.
func (b *Client) CopyFromURL(ctx context.Context, copySource string, options *CopyFromURLOptions) (CopyFromURLResponse, error) {
copyOptions, smac, mac, lac := options.format()
resp, err := b.generated().CopyFromURL(ctx, copySource, copyOptions, smac, mac, lac)
copyOptions, smac, mac, lac, cpkScopeInfo := options.format()
resp, err := b.generated().CopyFromURL(ctx, copySource, copyOptions, smac, mac, lac, cpkScopeInfo)
return resp, err
}
@ -314,8 +324,8 @@ func (b *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o
// Concurrent Download Functions -----------------------------------------------------------------------------------------
// download downloads an Azure blob to a WriterAt in parallel.
func (b *Client) download(ctx context.Context, writer io.WriterAt, o downloadOptions) (int64, error) {
// downloadBuffer downloads an Azure blob to a WriterAt in parallel.
func (b *Client) downloadBuffer(ctx context.Context, writer io.WriterAt, o downloadOptions) (int64, error) {
if o.BlockSize == 0 {
o.BlockSize = DefaultDownloadBlockSize
}
@ -343,6 +353,7 @@ func (b *Client) download(ctx context.Context, writer io.WriterAt, o downloadOpt
OperationName: "downloadBlobToWriterAt",
TransferSize: count,
ChunkSize: o.BlockSize,
NumChunks: uint16(((count - 1) / o.BlockSize) + 1),
Concurrency: o.Concurrency,
Operation: func(ctx context.Context, chunkStart int64, count int64) error {
downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{
@ -381,6 +392,168 @@ func (b *Client) download(ctx context.Context, writer io.WriterAt, o downloadOpt
return count, nil
}
// downloadFile downloads an Azure blob to a Writer. The blocks are downloaded in parallel,
// but written to the file serially.
func (b *Client) downloadFile(ctx context.Context, writer io.Writer, o downloadOptions) (int64, error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if o.BlockSize == 0 {
o.BlockSize = DefaultDownloadBlockSize
}
if o.Concurrency == 0 {
o.Concurrency = DefaultConcurrency
}
count := o.Range.Count
if count == CountToEnd { //Calculate size if not specified
gr, err := b.GetProperties(ctx, o.getBlobPropertiesOptions())
if err != nil {
return 0, err
}
count = *gr.ContentLength - o.Range.Offset
}
if count <= 0 {
// The file is empty, there is nothing to download.
return 0, nil
}
progress := int64(0)
progressLock := &sync.Mutex{}
// helper routine to get body
getBodyForRange := func(ctx context.Context, chunkStart, size int64) (io.ReadCloser, error) {
downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{
Offset: chunkStart + o.Range.Offset,
Count: size,
}, nil)
dr, err := b.DownloadStream(ctx, downloadBlobOptions)
if err != nil {
return nil, err
}
var body io.ReadCloser = dr.NewRetryReader(ctx, &o.RetryReaderOptionsPerBlock)
if o.Progress != nil {
rangeProgress := int64(0)
body = streaming.NewResponseProgress(
body,
func(bytesTransferred int64) {
diff := bytesTransferred - rangeProgress
rangeProgress = bytesTransferred
progressLock.Lock()
progress += diff
o.Progress(progress)
progressLock.Unlock()
})
}
return body, nil
}
// if file fits in a single buffer, we'll download here.
if count <= o.BlockSize {
body, err := getBodyForRange(ctx, int64(0), count)
if err != nil {
return 0, err
}
defer body.Close()
return io.Copy(writer, body)
}
buffers := shared.NewMMBPool(int(o.Concurrency), o.BlockSize)
defer buffers.Free()
aquireBuffer := func() ([]byte, error) {
select {
case b := <-buffers.Acquire():
// got a buffer
return b, nil
default:
// no buffer available; allocate a new buffer if possible
if _, err := buffers.Grow(); err != nil {
return nil, err
}
// either grab the newly allocated buffer or wait for one to become available
return <-buffers.Acquire(), nil
}
}
numChunks := uint16((count-1)/o.BlockSize) + 1
blocks := make([]chan []byte, numChunks)
for b := range blocks {
blocks[b] = make(chan []byte)
}
/*
 * We create as many channels as there are chunks. Each downloaded block
 * is sent to the channel matching its sequence number, i.e. the 0th block
 * goes to the 0th channel, the 1st block to the 1st channel, and so on.
 * The blocks are then read and written to the file serially by the
 * goroutine below. Note that the blocks are still downloaded from the
 * network in parallel; they are only serialized and written to the file here.
 */
writerError := make(chan error)
go func(ch chan error) {
for _, block := range blocks {
select {
case <-ctx.Done():
return
case block := <-block:
_, err := writer.Write(block)
buffers.Release(block)
if err != nil {
ch <- err
return
}
}
}
ch <- nil
}(writerError)
// Prepare and do parallel download.
err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{
OperationName: "downloadBlobToWriterAt",
TransferSize: count,
ChunkSize: o.BlockSize,
NumChunks: numChunks,
Concurrency: o.Concurrency,
Operation: func(ctx context.Context, chunkStart int64, count int64) error {
buff, err := aquireBuffer()
if err != nil {
return err
}
body, err := getBodyForRange(ctx, chunkStart, count)
if err != nil {
buffers.Release(buff)
return nil
}
_, err = io.ReadFull(body, buff[:count])
body.Close()
if err != nil {
return err
}
blockIndex := (chunkStart / o.BlockSize)
blocks[blockIndex] <- buff
return nil
},
})
if err != nil {
return 0, err
}
// error from writer thread.
if err = <-writerError; err != nil {
return 0, err
}
return count, nil
}
// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
func (b *Client) DownloadStream(ctx context.Context, o *DownloadStreamOptions) (DownloadStreamResponse, error) {
@ -409,7 +582,7 @@ func (b *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *DownloadB
if o == nil {
o = &DownloadBufferOptions{}
}
return b.download(ctx, shared.NewBytesWriter(buffer), (downloadOptions)(*o))
return b.downloadBuffer(ctx, shared.NewBytesWriter(buffer), (downloadOptions)(*o))
}
// DownloadFile downloads an Azure blob to a local file.
@ -448,7 +621,7 @@ func (b *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFil
}
if size > 0 {
return b.download(ctx, file, *do)
return b.downloadFile(ctx, file, *do)
} else { // if the blob's size is 0, there is no need in downloading it
return 0, nil
}
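
The `downloadFile` rewrite above keeps the public `DownloadFile` signature: blocks are still fetched concurrently, but they are now buffered and written to the file in order rather than at offsets via `WriterAt`. A minimal sketch of the unchanged call site, with a hypothetical SAS URL and local path (not from this PR):

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func main() {
	// Hypothetical blob SAS URL; not part of this PR.
	client, err := blob.NewClientWithNoCredential(
		"https://myaccount.blob.core.windows.net/mycontainer/large.bin?<sas>", nil)
	if err != nil {
		log.Fatal(err)
	}

	f, err := os.Create("large.bin")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Same options as before the bump; the values used here are the package
	// defaults (DefaultDownloadBlockSize, DefaultConcurrency).
	n, err := client.DownloadFile(context.TODO(), f, &blob.DownloadFileOptions{
		BlockSize:   blob.DefaultDownloadBlockSize,
		Concurrency: blob.DefaultConcurrency,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("downloaded %d bytes", n)
}
```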


@ -9,6 +9,7 @@ package blob
import (
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
)
const (
@ -18,6 +19,9 @@ const (
// DefaultDownloadBlockSize is default block size
DefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB
// DefaultConcurrency is the default number of blocks downloaded or uploaded in parallel
DefaultConcurrency = shared.DefaultConcurrency
)
// BlobType defines values for BlobType
@ -53,6 +57,7 @@ type AccessTier = generated.AccessTier
const (
AccessTierArchive AccessTier = generated.AccessTierArchive
AccessTierCool AccessTier = generated.AccessTierCool
AccessTierCold AccessTier = generated.AccessTierCold
AccessTierHot AccessTier = generated.AccessTierHot
AccessTierP10 AccessTier = generated.AccessTierP10
AccessTierP15 AccessTier = generated.AccessTierP15
@ -148,6 +153,7 @@ type ArchiveStatus = generated.ArchiveStatus
const (
ArchiveStatusRehydratePendingToCool ArchiveStatus = generated.ArchiveStatusRehydratePendingToCool
ArchiveStatusRehydratePendingToHot ArchiveStatus = generated.ArchiveStatusRehydratePendingToHot
ArchiveStatusRehydratePendingToCold ArchiveStatus = generated.ArchiveStatusRehydratePendingToCold
)
// PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type.


@ -458,7 +458,7 @@ type SetImmutabilityPolicyOptions struct {
func (o *SetImmutabilityPolicyOptions) format() (*generated.BlobClientSetImmutabilityPolicyOptions, *ModifiedAccessConditions) {
if o == nil {
return nil, nil
return &generated.BlobClientSetImmutabilityPolicyOptions{}, nil
}
ac := &exported.BlobAccessConditions{
ModifiedAccessConditions: o.ModifiedAccessConditions,
@ -544,11 +544,13 @@ type CopyFromURLOptions struct {
SourceModifiedAccessConditions *SourceModifiedAccessConditions
BlobAccessConditions *AccessConditions
CPKScopeInfo *CPKScopeInfo
}
func (o *CopyFromURLOptions) format() (*generated.BlobClientCopyFromURLOptions, *generated.SourceModifiedAccessConditions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) {
func (o *CopyFromURLOptions) format() (*generated.BlobClientCopyFromURLOptions, *generated.SourceModifiedAccessConditions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions, *generated.CPKScopeInfo) {
if o == nil {
return nil, nil, nil, nil
return nil, nil, nil, nil, nil
}
options := &generated.BlobClientCopyFromURLOptions{
@ -563,7 +565,7 @@ func (o *CopyFromURLOptions) format() (*generated.BlobClientCopyFromURLOptions,
}
leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.BlobAccessConditions)
return options, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions
return options, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions, o.CPKScopeInfo
}
// ---------------------------------------------------------------------------------------------------------------------


@ -153,4 +153,5 @@ const (
var (
// MissingSharedKeyCredential - Error is returned when SAS URL is being created without SharedKeyCredential.
MissingSharedKeyCredential = errors.New("SAS can only be signed with a SharedKeyCredential")
UnsupportedChecksum = errors.New("for multi-part uploads, user generated checksums cannot be validated")
)


@ -18,6 +18,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
)
// blockWriter provides methods to upload blocks that represent a file to a server and commit them.
@ -28,27 +29,8 @@ type blockWriter interface {
CommitBlockList(context.Context, []string, *CommitBlockListOptions) (CommitBlockListResponse, error)
}
// bufferManager provides an abstraction for the management of buffers.
// this is mostly for testing purposes, but does allow for different implementations without changing the algorithm.
type bufferManager[T ~[]byte] interface {
// Acquire returns the channel that contains the pool of buffers.
Acquire() <-chan T
// Release releases the buffer back to the pool for reuse/cleanup.
Release(T)
// Grow grows the number of buffers, up to the predefined max.
// It returns the total number of buffers or an error.
// No error is returned if the number of buffers has reached max.
// This is called only from the reading goroutine.
Grow() (int, error)
// Free cleans up all buffers.
Free()
}
// copyFromReader copies a source io.Reader to blob storage using concurrent uploads.
func copyFromReader[T ~[]byte](ctx context.Context, src io.Reader, dst blockWriter, options UploadStreamOptions, getBufferManager func(maxBuffers int, bufferSize int64) bufferManager[T]) (CommitBlockListResponse, error) {
func copyFromReader[T ~[]byte](ctx context.Context, src io.Reader, dst blockWriter, options UploadStreamOptions, getBufferManager func(maxBuffers int, bufferSize int64) shared.BufferManager[T]) (CommitBlockListResponse, error) {
options.setDefaults()
wg := sync.WaitGroup{} // Used to know when all outgoing blocks have finished processing
@ -265,49 +247,3 @@ func (ubi uuidBlockID) WithBlockNumber(blockNumber uint32) uuidBlockID {
func (ubi uuidBlockID) ToBase64() string {
return blockID(ubi).ToBase64()
}
// mmbPool implements the bufferManager interface.
// it uses anonymous memory mapped files for buffers.
// don't use this type directly, use newMMBPool() instead.
type mmbPool struct {
buffers chan mmb
count int
max int
size int64
}
func newMMBPool(maxBuffers int, bufferSize int64) bufferManager[mmb] {
return &mmbPool{
buffers: make(chan mmb, maxBuffers),
max: maxBuffers,
size: bufferSize,
}
}
func (pool *mmbPool) Acquire() <-chan mmb {
return pool.buffers
}
func (pool *mmbPool) Grow() (int, error) {
if pool.count < pool.max {
buffer, err := newMMB(pool.size)
if err != nil {
return 0, err
}
pool.buffers <- buffer
pool.count++
}
return pool.count, nil
}
func (pool *mmbPool) Release(buffer mmb) {
pool.buffers <- buffer
}
func (pool *mmbPool) Free() {
for i := 0; i < pool.count; i++ {
buffer := <-pool.buffers
buffer.delete()
}
pool.count = 0
}


@ -11,9 +11,12 @@ import (
"context"
"encoding/base64"
"errors"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
"io"
"math"
"os"
"reflect"
"sync"
"time"
@ -43,10 +46,13 @@ type Client base.CompositeClient[generated.BlobClient, generated.BlockBlobClient
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
authPolicy := shared.NewStorageChallengePolicy(cred)
conOptions := shared.GetClientOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
return (*Client)(base.NewBlockBlobClient(blobURL, pl, nil)), nil
azClient, err := azcore.NewClient(shared.BlockBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
return (*Client)(base.NewBlockBlobClient(blobURL, azClient, nil)), nil
}
// NewClientWithNoCredential creates an instance of Client with the specified values.
@ -55,9 +61,13 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio
// - options - client options; pass nil to accept the default values
func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) {
conOptions := shared.GetClientOptions(options)
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
return (*Client)(base.NewBlockBlobClient(blobURL, pl, nil)), nil
azClient, err := azcore.NewClient(shared.BlockBlobClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
return (*Client)(base.NewBlockBlobClient(blobURL, azClient, nil)), nil
}
// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
@ -67,10 +77,14 @@ func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client,
func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) {
authPolicy := exported.NewSharedKeyCredPolicy(cred)
conOptions := shared.GetClientOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
return (*Client)(base.NewBlockBlobClient(blobURL, pl, cred)), nil
azClient, err := azcore.NewClient(shared.BlockBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
return (*Client)(base.NewBlockBlobClient(blobURL, azClient, cred)), nil
}
// NewClientFromConnectionString creates an instance of Client with the specified values.
@ -130,7 +144,7 @@ func (bb *Client) WithSnapshot(snapshot string) (*Client, error) {
}
p.Snapshot = snapshot
return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Pipeline(), bb.sharedKey())), nil
return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Internal(), bb.sharedKey())), nil
}
// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id.
@ -142,7 +156,7 @@ func (bb *Client) WithVersionID(versionID string) (*Client, error) {
}
p.VersionID = versionID
return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Pipeline(), bb.sharedKey())), nil
return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Internal(), bb.sharedKey())), nil
}
// Upload creates a new block blob or overwrites an existing block blob.
@ -160,6 +174,13 @@ func (bb *Client) Upload(ctx context.Context, body io.ReadSeekCloser, options *U
opts, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions := options.format()
if options != nil && options.TransactionalValidation != nil {
body, err = options.TransactionalValidation.Apply(body, opts)
if err != nil {
return UploadResponse{}, err
}
}
resp, err := bb.generated().Upload(ctx, count, body, opts, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions)
return resp, err
}
@ -248,6 +269,11 @@ func (bb *Client) CommitBlockList(ctx context.Context, base64BlockIDs []string,
ImmutabilityPolicyExpiry: options.ImmutabilityPolicyExpiryTime,
}
// If user attempts to pass in their own checksum, errors out.
if options.TransactionalContentMD5 != nil || options.TransactionalContentCRC64 != nil {
return CommitBlockListResponse{}, bloberror.UnsupportedChecksum
}
headers = options.HTTPHeaders
leaseAccess, modifiedAccess = exported.FormatBlobAccessConditions(options.AccessConditions)
cpkInfo = options.CPKInfo
@ -440,6 +466,7 @@ func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, actu
OperationName: "uploadFromReader",
TransferSize: actualSize,
ChunkSize: o.BlockSize,
NumChunks: uint16(((actualSize - 1) / o.BlockSize) + 1),
Concurrency: o.Concurrency,
Operation: func(ctx context.Context, offset int64, chunkSize int64) error {
// This function is called once per block.
@ -494,6 +521,12 @@ func (bb *Client) UploadBuffer(ctx context.Context, buffer []byte, o *UploadBuff
if o != nil {
uploadOptions = *o
}
// If user attempts to pass in their own checksum, errors out.
if uploadOptions.TransactionalValidation != nil && reflect.TypeOf(uploadOptions.TransactionalValidation).Kind() != reflect.Func {
return UploadBufferResponse{}, bloberror.UnsupportedChecksum
}
return bb.uploadFromReader(ctx, bytes.NewReader(buffer), int64(len(buffer)), &uploadOptions)
}
@ -507,6 +540,12 @@ func (bb *Client) UploadFile(ctx context.Context, file *os.File, o *UploadFileOp
if o != nil {
uploadOptions = *o
}
// If user attempts to pass in their own checksum, errors out.
if uploadOptions.TransactionalValidation != nil && reflect.TypeOf(uploadOptions.TransactionalValidation).Kind() != reflect.Func {
return UploadFileResponse{}, bloberror.UnsupportedChecksum
}
return bb.uploadFromReader(ctx, file, stat.Size(), &uploadOptions)
}
@ -517,7 +556,12 @@ func (bb *Client) UploadStream(ctx context.Context, body io.Reader, o *UploadStr
o = &UploadStreamOptions{}
}
result, err := copyFromReader(ctx, body, bb, *o, newMMBPool)
// If user attempts to pass in their own checksum, errors out.
if o.TransactionalValidation != nil && reflect.TypeOf(o.TransactionalValidation).Kind() != reflect.Func {
return UploadStreamResponse{}, bloberror.UnsupportedChecksum
}
result, err := copyFromReader(ctx, body, bb, *o, shared.NewMMBPool)
if err != nil {
return CommitBlockListResponse{}, err
}
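
The `bloberror.UnsupportedChecksum` checks added above mean that, after this bump, caller-supplied transactional checksums are rejected for multi-part uploads before any request is sent. A minimal sketch of the new failure mode, using a hypothetical SAS URL and block list (not from this PR):

```go
package main

import (
	"context"
	"crypto/md5"
	"errors"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

func main() {
	// Hypothetical SAS URL and block list; not part of this PR.
	client, err := blockblob.NewClientWithNoCredential(
		"https://myaccount.blob.core.windows.net/mycontainer/myblob?<sas>", nil)
	if err != nil {
		log.Fatal(err)
	}

	payload := []byte("data already staged via StageBlock")
	sum := md5.Sum(payload)
	blockIDs := []string{"AAAA"} // base64 block IDs from the staging step

	// A caller-computed checksum on CommitBlockList is now rejected locally:
	// one checksum cannot be validated across a multi-part upload.
	_, err = client.CommitBlockList(context.TODO(), blockIDs, &blockblob.CommitBlockListOptions{
		TransactionalContentMD5: sum[:], // deprecated field, see the options hunk below
	})
	if errors.Is(err, bloberror.UnsupportedChecksum) {
		log.Println("retry without the transactional checksum:", err)
	}
}
```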


@ -37,3 +37,16 @@ const (
func PossibleBlockListTypeValues() []BlockListType {
return generated.PossibleBlockListTypeValues()
}
// BlobCopySourceTags - can be 'COPY' or 'REPLACE'
type BlobCopySourceTags = generated.BlobCopySourceTags
const (
BlobCopySourceTagsCopy = generated.BlobCopySourceTagsCOPY
BlobCopySourceTagsReplace = generated.BlobCopySourceTagsREPLACE
)
// PossibleBlobCopySourceTagsValues returns the possible values for the BlobCopySourceTags const type.
func PossibleBlobCopySourceTagsValues() []BlobCopySourceTags {
return generated.PossibleBlobCopySourceTagsValues()
}
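
The `BlobCopySourceTags` values above back the new `CopySourceTags` field on `UploadBlobFromURLOptions`, shown a little further down in this diff. A minimal sketch, assuming the block blob client's `UploadBlobFromURL` method (only its options type appears in this diff) and hypothetical SAS URLs:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

func main() {
	// Hypothetical SAS URLs; not part of this PR.
	const dstURL = "https://myaccount.blob.core.windows.net/mycontainer/dst?<sas>"
	const srcURL = "https://myaccount.blob.core.windows.net/mycontainer/src?<sas>"

	client, err := blockblob.NewClientWithNoCredential(dstURL, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Keep the source blob's tags instead of replacing them (the default is 'replace').
	copyTags := blockblob.BlobCopySourceTagsCopy
	_, err = client.UploadBlobFromURL(context.TODO(), srcURL, &blockblob.UploadBlobFromURLOptions{
		CopySourceTags: &copyTags,
	})
	if err != nil {
		log.Fatal(err)
	}
}
```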


@ -36,8 +36,9 @@ type UploadOptions struct {
// Optional. Indicates the tier to be set on the blob.
Tier *blob.AccessTier
// Specify the transactional md5 for the body, to be validated by the service.
TransactionalContentMD5 []byte
// TransactionalValidation specifies the transfer validation type to use.
// The default is nil (no transfer validation).
TransactionalValidation blob.TransferValidationType
HTTPHeaders *blob.HTTPHeaders
CPKInfo *blob.CPKInfo
@ -46,6 +47,9 @@ type UploadOptions struct {
LegalHold *bool
ImmutabilityPolicyMode *blob.ImmutabilityPolicySetting
ImmutabilityPolicyExpiryTime *time.Time
// Deprecated: TransactionalContentMD5 can be set by using TransactionalValidation instead
TransactionalContentMD5 []byte
}
func (o *UploadOptions) format() (*generated.BlockBlobClientUploadOptions, *generated.BlobHTTPHeaders, *generated.LeaseAccessConditions,
@ -81,6 +85,9 @@ type UploadBlobFromURLOptions struct {
// Optional, default is true. Indicates if properties from the source blob should be copied.
CopySourceBlobProperties *bool
// Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags.
CopySourceTags *BlobCopySourceTags
// Optional. Specifies a user-defined name-value pair associated with the blob.
Metadata map[string]*string
@ -109,6 +116,7 @@ func (o *UploadBlobFromURLOptions) format() (*generated.BlockBlobClientPutBlobFr
BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags),
CopySourceAuthorization: o.CopySourceAuthorization,
CopySourceBlobProperties: o.CopySourceBlobProperties,
CopySourceTags: o.CopySourceTags,
Metadata: o.Metadata,
SourceContentMD5: o.SourceContentMD5,
Tier: o.Tier,
@ -190,8 +198,6 @@ type CommitBlockListOptions struct {
RequestID *string
Tier *blob.AccessTier
Timeout *int32
TransactionalContentCRC64 []byte
TransactionalContentMD5 []byte
HTTPHeaders *blob.HTTPHeaders
CPKInfo *blob.CPKInfo
CPKScopeInfo *blob.CPKScopeInfo
@ -199,6 +205,12 @@ type CommitBlockListOptions struct {
LegalHold *bool
ImmutabilityPolicyMode *blob.ImmutabilityPolicySetting
ImmutabilityPolicyExpiryTime *time.Time
// Deprecated: TransactionalContentCRC64 cannot be generated
TransactionalContentCRC64 []byte
// Deprecated: TransactionalContentMD5 cannot be generated
TransactionalContentMD5 []byte
}
// ---------------------------------------------------------------------------------------------------------------------
@ -253,9 +265,10 @@ type uploadFromReaderOptions struct {
TransactionalValidation blob.TransferValidationType
// Optional header, Specifies the transactional crc64 for the body, to be validated by the service.
// Deprecated: TransactionalContentCRC64 cannot be generated at block level
TransactionalContentCRC64 uint64
// Specify the transactional md5 for the body, to be validated by the service.
// Deprecated: TransactionalContentMD5 cannot be generated at block level
TransactionalContentMD5 []byte
}


@ -44,10 +44,13 @@ type Client base.Client[generated.ContainerClient]
func NewClient(containerURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
authPolicy := shared.NewStorageChallengePolicy(cred)
conOptions := shared.GetClientOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
return (*Client)(base.NewContainerClient(containerURL, pl, &cred)), nil
azClient, err := azcore.NewClient(shared.ContainerClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
return (*Client)(base.NewContainerClient(containerURL, azClient, &cred)), nil
}
// NewClientWithNoCredential creates an instance of Client with the specified values.
@ -56,9 +59,12 @@ func NewClient(containerURL string, cred azcore.TokenCredential, options *Client
// - options - client options; pass nil to accept the default values
func NewClientWithNoCredential(containerURL string, options *ClientOptions) (*Client, error) {
conOptions := shared.GetClientOptions(options)
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
return (*Client)(base.NewContainerClient(containerURL, pl, nil)), nil
azClient, err := azcore.NewClient(shared.ContainerClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
return (*Client)(base.NewContainerClient(containerURL, azClient, nil)), nil
}
// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
@ -68,10 +74,13 @@ func NewClientWithNoCredential(containerURL string, options *ClientOptions) (*Cl
func NewClientWithSharedKeyCredential(containerURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) {
authPolicy := exported.NewSharedKeyCredPolicy(cred)
conOptions := shared.GetClientOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
return (*Client)(base.NewContainerClient(containerURL, pl, cred)), nil
azClient, err := azcore.NewClient(shared.ContainerClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
return (*Client)(base.NewContainerClient(containerURL, azClient, cred)), nil
}
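For orientation, here is a minimal, hedged usage sketch (not part of the diff) of the constructors above after the switch from runtime.NewPipeline to azcore.NewClient; the account URL is a placeholder and the credential setup is an assumption of the example.

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
)

func main() {
	// Placeholder URL; substitute a real storage account and container.
	const containerURL = "https://<account>.blob.core.windows.net/<container>"

	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}

	// NewClient now assembles an azcore.Client (pipeline plus tracing provider)
	// internally, so the runtime.Pipeline -> azcore.Client change is invisible
	// to callers.
	client, err := container.NewClient(containerURL, cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	_ = client
}
```

NewClientWithNoCredential and NewClientWithSharedKeyCredential follow the same shape, differing only in how the per-retry authentication policy is supplied.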
// NewClientFromConnectionString creates an instance of Client with the specified values.
@ -124,7 +133,7 @@ func (c *Client) URL() string {
func (c *Client) NewBlobClient(blobName string) *blob.Client {
blobName = url.PathEscape(blobName)
blobURL := runtime.JoinPaths(c.URL(), blobName)
return (*blob.Client)(base.NewBlobClient(blobURL, c.generated().Pipeline(), c.credential()))
return (*blob.Client)(base.NewBlobClient(blobURL, c.generated().InternalClient().WithClientName(shared.BlobClient), c.credential()))
}
// NewAppendBlobClient creates a new appendblob.Client object by concatenating blobName to the end of
@ -133,7 +142,7 @@ func (c *Client) NewBlobClient(blobName string) *blob.Client {
func (c *Client) NewAppendBlobClient(blobName string) *appendblob.Client {
blobName = url.PathEscape(blobName)
blobURL := runtime.JoinPaths(c.URL(), blobName)
return (*appendblob.Client)(base.NewAppendBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey()))
return (*appendblob.Client)(base.NewAppendBlobClient(blobURL, c.generated().InternalClient().WithClientName(shared.AppendBlobClient), c.sharedKey()))
}
// NewBlockBlobClient creates a new blockblob.Client object by concatenating blobName to the end of
@ -142,7 +151,7 @@ func (c *Client) NewAppendBlobClient(blobName string) *appendblob.Client {
func (c *Client) NewBlockBlobClient(blobName string) *blockblob.Client {
blobName = url.PathEscape(blobName)
blobURL := runtime.JoinPaths(c.URL(), blobName)
return (*blockblob.Client)(base.NewBlockBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey()))
return (*blockblob.Client)(base.NewBlockBlobClient(blobURL, c.generated().InternalClient().WithClientName(shared.BlockBlobClient), c.sharedKey()))
}
// NewPageBlobClient creates a new pageblob.Client object by concatenating blobName to the end of
@ -151,7 +160,7 @@ func (c *Client) NewBlockBlobClient(blobName string) *blockblob.Client {
func (c *Client) NewPageBlobClient(blobName string) *pageblob.Client {
blobName = url.PathEscape(blobName)
blobURL := runtime.JoinPaths(c.URL(), blobName)
return (*pageblob.Client)(base.NewPageBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey()))
return (*pageblob.Client)(base.NewPageBlobClient(blobURL, c.generated().InternalClient().WithClientName(shared.PageBlobClient), c.sharedKey()))
}
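The four sub-client constructors above now pass the container client's internal azcore.Client (re-tagged via WithClientName) instead of a raw pipeline. A small caller-side sketch, assuming an already-constructed *container.Client; the blob names are illustrative only:

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
)

// deriveSubClients shows the caller-side view: each helper appends the
// escaped blob name to the container URL and reuses the container client's
// internal azcore.Client.
func deriveSubClients(c *container.Client) {
	_ = c.NewBlobClient("example.txt")      // generic blob operations
	_ = c.NewBlockBlobClient("example.txt") // block blob upload/commit
	_ = c.NewAppendBlobClient("audit.log")  // append-only blobs
	_ = c.NewPageBlobClient("disk.vhd")     // page blobs
}
```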
// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails.
@ -272,7 +281,7 @@ func (c *Client) NewListBlobsFlatPager(o *ListBlobsFlatOptions) *runtime.Pager[L
if err != nil {
return ListBlobsFlatResponse{}, err
}
resp, err := c.generated().Pipeline().Do(req)
resp, err := c.generated().InternalClient().Pipeline().Do(req)
if err != nil {
return ListBlobsFlatResponse{}, err
}
@ -308,7 +317,7 @@ func (c *Client) NewListBlobsHierarchyPager(delimiter string, o *ListBlobsHierar
if err != nil {
return ListBlobsHierarchyResponse{}, err
}
resp, err := c.generated().Pipeline().Do(req)
resp, err := c.generated().InternalClient().Pipeline().Do(req)
if err != nil {
return ListBlobsHierarchyResponse{}, err
}
@ -412,3 +421,12 @@ func (c *Client) SubmitBatch(ctx context.Context, bb *BatchBuilder, options *Sub
Version: resp.Version,
}, nil
}
// FilterBlobs operation finds all blobs in the container whose tags match a given search expression.
// https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags-container
// eg. "dog='germanshepherd' and penguin='emperorpenguin'"
func (c *Client) FilterBlobs(ctx context.Context, where string, o *FilterBlobsOptions) (FilterBlobsResponse, error) {
containerClientFilterBlobsOptions := o.format()
resp, err := c.generated().FilterBlobs(ctx, where, containerClientFilterBlobsOptions)
return resp, err
}
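A hedged sketch of calling the new container-scoped FilterBlobs. The response embeds the generated FilterBlobSegment, so the Blobs/Name field names used below follow that model and should be treated as assumptions of the example.

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
)

// filterByTags finds blobs in one container whose tags match the given
// expression, using the FilterBlobs method added above. Client construction
// is omitted; ctx and client are assumed to exist.
func filterByTags(ctx context.Context, client *container.Client) error {
	where := "dog='germanshepherd' and penguin='emperorpenguin'"
	resp, err := client.FilterBlobs(ctx, where, nil)
	if err != nil {
		return err
	}
	for _, item := range resp.Blobs {
		if item.Name != nil {
			fmt.Println(*item.Name)
		}
	}
	return nil
}
```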


@ -397,3 +397,31 @@ type SubmitBatchOptions struct {
func (o *SubmitBatchOptions) format() *generated.ContainerClientSubmitBatchOptions {
return nil
}
// ---------------------------------------------------------------------------------------------------------------------
// FilterBlobsOptions provides set of options for Client.FilterBlobs.
type FilterBlobsOptions struct {
// A string value that identifies the portion of the result set to be returned with the next operation. The
// operation returns the NextMarker value within the response body if the
// operation did not return all results remaining to be listed with the current page. The NextMarker value can be used
// as the value for the marker parameter in a subsequent call to request the next
// page of results. The marker value is opaque to the client.
Marker *string
// Specifies the maximum number of results to return. If the request does not specify maxresults, or specifies a value
// greater than 5000, the server will return up to 5000 items. Note that if the
// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
// of the results. For this reason, it is possible that the service will
// return fewer results than specified by maxresults, or than the default of 5000.
MaxResults *int32
}
func (o *FilterBlobsOptions) format() *generated.ContainerClientFilterBlobsOptions {
if o == nil {
return nil
}
return &generated.ContainerClientFilterBlobsOptions{
Marker: o.Marker,
Maxresults: o.MaxResults,
}
}


@ -64,3 +64,6 @@ type SubmitBatchResponse struct {
// BatchResponseItem contains the response for the individual sub-requests.
type BatchResponseItem = exported.BatchResponseItem
// FilterBlobsResponse contains the response from method Client.FilterBlobs.
type FilterBlobsResponse = generated.ContainerClientFilterBlobsResponse


@ -8,7 +8,6 @@ package base
import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)
@ -44,23 +43,23 @@ func NewClient[T any](inner *T) *Client[T] {
return &Client[T]{inner: inner}
}
func NewServiceClient(containerURL string, pipeline runtime.Pipeline, credential any) *Client[generated.ServiceClient] {
func NewServiceClient(containerURL string, azClient *azcore.Client, credential any) *Client[generated.ServiceClient] {
return &Client[generated.ServiceClient]{
inner: generated.NewServiceClient(containerURL, pipeline),
inner: generated.NewServiceClient(containerURL, azClient),
credential: credential,
}
}
func NewContainerClient(containerURL string, pipeline runtime.Pipeline, credential any) *Client[generated.ContainerClient] {
func NewContainerClient(containerURL string, azClient *azcore.Client, credential any) *Client[generated.ContainerClient] {
return &Client[generated.ContainerClient]{
inner: generated.NewContainerClient(containerURL, pipeline),
inner: generated.NewContainerClient(containerURL, azClient),
credential: credential,
}
}
func NewBlobClient(blobURL string, pipeline runtime.Pipeline, credential any) *Client[generated.BlobClient] {
func NewBlobClient(blobURL string, azClient *azcore.Client, credential any) *Client[generated.BlobClient] {
return &Client[generated.BlobClient]{
inner: generated.NewBlobClient(blobURL, pipeline),
inner: generated.NewBlobClient(blobURL, azClient),
credential: credential,
}
}
@ -75,26 +74,26 @@ func InnerClients[T, U any](client *CompositeClient[T, U]) (*Client[T], *U) {
return &Client[T]{inner: client.innerT}, client.innerU
}
func NewAppendBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.AppendBlobClient] {
func NewAppendBlobClient(blobURL string, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.AppendBlobClient] {
return &CompositeClient[generated.BlobClient, generated.AppendBlobClient]{
innerT: generated.NewBlobClient(blobURL, pipeline),
innerU: generated.NewAppendBlobClient(blobURL, pipeline),
innerT: generated.NewBlobClient(blobURL, azClient),
innerU: generated.NewAppendBlobClient(blobURL, azClient),
sharedKey: sharedKey,
}
}
func NewBlockBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.BlockBlobClient] {
func NewBlockBlobClient(blobURL string, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.BlockBlobClient] {
return &CompositeClient[generated.BlobClient, generated.BlockBlobClient]{
innerT: generated.NewBlobClient(blobURL, pipeline),
innerU: generated.NewBlockBlobClient(blobURL, pipeline),
innerT: generated.NewBlobClient(blobURL, azClient),
innerU: generated.NewBlockBlobClient(blobURL, azClient),
sharedKey: sharedKey,
}
}
func NewPageBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.PageBlobClient] {
func NewPageBlobClient(blobURL string, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.PageBlobClient] {
return &CompositeClient[generated.BlobClient, generated.PageBlobClient]{
innerT: generated.NewBlobClient(blobURL, pipeline),
innerU: generated.NewPageBlobClient(blobURL, pipeline),
innerT: generated.NewBlobClient(blobURL, azClient),
innerU: generated.NewPageBlobClient(blobURL, azClient),
sharedKey: sharedKey,
}
}


@ -172,7 +172,7 @@ func (c *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, er
// Join the sorted key values separated by ','
// Then prepend "keyName:"; then add this string to the buffer
cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ","))
cr.WriteString("\n" + strings.ToLower(paramName) + ":" + strings.Join(paramValues, ","))
}
}
return cr.String(), nil
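The one-line change above lowercases query parameter names when building the canonicalized resource for shared key signing. A standalone sketch of that step (not the SDK's actual helper) for illustration:

```go
package example

import (
	"net/url"
	"sort"
	"strings"
)

// canonicalizedQuery builds the query portion of the string-to-sign:
// parameter names are lowercased (the fix shown above), values are sorted,
// joined with commas, and each pair is appended on its own line.
func canonicalizedQuery(u *url.URL) string {
	params := u.Query()
	names := make([]string, 0, len(params))
	for name := range params {
		names = append(names, name)
	}
	sort.Strings(names)

	var sb strings.Builder
	for _, name := range names {
		values := params[name]
		sort.Strings(values)
		sb.WriteString("\n" + strings.ToLower(name) + ":" + strings.Join(values, ","))
	}
	return sb.String()
}
```

With a URL ending in ?Comp=list&restype=container, this sketch produces "\ncomp:list\nrestype:container".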


@ -8,5 +8,5 @@ package exported
const (
ModuleName = "azblob"
ModuleVersion = "v1.1.0"
ModuleVersion = "v1.2.0"
)


@ -8,12 +8,25 @@
package generated
import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
)
func (client *AppendBlobClient) Endpoint() string {
return client.endpoint
}
func (client *AppendBlobClient) Pipeline() runtime.Pipeline {
return client.pl
func (client *AppendBlobClient) InternalClient() *azcore.Client {
return client.internal
}
// NewAppendBlobClient creates a new instance of AppendBlobClient with the specified values.
// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation.
// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider.
func NewAppendBlobClient(endpoint string, azClient *azcore.Client) *AppendBlobClient {
client := &AppendBlobClient{
internal: azClient,
endpoint: endpoint,
}
return client
}


@ -7,7 +7,7 @@ go: true
clear-output-folder: false
version: "^3.0.0"
license-header: MICROSOFT_MIT_NO_VERSION
input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/e515b6251fdc21015282d2e84b85beec7c091763/specification/storage/data-plane/Microsoft.BlobStorage/preview/2020-10-02/blob.json"
input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/a32d0b2423d19835246bb2ef92941503bfd5e734/specification/storage/data-plane/Microsoft.BlobStorage/preview/2021-12-02/blob.json"
credential-scope: "https://storage.azure.com/.default"
output-folder: ../generated
file-prefix: "zz_"
@ -19,7 +19,43 @@ modelerfour:
seal-single-value-enum-by-default: true
lenient-model-deduplication: true
export-clients: true
use: "@autorest/go@4.0.0-preview.45"
use: "@autorest/go@4.0.0-preview.49"
```
### Updating service version to 2023-08-03
```yaml
directive:
- from:
- zz_appendblob_client.go
- zz_blob_client.go
- zz_blockblob_client.go
- zz_container_client.go
- zz_pageblob_client.go
- zz_service_client.go
where: $
transform: >-
return $.
replaceAll(`[]string{"2021-12-02"}`, `[]string{ServiceVersion}`).
replaceAll(`2021-12-02`, `2023-08-03`);
```
### Undo breaking change with BlobName
``` yaml
directive:
- from: zz_models.go
where: $
transform: >-
return $.
replace(/Name\s+\*BlobName/g, `Name *string`);
```
### Removing UnmarshalXML for BlobItems to create custom UnmarshalXML function
```yaml
directive:
- from: swagger-document
where: $.definitions
transform: >
$.BlobItemInternal["x-ms-go-omit-serde-methods"] = true;
```
### Remove pager methods and export various generated methods in container client


@ -8,8 +8,8 @@ package generated
import (
"context"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"time"
)
@ -20,8 +20,8 @@ func (client *BlobClient) Endpoint() string {
return client.endpoint
}
func (client *BlobClient) Pipeline() runtime.Pipeline {
return client.pl
func (client *BlobClient) InternalClient() *azcore.Client {
return client.internal
}
func (client *BlobClient) DeleteCreateRequest(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
@ -31,3 +31,14 @@ func (client *BlobClient) DeleteCreateRequest(ctx context.Context, options *Blob
func (client *BlobClient) SetTierCreateRequest(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
return client.setTierCreateRequest(ctx, tier, options, leaseAccessConditions, modifiedAccessConditions)
}
// NewBlobClient creates a new instance of BlobClient with the specified values.
// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation.
// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider.
func NewBlobClient(endpoint string, azClient *azcore.Client) *BlobClient {
client := &BlobClient{
internal: azClient,
endpoint: endpoint,
}
return client
}


@ -8,12 +8,25 @@
package generated
import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
)
func (client *BlockBlobClient) Endpoint() string {
return client.endpoint
}
func (client *BlockBlobClient) Pipeline() runtime.Pipeline {
return client.pl
func (client *BlockBlobClient) Internal() *azcore.Client {
return client.internal
}
// NewBlockBlobClient creates a new instance of BlockBlobClient with the specified values.
// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation.
// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider.
func NewBlockBlobClient(endpoint string, azClient *azcore.Client) *BlockBlobClient {
client := &BlockBlobClient{
internal: azClient,
endpoint: endpoint,
}
return client
}


@ -0,0 +1,9 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package generated
const ServiceVersion = "2023-08-03"


@ -6,12 +6,25 @@
package generated
import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
)
func (client *ContainerClient) Endpoint() string {
return client.endpoint
}
func (client *ContainerClient) Pipeline() runtime.Pipeline {
return client.pl
func (client *ContainerClient) InternalClient() *azcore.Client {
return client.internal
}
// NewContainerClient creates a new instance of ContainerClient with the specified values.
// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation.
// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider.
func NewContainerClient(endpoint string, azClient *azcore.Client) *ContainerClient {
client := &ContainerClient{
internal: azClient,
endpoint: endpoint,
}
return client
}


@ -6,6 +6,12 @@
package generated
import (
"encoding/xml"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"net/url"
)
type TransactionalContentSetter interface {
SetCRC64([]byte)
SetMD5([]byte)
@ -35,6 +41,14 @@ func (p *PageBlobClientUploadPagesOptions) SetMD5(v []byte) {
p.TransactionalContentMD5 = v
}
func (b *BlockBlobClientUploadOptions) SetCRC64(v []byte) {
b.TransactionalContentCRC64 = v
}
func (b *BlockBlobClientUploadOptions) SetMD5(v []byte) {
b.TransactionalContentMD5 = v
}
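These two setters let BlockBlobClientUploadOptions satisfy the TransactionalContentSetter interface above. A minimal sketch of the pattern, with a locally defined interface standing in for the internal generated one (an assumption of the example):

```go
package example

import "crypto/md5"

// contentSetter mirrors the shape of the generated TransactionalContentSetter;
// the real interface lives in the internal generated package.
type contentSetter interface {
	SetMD5([]byte)
}

// attachMD5 computes a digest over the request body and hands it to whichever
// options type implements the setter, e.g. the upload options above.
func attachMD5(s contentSetter, body []byte) {
	sum := md5.Sum(body)
	s.SetMD5(sum[:])
}
```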
type SourceContentSetter interface {
SetSourceContentCRC64(v []byte)
SetSourceContentMD5(v []byte)
@ -63,3 +77,65 @@ func (p *PageBlobClientUploadPagesFromURLOptions) SetSourceContentCRC64(v []byte
func (p *PageBlobClientUploadPagesFromURLOptions) SetSourceContentMD5(v []byte) {
p.SourceContentMD5 = v
}
// Custom UnmarshalXML functions for types that need special handling.
// UnmarshalXML implements the xml.Unmarshaller interface for type BlobPrefix.
func (b *BlobPrefix) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error {
type alias BlobPrefix
aux := &struct {
*alias
BlobName *BlobName `xml:"Name"`
}{
alias: (*alias)(b),
}
if err := dec.DecodeElement(aux, &start); err != nil {
return err
}
if aux.BlobName != nil {
if aux.BlobName.Encoded != nil && *aux.BlobName.Encoded {
name, err := url.QueryUnescape(*aux.BlobName.Content)
// name, err := base64.StdEncoding.DecodeString(*aux.BlobName.Content)
if err != nil {
return err
}
b.Name = to.Ptr(string(name))
} else {
b.Name = aux.BlobName.Content
}
}
return nil
}
// UnmarshalXML implements the xml.Unmarshaller interface for type BlobItem.
func (b *BlobItem) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error {
type alias BlobItem
aux := &struct {
*alias
BlobName *BlobName `xml:"Name"`
Metadata additionalProperties `xml:"Metadata"`
OrMetadata additionalProperties `xml:"OrMetadata"`
}{
alias: (*alias)(b),
}
if err := dec.DecodeElement(aux, &start); err != nil {
return err
}
b.Metadata = (map[string]*string)(aux.Metadata)
b.OrMetadata = (map[string]*string)(aux.OrMetadata)
if aux.BlobName != nil {
if aux.BlobName.Encoded != nil && *aux.BlobName.Encoded {
name, err := url.QueryUnescape(*aux.BlobName.Content)
// name, err := base64.StdEncoding.DecodeString(*aux.BlobName.Content)
if err != nil {
return err
}
b.Name = to.Ptr(string(name))
} else {
b.Name = aux.BlobName.Content
}
}
return nil
}
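Both UnmarshalXML overrides above unescape the blob name only when the element is flagged Encoded="true". A tiny sketch of just that decoding step:

```go
package example

import "net/url"

// decodeBlobName mirrors the branch in the custom UnmarshalXML functions:
// percent-encoded names are unescaped, plain names pass through unchanged.
// decodeBlobName("my%20blob%2B1.txt", true) returns "my blob+1.txt".
func decodeBlobName(content string, encoded bool) (string, error) {
	if !encoded {
		return content, nil
	}
	return url.QueryUnescape(content)
}
```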


@ -6,12 +6,25 @@
package generated
import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
)
func (client *PageBlobClient) Endpoint() string {
return client.endpoint
}
func (client *PageBlobClient) Pipeline() runtime.Pipeline {
return client.pl
func (client *PageBlobClient) InternalClient() *azcore.Client {
return client.internal
}
// NewPageBlobClient creates a new instance of PageBlobClient with the specified values.
// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation.
// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider.
func NewPageBlobClient(endpoint string, azClient *azcore.Client) *PageBlobClient {
client := &PageBlobClient{
internal: azClient,
endpoint: endpoint,
}
return client
}


@ -6,12 +6,25 @@
package generated
import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
)
func (client *ServiceClient) Endpoint() string {
return client.endpoint
}
func (client *ServiceClient) Pipeline() runtime.Pipeline {
return client.pl
func (client *ServiceClient) InternalClient() *azcore.Client {
return client.internal
}
// NewServiceClient creates a new instance of ServiceClient with the specified values.
// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation.
// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider.
func NewServiceClient(endpoint string, azClient *azcore.Client) *ServiceClient {
client := &ServiceClient{
internal: azClient,
endpoint: endpoint,
}
return client
}


@ -22,21 +22,10 @@ import (
)
// AppendBlobClient contains the methods for the AppendBlob group.
// Don't use this type directly, use NewAppendBlobClient() instead.
// Don't use this type directly, use a constructor function instead.
type AppendBlobClient struct {
internal *azcore.Client
endpoint string
pl runtime.Pipeline
}
// NewAppendBlobClient creates a new instance of AppendBlobClient with the specified values.
// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation.
// - pl - the pipeline used for sending requests and handling responses.
func NewAppendBlobClient(endpoint string, pl runtime.Pipeline) *AppendBlobClient {
client := &AppendBlobClient{
endpoint: endpoint,
pl: pl,
}
return client
}
// AppendBlock - The Append Block operation commits a new block of data to the end of an existing append blob. The Append
@ -44,7 +33,7 @@ func NewAppendBlobClient(endpoint string, pl runtime.Pipeline) *AppendBlobClient
// AppendBlob. Append Block is supported only on version 2015-02-21 version or later.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - contentLength - The length of the request.
// - body - Initial data
// - options - AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method.
@ -59,7 +48,7 @@ func (client *AppendBlobClient) AppendBlock(ctx context.Context, contentLength i
if err != nil {
return AppendBlobClientAppendBlockResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return AppendBlobClientAppendBlockResponse{}, err
}
@ -124,12 +113,15 @@ func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, co
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["Accept"] = []string{"application/xml"}
return req, req.SetBody(body, "application/octet-stream")
if err := req.SetBody(body, "application/octet-stream"); err != nil {
return nil, err
}
return req, nil
}
// appendBlockHandleResponse handles the AppendBlock response.
@ -207,7 +199,7 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) (
// created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - sourceURL - Specify a URL to the copy source.
// - contentLength - The length of the request.
// - options - AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL
@ -225,7 +217,7 @@ func (client *AppendBlobClient) AppendBlockFromURL(ctx context.Context, sourceUR
if err != nil {
return AppendBlobClientAppendBlockFromURLResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return AppendBlobClientAppendBlockFromURLResponse{}, err
}
@ -309,7 +301,7 @@ func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Cont
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -390,7 +382,7 @@ func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp
// Create - The Create Append Blob operation creates a new append blob.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - contentLength - The length of the request.
// - options - AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method.
// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method.
@ -403,7 +395,7 @@ func (client *AppendBlobClient) Create(ctx context.Context, contentLength int64,
if err != nil {
return AppendBlobClientCreateResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return AppendBlobClientCreateResponse{}, err
}
@ -481,7 +473,7 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -560,7 +552,7 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen
// or later.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@ -571,7 +563,7 @@ func (client *AppendBlobClient) Seal(ctx context.Context, options *AppendBlobCli
if err != nil {
return AppendBlobClientSealResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return AppendBlobClientSealResponse{}, err
}
@ -593,7 +585,7 @@ func (client *AppendBlobClient) sealCreateRequest(ctx context.Context, options *
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}


@ -23,28 +23,17 @@ import (
)
// BlobClient contains the methods for the Blob group.
// Don't use this type directly, use NewBlobClient() instead.
// Don't use this type directly, use a constructor function instead.
type BlobClient struct {
internal *azcore.Client
endpoint string
pl runtime.Pipeline
}
// NewBlobClient creates a new instance of BlobClient with the specified values.
// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation.
// - pl - the pipeline used for sending requests and handling responses.
func NewBlobClient(endpoint string, pl runtime.Pipeline) *BlobClient {
client := &BlobClient{
endpoint: endpoint,
pl: pl,
}
return client
}
// AbortCopyFromURL - The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a destination
// blob with zero length and full metadata.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - copyID - The copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation.
// - options - BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
@ -53,7 +42,7 @@ func (client *BlobClient) AbortCopyFromURL(ctx context.Context, copyID string, o
if err != nil {
return BlobClientAbortCopyFromURLResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlobClientAbortCopyFromURLResponse{}, err
}
@ -80,7 +69,7 @@ func (client *BlobClient) abortCopyFromURLCreateRequest(ctx context.Context, cop
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -113,7 +102,7 @@ func (client *BlobClient) abortCopyFromURLHandleResponse(resp *http.Response) (B
// AcquireLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite
// lease can be between 15 and 60 seconds. A lease duration cannot be changed using
// renew or change.
@ -124,7 +113,7 @@ func (client *BlobClient) AcquireLease(ctx context.Context, duration int32, opti
if err != nil {
return BlobClientAcquireLeaseResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlobClientAcquireLeaseResponse{}, err
}
@ -166,7 +155,7 @@ func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, duratio
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -212,7 +201,7 @@ func (client *BlobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobC
// BreakLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
func (client *BlobClient) BreakLease(ctx context.Context, options *BlobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientBreakLeaseResponse, error) {
@ -220,7 +209,7 @@ func (client *BlobClient) BreakLease(ctx context.Context, options *BlobClientBre
if err != nil {
return BlobClientBreakLeaseResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlobClientBreakLeaseResponse{}, err
}
@ -261,7 +250,7 @@ func (client *BlobClient) breakLeaseCreateRequest(ctx context.Context, options *
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -312,7 +301,7 @@ func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobCli
// ChangeLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - leaseID - Specifies the current lease ID on the resource.
// - proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed
// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID
@ -324,7 +313,7 @@ func (client *BlobClient) ChangeLease(ctx context.Context, leaseID string, propo
if err != nil {
return BlobClientChangeLeaseResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlobClientChangeLeaseResponse{}, err
}
@ -364,7 +353,7 @@ func (client *BlobClient) changeLeaseCreateRequest(ctx context.Context, leaseID
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -411,7 +400,7 @@ func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobCl
// until the copy is complete.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies
// a page blob snapshot. The value should be URL-encoded as it would appear in a request
// URI. The source blob must either be public or must be authenticated via a shared access signature.
@ -420,12 +409,13 @@ func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobCl
// method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
func (client *BlobClient) CopyFromURL(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientCopyFromURLResponse, error) {
req, err := client.copyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions)
// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
func (client *BlobClient) CopyFromURL(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions, cpkScopeInfo *CPKScopeInfo) (BlobClientCopyFromURLResponse, error) {
req, err := client.copyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions, cpkScopeInfo)
if err != nil {
return BlobClientCopyFromURLResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlobClientCopyFromURLResponse{}, err
}
@ -436,7 +426,7 @@ func (client *BlobClient) CopyFromURL(ctx context.Context, copySource string, op
}
// copyFromURLCreateRequest creates the CopyFromURL request.
func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions, cpkScopeInfo *CPKScopeInfo) (*policy.Request, error) {
req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
@ -488,7 +478,7 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -510,6 +500,12 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour
if options != nil && options.CopySourceAuthorization != nil {
req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization}
}
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
}
if options != nil && options.CopySourceTags != nil {
req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)}
}
req.Raw().Header["Accept"] = []string{"application/xml"}
return req, nil
}
@ -566,13 +562,16 @@ func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobCl
}
result.ContentCRC64 = contentCRC64
}
if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
result.EncryptionScope = &val
}
return result, nil
}
// CreateSnapshot - The Create Snapshot operation creates a read-only snapshot of a blob
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method.
// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
@ -583,7 +582,7 @@ func (client *BlobClient) CreateSnapshot(ctx context.Context, options *BlobClien
if err != nil {
return BlobClientCreateSnapshotResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlobClientCreateSnapshotResponse{}, err
}
@ -642,7 +641,7 @@ func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, optio
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -708,7 +707,7 @@ func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (Blo
// return an HTTP status code of 404 (ResourceNotFound).
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@ -717,7 +716,7 @@ func (client *BlobClient) Delete(ctx context.Context, options *BlobClientDeleteO
if err != nil {
return BlobClientDeleteResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlobClientDeleteResponse{}, err
}
@ -768,7 +767,7 @@ func (client *BlobClient) deleteCreateRequest(ctx context.Context, options *Blob
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -801,7 +800,7 @@ func (client *BlobClient) deleteHandleResponse(resp *http.Response) (BlobClientD
// DeleteImmutabilityPolicy - The Delete Immutability Policy operation deletes the immutability policy on the blob
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy
// method.
func (client *BlobClient) DeleteImmutabilityPolicy(ctx context.Context, options *BlobClientDeleteImmutabilityPolicyOptions) (BlobClientDeleteImmutabilityPolicyResponse, error) {
@ -809,7 +808,7 @@ func (client *BlobClient) DeleteImmutabilityPolicy(ctx context.Context, options
if err != nil {
return BlobClientDeleteImmutabilityPolicyResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlobClientDeleteImmutabilityPolicyResponse{}, err
}
@ -831,7 +830,7 @@ func (client *BlobClient) deleteImmutabilityPolicyCreateRequest(ctx context.Cont
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -865,7 +864,7 @@ func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Resp
// can also call Download to read a snapshot.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
@ -875,7 +874,7 @@ func (client *BlobClient) Download(ctx context.Context, options *BlobClientDownl
if err != nil {
return BlobClientDownloadResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlobClientDownloadResponse{}, err
}
@ -939,7 +938,7 @@ func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *Bl
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -957,6 +956,13 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien
}
result.LastModified = &lastModified
}
if val := resp.Header.Get("x-ms-creation-time"); val != "" {
creationTime, err := time.Parse(time.RFC1123, val)
if err != nil {
return BlobClientDownloadResponse{}, err
}
result.CreationTime = &creationTime
}
for hh := range resp.Header {
if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") {
if result.Metadata == nil {
@ -1163,14 +1169,14 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien
// GetAccountInfo - Returns the sku name and account kind
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method.
func (client *BlobClient) GetAccountInfo(ctx context.Context, options *BlobClientGetAccountInfoOptions) (BlobClientGetAccountInfoResponse, error) {
req, err := client.getAccountInfoCreateRequest(ctx, options)
if err != nil {
return BlobClientGetAccountInfoResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlobClientGetAccountInfoResponse{}, err
}
@ -1190,7 +1196,7 @@ func (client *BlobClient) getAccountInfoCreateRequest(ctx context.Context, optio
reqQP.Set("restype", "account")
reqQP.Set("comp", "properties")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
req.Raw().Header["Accept"] = []string{"application/xml"}
return req, nil
}
@ -1227,7 +1233,7 @@ func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (Blo
// for the blob. It does not return the content of the blob.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
@ -1237,7 +1243,7 @@ func (client *BlobClient) GetProperties(ctx context.Context, options *BlobClient
if err != nil {
return BlobClientGetPropertiesResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlobClientGetPropertiesResponse{}, err
}
@ -1291,7 +1297,7 @@ func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, option
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -1542,7 +1548,7 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob
// GetTags - The Get Tags operation enables users to get the tags associated with a blob.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
@ -1551,7 +1557,7 @@ func (client *BlobClient) GetTags(ctx context.Context, options *BlobClientGetTag
if err != nil {
return BlobClientGetTagsResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlobClientGetTagsResponse{}, err
}
@ -1579,7 +1585,7 @@ func (client *BlobClient) getTagsCreateRequest(ctx context.Context, options *Blo
reqQP.Set("versionid", *options.VersionID)
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -1621,7 +1627,7 @@ func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClient
// Query - The Query operation enables users to select/project on blob data by providing simple query expressions.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
@ -1631,7 +1637,7 @@ func (client *BlobClient) Query(ctx context.Context, options *BlobClientQueryOpt
if err != nil {
return BlobClientQueryResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlobClientQueryResponse{}, err
}
@ -1684,13 +1690,16 @@ func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobC
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["Accept"] = []string{"application/xml"}
if options != nil && options.QueryRequest != nil {
return req, runtime.MarshalAsXML(req, *options.QueryRequest)
if err := runtime.MarshalAsXML(req, *options.QueryRequest); err != nil {
return nil, err
}
return req, nil
}
return req, nil
}
@ -1849,7 +1858,7 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu
// ReleaseLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - leaseID - Specifies the current lease ID on the resource.
// - options - BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@ -1858,7 +1867,7 @@ func (client *BlobClient) ReleaseLease(ctx context.Context, leaseID string, opti
if err != nil {
return BlobClientReleaseLeaseResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlobClientReleaseLeaseResponse{}, err
}
@ -1897,7 +1906,7 @@ func (client *BlobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -1940,7 +1949,7 @@ func (client *BlobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobC
// RenewLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - leaseID - Specifies the current lease ID on the resource.
// - options - BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@ -1949,7 +1958,7 @@ func (client *BlobClient) RenewLease(ctx context.Context, leaseID string, option
if err != nil {
return BlobClientRenewLeaseResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlobClientRenewLeaseResponse{}, err
}
@ -1988,7 +1997,7 @@ func (client *BlobClient) renewLeaseCreateRequest(ctx context.Context, leaseID s
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -2034,7 +2043,7 @@ func (client *BlobClient) renewLeaseHandleResponse(resp *http.Response) (BlobCli
// SetExpiry - Sets the time a blob will expire and be deleted.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - expiryOptions - Required. Indicates mode of the expiry time
// - options - BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method.
func (client *BlobClient) SetExpiry(ctx context.Context, expiryOptions ExpiryOptions, options *BlobClientSetExpiryOptions) (BlobClientSetExpiryResponse, error) {
@ -2042,7 +2051,7 @@ func (client *BlobClient) SetExpiry(ctx context.Context, expiryOptions ExpiryOpt
if err != nil {
return BlobClientSetExpiryResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlobClientSetExpiryResponse{}, err
}
@ -2064,7 +2073,7 @@ func (client *BlobClient) setExpiryCreateRequest(ctx context.Context, expiryOpti
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -2111,7 +2120,7 @@ func (client *BlobClient) setExpiryHandleResponse(resp *http.Response) (BlobClie
// SetHTTPHeaders - The Set HTTP Headers operation sets system properties on the blob
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method.
// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
@ -2121,7 +2130,7 @@ func (client *BlobClient) SetHTTPHeaders(ctx context.Context, options *BlobClien
if err != nil {
return BlobClientSetHTTPHeadersResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlobClientSetHTTPHeadersResponse{}, err
}
@ -2179,7 +2188,7 @@ func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, optio
if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -2229,7 +2238,7 @@ func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (Blo
// SetImmutabilityPolicy - The Set Immutability Policy operation sets the immutability policy on the blob
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy
// method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@ -2238,7 +2247,7 @@ func (client *BlobClient) SetImmutabilityPolicy(ctx context.Context, options *Bl
if err != nil {
return BlobClientSetImmutabilityPolicyResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlobClientSetImmutabilityPolicyResponse{}, err
}
@ -2260,7 +2269,7 @@ func (client *BlobClient) setImmutabilityPolicyCreateRequest(ctx context.Context
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -2312,7 +2321,7 @@ func (client *BlobClient) setImmutabilityPolicyHandleResponse(resp *http.Respons
// SetLegalHold - The Set Legal Hold operation sets a legal hold on the blob.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - legalHold - Specified if a legal hold should be set on the blob.
// - options - BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method.
func (client *BlobClient) SetLegalHold(ctx context.Context, legalHold bool, options *BlobClientSetLegalHoldOptions) (BlobClientSetLegalHoldResponse, error) {
@ -2320,7 +2329,7 @@ func (client *BlobClient) SetLegalHold(ctx context.Context, legalHold bool, opti
if err != nil {
return BlobClientSetLegalHoldResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlobClientSetLegalHoldResponse{}, err
}
@ -2342,7 +2351,7 @@ func (client *BlobClient) setLegalHoldCreateRequest(ctx context.Context, legalHo
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -2384,7 +2393,7 @@ func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobC
// pairs
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
@ -2395,7 +2404,7 @@ func (client *BlobClient) SetMetadata(ctx context.Context, options *BlobClientSe
if err != nil {
return BlobClientSetMetadataResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlobClientSetMetadataResponse{}, err
}
@ -2454,7 +2463,7 @@ func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -2513,7 +2522,7 @@ func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobCl
// SetTags - The Set Tags operation enables users to set tags on a blob.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - tags - Blob tags
// - options - BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@ -2523,7 +2532,7 @@ func (client *BlobClient) SetTags(ctx context.Context, tags BlobTags, options *B
if err != nil {
return BlobClientSetTagsResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlobClientSetTagsResponse{}, err
}
@ -2548,7 +2557,7 @@ func (client *BlobClient) setTagsCreateRequest(ctx context.Context, tags BlobTag
reqQP.Set("versionid", *options.VersionID)
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.TransactionalContentMD5 != nil {
req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)}
}
@ -2565,7 +2574,10 @@ func (client *BlobClient) setTagsCreateRequest(ctx context.Context, tags BlobTag
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
req.Raw().Header["Accept"] = []string{"application/xml"}
return req, runtime.MarshalAsXML(req, tags)
if err := runtime.MarshalAsXML(req, tags); err != nil {
return nil, err
}
return req, nil
}
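The SetTags request builder now surfaces the XML-marshalling error explicitly instead of returning it alongside a non-nil request. For reference, a hedged sketch of the tag payload that `runtime.MarshalAsXML(req, tags)` serializes, assuming the generated `BlobTags`/`BlobTag` models keep their usual key/value shape (the models are not part of this hunk):

```go
// Hedged sketch: the tag payload passed to SetTags and serialized by
// runtime.MarshalAsXML. BlobTags/BlobTag field names are assumed from the
// generated models, which this hunk does not show.
func exampleBlobTags() BlobTags {
	key, value := "project", "billing"
	return BlobTags{
		BlobTagSet: []*BlobTag{{Key: &key, Value: &value}},
	}
}
```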
// setTagsHandleResponse handles the SetTags response.
@ -2596,7 +2608,7 @@ func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClient
// storage type. This operation does not update the blob's ETag.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - tier - Indicates the tier to be set on the blob.
// - options - BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
@ -2606,7 +2618,7 @@ func (client *BlobClient) SetTier(ctx context.Context, tier AccessTier, options
if err != nil {
return BlobClientSetTierResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlobClientSetTierResponse{}, err
}
@ -2638,7 +2650,7 @@ func (client *BlobClient) setTierCreateRequest(ctx context.Context, tier AccessT
if options != nil && options.RehydratePriority != nil {
req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
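SetTier picks up the new `Cold` access tier that this update adds to the `AccessTier` constants later in this diff. A hedged sketch of moving a blob to the cold tier through the regenerated client; the trailing nil arguments stand in for the optional parameter structs, whose exact list this hunk truncates:

```go
// Hedged sketch: moves a blob to the new Cold tier via the generated client.
// The nil arguments are placeholders for the optional *Options and
// access-condition structs implied by the doc comment above.
func setBlobToColdTier(ctx context.Context, client *BlobClient) error {
	_, err := client.SetTier(ctx, AccessTierCold, nil, nil, nil)
	return err
}
```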
@ -2670,7 +2682,7 @@ func (client *BlobClient) setTierHandleResponse(resp *http.Response) (BlobClient
// StartCopyFromURL - The Start Copy From URL operation copies a blob or an internet resource to a new blob.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies
// a page blob snapshot. The value should be URL-encoded as it would appear in a request
// URI. The source blob must either be public or must be authenticated via a shared access signature.
@ -2684,7 +2696,7 @@ func (client *BlobClient) StartCopyFromURL(ctx context.Context, copySource strin
if err != nil {
return BlobClientStartCopyFromURLResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlobClientStartCopyFromURLResponse{}, err
}
@ -2752,7 +2764,7 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -2819,14 +2831,14 @@ func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (B
// Undelete - Undelete a blob that was previously soft deleted
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method.
func (client *BlobClient) Undelete(ctx context.Context, options *BlobClientUndeleteOptions) (BlobClientUndeleteResponse, error) {
req, err := client.undeleteCreateRequest(ctx, options)
if err != nil {
return BlobClientUndeleteResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlobClientUndeleteResponse{}, err
}
@ -2848,7 +2860,7 @@ func (client *BlobClient) undeleteCreateRequest(ctx context.Context, options *Bl
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}



@ -22,21 +22,10 @@ import (
)
// BlockBlobClient contains the methods for the BlockBlob group.
// Don't use this type directly, use NewBlockBlobClient() instead.
// Don't use this type directly, use a constructor function instead.
type BlockBlobClient struct {
internal *azcore.Client
endpoint string
pl runtime.Pipeline
}
// NewBlockBlobClient creates a new instance of BlockBlobClient with the specified values.
// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation.
// - pl - the pipeline used for sending requests and handling responses.
func NewBlockBlobClient(endpoint string, pl runtime.Pipeline) *BlockBlobClient {
client := &BlockBlobClient{
endpoint: endpoint,
pl: pl,
}
return client
}
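As with the other regenerated clients, the per-group constructor that took an endpoint and a `runtime.Pipeline` is gone; the struct now wraps `*azcore.Client` and is built internally by the module's public constructors. A hedged sketch of the consumer-facing construction path, assuming the public `blockblob` sub-package and `azidentity` (neither appears in this diff):

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

func main() {
	// Hypothetical blob URL, for illustration only.
	blobURL := "https://myaccount.blob.core.windows.net/mycontainer/myblob"

	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}

	// The public constructor wires up the pipeline; callers no longer build
	// a runtime.Pipeline and hand it to NewBlockBlobClient themselves.
	client, err := blockblob.NewClient(blobURL, cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	_ = client
}
```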
// CommitBlockList - The Commit Block List operation writes a blob by specifying the list of block IDs that make up the blob.
@ -48,7 +37,7 @@ func NewBlockBlobClient(endpoint string, pl runtime.Pipeline) *BlockBlobClient {
// belong to.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - blocks - Blob Blocks.
// - options - BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList
// method.
@ -62,7 +51,7 @@ func (client *BlockBlobClient) CommitBlockList(ctx context.Context, blocks Block
if err != nil {
return BlockBlobClientCommitBlockListResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlockBlobClientCommitBlockListResponse{}, err
}
@ -148,7 +137,7 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context,
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -165,7 +154,10 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context,
req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)}
}
req.Raw().Header["Accept"] = []string{"application/xml"}
return req, runtime.MarshalAsXML(req, blocks)
if err := runtime.MarshalAsXML(req, blocks); err != nil {
return nil, err
}
return req, nil
}
// commitBlockListHandleResponse handles the CommitBlockList response.
@ -233,7 +225,7 @@ func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response
// GetBlockList - The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - listType - Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together.
// - options - BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
@ -243,7 +235,7 @@ func (client *BlockBlobClient) GetBlockList(ctx context.Context, listType BlockL
if err != nil {
return BlockBlobClientGetBlockListResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlockBlobClientGetBlockListResponse{}, err
}
@ -275,7 +267,7 @@ func (client *BlockBlobClient) getBlockListCreateRequest(ctx context.Context, li
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -335,7 +327,7 @@ func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) (
// Block from URL API in conjunction with Put Block List.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - contentLength - The length of the request.
// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies
// a page blob snapshot. The value should be URL-encoded as it would appear in a request
@ -354,7 +346,7 @@ func (client *BlockBlobClient) PutBlobFromURL(ctx context.Context, contentLength
if err != nil {
return BlockBlobClientPutBlobFromURLResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlockBlobClientPutBlobFromURLResponse{}, err
}
@ -453,7 +445,7 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context,
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil {
req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -470,6 +462,9 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context,
if options != nil && options.CopySourceAuthorization != nil {
req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization}
}
if options != nil && options.CopySourceTags != nil {
req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)}
}
req.Raw().Header["Accept"] = []string{"application/xml"}
return req, nil
}
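PutBlobFromURL now forwards a new optional `x-ms-copy-source-tag-option` header, driven by a `CopySourceTags` field on the options struct; the `BlobCopySourceTags` constants (`COPY`/`REPLACE`) are added in the constants file later in this diff. A hedged sketch of populating that option; the options struct name follows the file's naming convention and is assumed here:

```go
// Hedged sketch: asks the service to copy the source blob's tags along with
// the blob. Only the CopySourceTags field is relied on beyond what the hunk
// above shows.
func copyOptionsWithSourceTags() *BlockBlobClientPutBlobFromURLOptions {
	// putBlobFromURLCreateRequest turns this into:
	//   x-ms-copy-source-tag-option: COPY
	tagOption := BlobCopySourceTagsCOPY // or BlobCopySourceTagsREPLACE
	return &BlockBlobClientPutBlobFromURLOptions{
		CopySourceTags: &tagOption,
	}
}
```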
@ -532,7 +527,7 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response)
// StageBlock - The Stage Block operation creates a new block to be committed as part of a blob
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal
// to 64 bytes in size. For a given blob, the length of the value specified for the blockid
// parameter must be the same size for each block.
@ -547,7 +542,7 @@ func (client *BlockBlobClient) StageBlock(ctx context.Context, blockID string, c
if err != nil {
return BlockBlobClientStageBlockResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlockBlobClientStageBlockResponse{}, err
}
@ -592,12 +587,15 @@ func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, bloc
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["Accept"] = []string{"application/xml"}
return req, req.SetBody(body, "application/octet-stream")
if err := req.SetBody(body, "application/octet-stream"); err != nil {
return nil, err
}
return req, nil
}
// stageBlockHandleResponse handles the StageBlock response.
@ -653,7 +651,7 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl
// are read from a URL.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal
// to 64 bytes in size. For a given blob, the length of the value specified for the blockid
// parameter must be the same size for each block.
@ -671,7 +669,7 @@ func (client *BlockBlobClient) StageBlockFromURL(ctx context.Context, blockID st
if err != nil {
return BlockBlobClientStageBlockFromURLResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlockBlobClientStageBlockFromURLResponse{}, err
}
@ -732,7 +730,7 @@ func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Contex
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -798,7 +796,7 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon
// the content of a block blob, use the Put Block List operation.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - contentLength - The length of the request.
// - body - Initial data
// - options - BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method.
@ -812,7 +810,7 @@ func (client *BlockBlobClient) Upload(ctx context.Context, contentLength int64,
if err != nil {
return BlockBlobClientUploadResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return BlockBlobClientUploadResponse{}, err
}
@ -896,7 +894,7 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -912,8 +910,14 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL
if options != nil && options.LegalHold != nil {
req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)}
}
if options != nil && options.TransactionalContentCRC64 != nil {
req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)}
}
req.Raw().Header["Accept"] = []string{"application/xml"}
return req, req.SetBody(body, "application/octet-stream")
if err := req.SetBody(body, "application/octet-stream"); err != nil {
return nil, err
}
return req, nil
}
// uploadHandleResponse handles the Upload response.
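Upload also gains an optional transactional checksum: when `options.TransactionalContentCRC64` is set, its base64 encoding is sent as `x-ms-content-crc64`. A hedged sketch of wiring a precomputed checksum into the generated options struct; the checksum computation itself (the storage service's CRC64 polynomial) is not reproduced here:

```go
// Hedged sketch: attaches a precomputed transactional CRC64 to an upload.
// The argument is assumed to be an already-computed 8-byte checksum.
func uploadOptionsWithCRC64(precomputed [8]byte) *BlockBlobClientUploadOptions {
	return &BlockBlobClientUploadOptions{
		TransactionalContentCRC64: precomputed[:],
	}
}
```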


@ -13,6 +13,7 @@ type AccessTier string
const (
AccessTierArchive AccessTier = "Archive"
AccessTierCold AccessTier = "Cold"
AccessTierCool AccessTier = "Cool"
AccessTierHot AccessTier = "Hot"
AccessTierP10 AccessTier = "P10"
@ -33,6 +34,7 @@ const (
func PossibleAccessTierValues() []AccessTier {
return []AccessTier{
AccessTierArchive,
AccessTierCold,
AccessTierCool,
AccessTierHot,
AccessTierP10,
@ -53,27 +55,28 @@ func PossibleAccessTierValues() []AccessTier {
type AccountKind string
const (
AccountKindStorage AccountKind = "Storage"
AccountKindBlobStorage AccountKind = "BlobStorage"
AccountKindStorageV2 AccountKind = "StorageV2"
AccountKindFileStorage AccountKind = "FileStorage"
AccountKindBlockBlobStorage AccountKind = "BlockBlobStorage"
AccountKindFileStorage AccountKind = "FileStorage"
AccountKindStorage AccountKind = "Storage"
AccountKindStorageV2 AccountKind = "StorageV2"
)
// PossibleAccountKindValues returns the possible values for the AccountKind const type.
func PossibleAccountKindValues() []AccountKind {
return []AccountKind{
AccountKindStorage,
AccountKindBlobStorage,
AccountKindStorageV2,
AccountKindFileStorage,
AccountKindBlockBlobStorage,
AccountKindFileStorage,
AccountKindStorage,
AccountKindStorageV2,
}
}
type ArchiveStatus string
const (
ArchiveStatusRehydratePendingToCold ArchiveStatus = "rehydrate-pending-to-cold"
ArchiveStatusRehydratePendingToCool ArchiveStatus = "rehydrate-pending-to-cool"
ArchiveStatusRehydratePendingToHot ArchiveStatus = "rehydrate-pending-to-hot"
)
@ -81,25 +84,41 @@ const (
// PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type.
func PossibleArchiveStatusValues() []ArchiveStatus {
return []ArchiveStatus{
ArchiveStatusRehydratePendingToCold,
ArchiveStatusRehydratePendingToCool,
ArchiveStatusRehydratePendingToHot,
}
}
type BlobCopySourceTags string
const (
BlobCopySourceTagsCOPY BlobCopySourceTags = "COPY"
BlobCopySourceTagsREPLACE BlobCopySourceTags = "REPLACE"
)
// PossibleBlobCopySourceTagsValues returns the possible values for the BlobCopySourceTags const type.
func PossibleBlobCopySourceTagsValues() []BlobCopySourceTags {
return []BlobCopySourceTags{
BlobCopySourceTagsCOPY,
BlobCopySourceTagsREPLACE,
}
}
// BlobGeoReplicationStatus - The status of the secondary location
type BlobGeoReplicationStatus string
const (
BlobGeoReplicationStatusLive BlobGeoReplicationStatus = "live"
BlobGeoReplicationStatusBootstrap BlobGeoReplicationStatus = "bootstrap"
BlobGeoReplicationStatusLive BlobGeoReplicationStatus = "live"
BlobGeoReplicationStatusUnavailable BlobGeoReplicationStatus = "unavailable"
)
// PossibleBlobGeoReplicationStatusValues returns the possible values for the BlobGeoReplicationStatus const type.
func PossibleBlobGeoReplicationStatusValues() []BlobGeoReplicationStatus {
return []BlobGeoReplicationStatus{
BlobGeoReplicationStatusLive,
BlobGeoReplicationStatusBootstrap,
BlobGeoReplicationStatusLive,
BlobGeoReplicationStatusUnavailable,
}
}
@ -107,53 +126,53 @@ func PossibleBlobGeoReplicationStatusValues() []BlobGeoReplicationStatus {
type BlobType string
const (
BlobTypeAppendBlob BlobType = "AppendBlob"
BlobTypeBlockBlob BlobType = "BlockBlob"
BlobTypePageBlob BlobType = "PageBlob"
BlobTypeAppendBlob BlobType = "AppendBlob"
)
// PossibleBlobTypeValues returns the possible values for the BlobType const type.
func PossibleBlobTypeValues() []BlobType {
return []BlobType{
BlobTypeAppendBlob,
BlobTypeBlockBlob,
BlobTypePageBlob,
BlobTypeAppendBlob,
}
}
type BlockListType string
const (
BlockListTypeAll BlockListType = "all"
BlockListTypeCommitted BlockListType = "committed"
BlockListTypeUncommitted BlockListType = "uncommitted"
BlockListTypeAll BlockListType = "all"
)
// PossibleBlockListTypeValues returns the possible values for the BlockListType const type.
func PossibleBlockListTypeValues() []BlockListType {
return []BlockListType{
BlockListTypeAll,
BlockListTypeCommitted,
BlockListTypeUncommitted,
BlockListTypeAll,
}
}
type CopyStatusType string
const (
CopyStatusTypePending CopyStatusType = "pending"
CopyStatusTypeSuccess CopyStatusType = "success"
CopyStatusTypeAborted CopyStatusType = "aborted"
CopyStatusTypeFailed CopyStatusType = "failed"
CopyStatusTypePending CopyStatusType = "pending"
CopyStatusTypeSuccess CopyStatusType = "success"
)
// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type.
func PossibleCopyStatusTypeValues() []CopyStatusType {
return []CopyStatusType{
CopyStatusTypePending,
CopyStatusTypeSuccess,
CopyStatusTypeAborted,
CopyStatusTypeFailed,
CopyStatusTypePending,
CopyStatusTypeSuccess,
}
}
@ -190,15 +209,15 @@ func PossibleDeleteTypeValues() []DeleteType {
type EncryptionAlgorithmType string
const (
EncryptionAlgorithmTypeNone EncryptionAlgorithmType = "None"
EncryptionAlgorithmTypeAES256 EncryptionAlgorithmType = "AES256"
EncryptionAlgorithmTypeNone EncryptionAlgorithmType = "None"
)
// PossibleEncryptionAlgorithmTypeValues returns the possible values for the EncryptionAlgorithmType const type.
func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType {
return []EncryptionAlgorithmType{
EncryptionAlgorithmTypeNone,
EncryptionAlgorithmTypeAES256,
EncryptionAlgorithmTypeNone,
}
}
@ -221,50 +240,65 @@ func PossibleExpiryOptionsValues() []ExpiryOptions {
}
}
type FilterBlobsIncludeItem string
const (
FilterBlobsIncludeItemNone FilterBlobsIncludeItem = "none"
FilterBlobsIncludeItemVersions FilterBlobsIncludeItem = "versions"
)
// PossibleFilterBlobsIncludeItemValues returns the possible values for the FilterBlobsIncludeItem const type.
func PossibleFilterBlobsIncludeItemValues() []FilterBlobsIncludeItem {
return []FilterBlobsIncludeItem{
FilterBlobsIncludeItemNone,
FilterBlobsIncludeItemVersions,
}
}
type ImmutabilityPolicyMode string
const (
ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = "Locked"
ImmutabilityPolicyModeMutable ImmutabilityPolicyMode = "Mutable"
ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = "Unlocked"
ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = "Locked"
)
// PossibleImmutabilityPolicyModeValues returns the possible values for the ImmutabilityPolicyMode const type.
func PossibleImmutabilityPolicyModeValues() []ImmutabilityPolicyMode {
return []ImmutabilityPolicyMode{
ImmutabilityPolicyModeLocked,
ImmutabilityPolicyModeMutable,
ImmutabilityPolicyModeUnlocked,
ImmutabilityPolicyModeLocked,
}
}
type ImmutabilityPolicySetting string
const (
ImmutabilityPolicySettingUnlocked ImmutabilityPolicySetting = "Unlocked"
ImmutabilityPolicySettingLocked ImmutabilityPolicySetting = "Locked"
ImmutabilityPolicySettingUnlocked ImmutabilityPolicySetting = "Unlocked"
)
// PossibleImmutabilityPolicySettingValues returns the possible values for the ImmutabilityPolicySetting const type.
func PossibleImmutabilityPolicySettingValues() []ImmutabilityPolicySetting {
return []ImmutabilityPolicySetting{
ImmutabilityPolicySettingUnlocked,
ImmutabilityPolicySettingLocked,
ImmutabilityPolicySettingUnlocked,
}
}
type LeaseDurationType string
const (
LeaseDurationTypeInfinite LeaseDurationType = "infinite"
LeaseDurationTypeFixed LeaseDurationType = "fixed"
LeaseDurationTypeInfinite LeaseDurationType = "infinite"
)
// PossibleLeaseDurationTypeValues returns the possible values for the LeaseDurationType const type.
func PossibleLeaseDurationTypeValues() []LeaseDurationType {
return []LeaseDurationType{
LeaseDurationTypeInfinite,
LeaseDurationTypeFixed,
LeaseDurationTypeInfinite,
}
}
@ -272,20 +306,20 @@ type LeaseStateType string
const (
LeaseStateTypeAvailable LeaseStateType = "available"
LeaseStateTypeLeased LeaseStateType = "leased"
LeaseStateTypeExpired LeaseStateType = "expired"
LeaseStateTypeBreaking LeaseStateType = "breaking"
LeaseStateTypeBroken LeaseStateType = "broken"
LeaseStateTypeExpired LeaseStateType = "expired"
LeaseStateTypeLeased LeaseStateType = "leased"
)
// PossibleLeaseStateTypeValues returns the possible values for the LeaseStateType const type.
func PossibleLeaseStateTypeValues() []LeaseStateType {
return []LeaseStateType{
LeaseStateTypeAvailable,
LeaseStateTypeLeased,
LeaseStateTypeExpired,
LeaseStateTypeBreaking,
LeaseStateTypeBroken,
LeaseStateTypeExpired,
LeaseStateTypeLeased,
}
}
@ -309,14 +343,14 @@ type ListBlobsIncludeItem string
const (
ListBlobsIncludeItemCopy ListBlobsIncludeItem = "copy"
ListBlobsIncludeItemDeleted ListBlobsIncludeItem = "deleted"
ListBlobsIncludeItemMetadata ListBlobsIncludeItem = "metadata"
ListBlobsIncludeItemSnapshots ListBlobsIncludeItem = "snapshots"
ListBlobsIncludeItemUncommittedblobs ListBlobsIncludeItem = "uncommittedblobs"
ListBlobsIncludeItemVersions ListBlobsIncludeItem = "versions"
ListBlobsIncludeItemTags ListBlobsIncludeItem = "tags"
ListBlobsIncludeItemDeletedwithversions ListBlobsIncludeItem = "deletedwithversions"
ListBlobsIncludeItemImmutabilitypolicy ListBlobsIncludeItem = "immutabilitypolicy"
ListBlobsIncludeItemLegalhold ListBlobsIncludeItem = "legalhold"
ListBlobsIncludeItemDeletedwithversions ListBlobsIncludeItem = "deletedwithversions"
ListBlobsIncludeItemMetadata ListBlobsIncludeItem = "metadata"
ListBlobsIncludeItemSnapshots ListBlobsIncludeItem = "snapshots"
ListBlobsIncludeItemTags ListBlobsIncludeItem = "tags"
ListBlobsIncludeItemUncommittedblobs ListBlobsIncludeItem = "uncommittedblobs"
ListBlobsIncludeItemVersions ListBlobsIncludeItem = "versions"
)
// PossibleListBlobsIncludeItemValues returns the possible values for the ListBlobsIncludeItem const type.
@ -324,30 +358,30 @@ func PossibleListBlobsIncludeItemValues() []ListBlobsIncludeItem {
return []ListBlobsIncludeItem{
ListBlobsIncludeItemCopy,
ListBlobsIncludeItemDeleted,
ListBlobsIncludeItemMetadata,
ListBlobsIncludeItemSnapshots,
ListBlobsIncludeItemUncommittedblobs,
ListBlobsIncludeItemVersions,
ListBlobsIncludeItemTags,
ListBlobsIncludeItemDeletedwithversions,
ListBlobsIncludeItemImmutabilitypolicy,
ListBlobsIncludeItemLegalhold,
ListBlobsIncludeItemDeletedwithversions,
ListBlobsIncludeItemMetadata,
ListBlobsIncludeItemSnapshots,
ListBlobsIncludeItemTags,
ListBlobsIncludeItemUncommittedblobs,
ListBlobsIncludeItemVersions,
}
}
type ListContainersIncludeType string
const (
ListContainersIncludeTypeMetadata ListContainersIncludeType = "metadata"
ListContainersIncludeTypeDeleted ListContainersIncludeType = "deleted"
ListContainersIncludeTypeMetadata ListContainersIncludeType = "metadata"
ListContainersIncludeTypeSystem ListContainersIncludeType = "system"
)
// PossibleListContainersIncludeTypeValues returns the possible values for the ListContainersIncludeType const type.
func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType {
return []ListContainersIncludeType{
ListContainersIncludeTypeMetadata,
ListContainersIncludeTypeDeleted,
ListContainersIncludeTypeMetadata,
ListContainersIncludeTypeSystem,
}
}
@ -404,18 +438,18 @@ func PossiblePublicAccessTypeValues() []PublicAccessType {
type QueryFormatType string
const (
QueryFormatTypeArrow QueryFormatType = "arrow"
QueryFormatTypeDelimited QueryFormatType = "delimited"
QueryFormatTypeJSON QueryFormatType = "json"
QueryFormatTypeArrow QueryFormatType = "arrow"
QueryFormatTypeParquet QueryFormatType = "parquet"
)
// PossibleQueryFormatTypeValues returns the possible values for the QueryFormatType const type.
func PossibleQueryFormatTypeValues() []QueryFormatType {
return []QueryFormatType{
QueryFormatTypeArrow,
QueryFormatTypeDelimited,
QueryFormatTypeJSON,
QueryFormatTypeArrow,
QueryFormatTypeParquet,
}
}
@ -440,38 +474,38 @@ func PossibleRehydratePriorityValues() []RehydratePriority {
type SKUName string
const (
SKUNameStandardLRS SKUName = "Standard_LRS"
SKUNamePremiumLRS SKUName = "Premium_LRS"
SKUNameStandardGRS SKUName = "Standard_GRS"
SKUNameStandardLRS SKUName = "Standard_LRS"
SKUNameStandardRAGRS SKUName = "Standard_RAGRS"
SKUNameStandardZRS SKUName = "Standard_ZRS"
SKUNamePremiumLRS SKUName = "Premium_LRS"
)
// PossibleSKUNameValues returns the possible values for the SKUName const type.
func PossibleSKUNameValues() []SKUName {
return []SKUName{
SKUNameStandardLRS,
SKUNamePremiumLRS,
SKUNameStandardGRS,
SKUNameStandardLRS,
SKUNameStandardRAGRS,
SKUNameStandardZRS,
SKUNamePremiumLRS,
}
}
type SequenceNumberActionType string
const (
SequenceNumberActionTypeIncrement SequenceNumberActionType = "increment"
SequenceNumberActionTypeMax SequenceNumberActionType = "max"
SequenceNumberActionTypeUpdate SequenceNumberActionType = "update"
SequenceNumberActionTypeIncrement SequenceNumberActionType = "increment"
)
// PossibleSequenceNumberActionTypeValues returns the possible values for the SequenceNumberActionType const type.
func PossibleSequenceNumberActionTypeValues() []SequenceNumberActionType {
return []SequenceNumberActionType{
SequenceNumberActionTypeIncrement,
SequenceNumberActionTypeMax,
SequenceNumberActionTypeUpdate,
SequenceNumberActionTypeIncrement,
}
}
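Beyond the Cold-tier additions (`AccessTierCold` plus the `rehydrate-pending-to-cold` ArchiveStatus value) and the new `BlobCopySourceTags` and `FilterBlobsIncludeItem` types, the remaining churn in this file is alphabetical reordering of existing constants. The `Possible*Values` helpers keep their role as the canonical enumeration; a small hedged sketch of using one of them to validate input, based only on the functions shown above:

```go
// Hedged sketch: validates a caller-supplied tier against the generated
// enumeration, which now includes AccessTierCold.
func isValidAccessTier(t AccessTier) bool {
	for _, v := range PossibleAccessTierValues() {
		if v == t {
			return true
		}
	}
	return false
}
```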


@ -25,28 +25,17 @@ import (
)
// ContainerClient contains the methods for the Container group.
// Don't use this type directly, use NewContainerClient() instead.
// Don't use this type directly, use a constructor function instead.
type ContainerClient struct {
internal *azcore.Client
endpoint string
pl runtime.Pipeline
}
// NewContainerClient creates a new instance of ContainerClient with the specified values.
// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation.
// - pl - the pipeline used for sending requests and handling responses.
func NewContainerClient(endpoint string, pl runtime.Pipeline) *ContainerClient {
client := &ContainerClient{
endpoint: endpoint,
pl: pl,
}
return client
}
// AcquireLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15
// to 60 seconds, or can be infinite
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite
// lease can be between 15 and 60 seconds. A lease duration cannot be changed using
// renew or change.
@ -57,7 +46,7 @@ func (client *ContainerClient) AcquireLease(ctx context.Context, duration int32,
if err != nil {
return ContainerClientAcquireLeaseResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return ContainerClientAcquireLeaseResponse{}, err
}
@ -91,7 +80,7 @@ func (client *ContainerClient) acquireLeaseCreateRequest(ctx context.Context, du
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -138,7 +127,7 @@ func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) (
// to 60 seconds, or can be infinite
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
func (client *ContainerClient) BreakLease(ctx context.Context, options *ContainerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientBreakLeaseResponse, error) {
@ -146,7 +135,7 @@ func (client *ContainerClient) BreakLease(ctx context.Context, options *Containe
if err != nil {
return ContainerClientBreakLeaseResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return ContainerClientBreakLeaseResponse{}, err
}
@ -179,7 +168,7 @@ func (client *ContainerClient) breakLeaseCreateRequest(ctx context.Context, opti
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -231,7 +220,7 @@ func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (Co
// to 60 seconds, or can be infinite
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - leaseID - Specifies the current lease ID on the resource.
// - proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed
// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID
@ -243,7 +232,7 @@ func (client *ContainerClient) ChangeLease(ctx context.Context, leaseID string,
if err != nil {
return ContainerClientChangeLeaseResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return ContainerClientChangeLeaseResponse{}, err
}
@ -275,7 +264,7 @@ func (client *ContainerClient) changeLeaseCreateRequest(ctx context.Context, lea
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -322,7 +311,7 @@ func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (C
// fails
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method.
// - ContainerCPKScopeInfo - ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method.
func (client *ContainerClient) Create(ctx context.Context, options *ContainerClientCreateOptions, containerCPKScopeInfo *ContainerCPKScopeInfo) (ContainerClientCreateResponse, error) {
@ -330,7 +319,7 @@ func (client *ContainerClient) Create(ctx context.Context, options *ContainerCli
if err != nil {
return ContainerClientCreateResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return ContainerClientCreateResponse{}, err
}
@ -362,7 +351,7 @@ func (client *ContainerClient) createCreateRequest(ctx context.Context, options
if options != nil && options.Access != nil {
req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -412,7 +401,7 @@ func (client *ContainerClient) createHandleResponse(resp *http.Response) (Contai
// deleted during garbage collection
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@ -421,7 +410,7 @@ func (client *ContainerClient) Delete(ctx context.Context, options *ContainerCli
if err != nil {
return ContainerClientDeleteResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return ContainerClientDeleteResponse{}, err
}
@ -452,7 +441,7 @@ func (client *ContainerClient) deleteCreateRequest(ctx context.Context, options
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -482,11 +471,89 @@ func (client *ContainerClient) deleteHandleResponse(resp *http.Response) (Contai
return result, nil
}
// FilterBlobs - The Filter Blobs operation enables callers to list blobs in a container whose tags match a given search expression.
// Filter blobs searches within the given container.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2023-08-03
// - where - Filters the results to return only blobs whose tags match the specified expression.
// - options - ContainerClientFilterBlobsOptions contains the optional parameters for the ContainerClient.FilterBlobs method.
func (client *ContainerClient) FilterBlobs(ctx context.Context, where string, options *ContainerClientFilterBlobsOptions) (ContainerClientFilterBlobsResponse, error) {
req, err := client.filterBlobsCreateRequest(ctx, where, options)
if err != nil {
return ContainerClientFilterBlobsResponse{}, err
}
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return ContainerClientFilterBlobsResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
return ContainerClientFilterBlobsResponse{}, runtime.NewResponseError(resp)
}
return client.filterBlobsHandleResponse(resp)
}
// filterBlobsCreateRequest creates the FilterBlobs request.
func (client *ContainerClient) filterBlobsCreateRequest(ctx context.Context, where string, options *ContainerClientFilterBlobsOptions) (*policy.Request, error) {
req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("restype", "container")
reqQP.Set("comp", "blobs")
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
reqQP.Set("where", where)
if options != nil && options.Marker != nil {
reqQP.Set("marker", *options.Marker)
}
if options != nil && options.Maxresults != nil {
reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10))
}
if options != nil && options.Include != nil {
reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ","))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["Accept"] = []string{"application/xml"}
return req, nil
}
// filterBlobsHandleResponse handles the FilterBlobs response.
func (client *ContainerClient) filterBlobsHandleResponse(resp *http.Response) (ContainerClientFilterBlobsResponse, error) {
result := ContainerClientFilterBlobsResponse{}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val
}
if val := resp.Header.Get("x-ms-request-id"); val != "" {
result.RequestID = &val
}
if val := resp.Header.Get("x-ms-version"); val != "" {
result.Version = &val
}
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
return ContainerClientFilterBlobsResponse{}, err
}
result.Date = &date
}
if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil {
return ContainerClientFilterBlobsResponse{}, err
}
return result, nil
}
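FilterBlobs is new on ContainerClient in this update, scoping tag-expression searches to a single container. A hedged sketch of calling it through the generated client, using the `where` syntax the service documents for tag filters; the `Blobs`/`Name` fields on `FilterBlobSegment` are assumed from the service schema, since the models are not part of this hunk:

```go
// Hedged sketch: lists blobs in this container whose tags match a filter
// expression. Field names on the result segment are assumptions; adjust to
// the generated models.
func findBilledBlobs(ctx context.Context, client *ContainerClient) ([]string, error) {
	where := `"project"='billing' AND "state"='archived'`
	resp, err := client.FilterBlobs(ctx, where, nil)
	if err != nil {
		return nil, err
	}
	var names []string
	for _, item := range resp.FilterBlobSegment.Blobs {
		if item != nil && item.Name != nil {
			names = append(names, *item.Name)
		}
	}
	return names, nil
}
```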
// GetAccessPolicy - gets the permissions for the specified container. The permissions indicate whether container data may
// be accessed publicly.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy
// method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
@ -495,7 +562,7 @@ func (client *ContainerClient) GetAccessPolicy(ctx context.Context, options *Con
if err != nil {
return ContainerClientGetAccessPolicyResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return ContainerClientGetAccessPolicyResponse{}, err
}
@ -521,7 +588,7 @@ func (client *ContainerClient) getAccessPolicyCreateRequest(ctx context.Context,
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -570,7 +637,7 @@ func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response
// GetAccountInfo - Returns the sku name and account kind
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo
// method.
func (client *ContainerClient) GetAccountInfo(ctx context.Context, options *ContainerClientGetAccountInfoOptions) (ContainerClientGetAccountInfoResponse, error) {
@ -578,7 +645,7 @@ func (client *ContainerClient) GetAccountInfo(ctx context.Context, options *Cont
if err != nil {
return ContainerClientGetAccountInfoResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return ContainerClientGetAccountInfoResponse{}, err
}
@ -598,7 +665,7 @@ func (client *ContainerClient) getAccountInfoCreateRequest(ctx context.Context,
reqQP.Set("restype", "account")
reqQP.Set("comp", "properties")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
req.Raw().Header["Accept"] = []string{"application/xml"}
return req, nil
}
@ -635,7 +702,7 @@ func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response)
// does not include the container's list of blobs
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
func (client *ContainerClient) GetProperties(ctx context.Context, options *ContainerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetPropertiesResponse, error) {
@ -643,7 +710,7 @@ func (client *ContainerClient) GetProperties(ctx context.Context, options *Conta
if err != nil {
return ContainerClientGetPropertiesResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return ContainerClientGetPropertiesResponse{}, err
}
@ -668,7 +735,7 @@ func (client *ContainerClient) getPropertiesCreateRequest(ctx context.Context, o
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -761,7 +828,7 @@ func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response)
// NewListBlobFlatSegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager
// method.
//
@ -790,7 +857,7 @@ func (client *ContainerClient) ListBlobFlatSegmentCreateRequest(ctx context.Cont
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -828,7 +895,7 @@ func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Resp
// NewListBlobHierarchySegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - delimiter - When the request includes this parameter, the operation returns a BlobPrefix element in the response body that
// acts as a placeholder for all blobs whose names begin with the same substring up to the
// appearance of the delimiter character. The delimiter may be a single character or a string.
@ -850,7 +917,7 @@ func (client *ContainerClient) NewListBlobHierarchySegmentPager(delimiter string
if err != nil {
return ContainerClientListBlobHierarchySegmentResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return ContainerClientListBlobHierarchySegmentResponse{}, err
}
@ -888,7 +955,7 @@ func (client *ContainerClient) ListBlobHierarchySegmentCreateRequest(ctx context
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -928,7 +995,7 @@ func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http
// to 60 seconds, or can be infinite
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - leaseID - Specifies the current lease ID on the resource.
// - options - ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@ -937,7 +1004,7 @@ func (client *ContainerClient) ReleaseLease(ctx context.Context, leaseID string,
if err != nil {
return ContainerClientReleaseLeaseResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return ContainerClientReleaseLeaseResponse{}, err
}
@ -968,7 +1035,7 @@ func (client *ContainerClient) releaseLeaseCreateRequest(ctx context.Context, le
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -1011,7 +1078,7 @@ func (client *ContainerClient) releaseLeaseHandleResponse(resp *http.Response) (
// Rename - Renames an existing container.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - sourceContainerName - Required. Specifies the name of the container to rename.
// - options - ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method.
func (client *ContainerClient) Rename(ctx context.Context, sourceContainerName string, options *ContainerClientRenameOptions) (ContainerClientRenameResponse, error) {
@ -1019,7 +1086,7 @@ func (client *ContainerClient) Rename(ctx context.Context, sourceContainerName s
if err != nil {
return ContainerClientRenameResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return ContainerClientRenameResponse{}, err
}
@ -1042,7 +1109,7 @@ func (client *ContainerClient) renameCreateRequest(ctx context.Context, sourceCo
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -1080,7 +1147,7 @@ func (client *ContainerClient) renameHandleResponse(resp *http.Response) (Contai
// to 60 seconds, or can be infinite
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - leaseID - Specifies the current lease ID on the resource.
// - options - ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@ -1089,7 +1156,7 @@ func (client *ContainerClient) RenewLease(ctx context.Context, leaseID string, o
if err != nil {
return ContainerClientRenewLeaseResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return ContainerClientRenewLeaseResponse{}, err
}
@ -1120,7 +1187,7 @@ func (client *ContainerClient) renewLeaseCreateRequest(ctx context.Context, leas
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -1166,14 +1233,14 @@ func (client *ContainerClient) renewLeaseHandleResponse(resp *http.Response) (Co
// Restore - Restores a previously-deleted container.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method.
func (client *ContainerClient) Restore(ctx context.Context, options *ContainerClientRestoreOptions) (ContainerClientRestoreResponse, error) {
req, err := client.restoreCreateRequest(ctx, options)
if err != nil {
return ContainerClientRestoreResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return ContainerClientRestoreResponse{}, err
}
@ -1196,7 +1263,7 @@ func (client *ContainerClient) restoreCreateRequest(ctx context.Context, options
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -1236,7 +1303,7 @@ func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (Conta
// may be accessed publicly.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - containerACL - the acls for the container
// - options - ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy
// method.
@ -1247,7 +1314,7 @@ func (client *ContainerClient) SetAccessPolicy(ctx context.Context, containerACL
if err != nil {
return ContainerClientSetAccessPolicyResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return ContainerClientSetAccessPolicyResponse{}, err
}
@ -1282,7 +1349,7 @@ func (client *ContainerClient) setAccessPolicyCreateRequest(ctx context.Context,
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -1291,7 +1358,10 @@ func (client *ContainerClient) setAccessPolicyCreateRequest(ctx context.Context,
XMLName xml.Name `xml:"SignedIdentifiers"`
ContainerACL *[]*SignedIdentifier `xml:"SignedIdentifier"`
}
return req, runtime.MarshalAsXML(req, wrapper{ContainerACL: &containerACL})
if err := runtime.MarshalAsXML(req, wrapper{ContainerACL: &containerACL}); err != nil {
return nil, err
}
return req, nil
}
// setAccessPolicyHandleResponse handles the SetAccessPolicy response.
@ -1329,7 +1399,7 @@ func (client *ContainerClient) setAccessPolicyHandleResponse(resp *http.Response
// SetMetadata - operation sets one or more user-defined name-value pairs for the specified container.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@ -1338,7 +1408,7 @@ func (client *ContainerClient) SetMetadata(ctx context.Context, options *Contain
if err != nil {
return ContainerClientSetMetadataResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return ContainerClientSetMetadataResponse{}, err
}
@ -1374,7 +1444,7 @@ func (client *ContainerClient) setMetadataCreateRequest(ctx context.Context, opt
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -1417,7 +1487,7 @@ func (client *ContainerClient) setMetadataHandleResponse(resp *http.Response) (C
// SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - contentLength - The length of the request.
// - multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header
// value: multipart/mixed; boundary=batch_
@ -1428,7 +1498,7 @@ func (client *ContainerClient) SubmitBatch(ctx context.Context, contentLength in
if err != nil {
return ContainerClientSubmitBatchResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return ContainerClientSubmitBatchResponse{}, err
}
@ -1454,12 +1524,15 @@ func (client *ContainerClient) submitBatchCreateRequest(ctx context.Context, con
runtime.SkipBodyDownload(req)
req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
req.Raw().Header["Content-Type"] = []string{multipartContentType}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["Accept"] = []string{"application/xml"}
return req, req.SetBody(body, multipartContentType)
if err := req.SetBody(body, multipartContentType); err != nil {
return nil, err
}
return req, nil
}
// submitBatchHandleResponse handles the SubmitBatch response.
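Throughout the regenerated clients, the hard-coded `"2020-10-02"` service version is replaced by a shared `ServiceVersion` constant, so future regenerations only touch one declaration. A minimal sketch of what that constant presumably looks like in the generated package; the exact value is an assumption inferred from the "Generated from API version 2023-08-03" comments in this diff:

```go
package generated

// ServiceVersion is assumed to be the single x-ms-version value that every
// regenerated create-request helper now references instead of a per-call
// string literal, e.g.:
//
//	req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
const ServiceVersion = "2023-08-03"
```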


@ -181,6 +181,8 @@ type BlobClientCopyFromURLOptions struct {
BlobTagsString *string
// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
CopySourceAuthorization *string
// Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags.
CopySourceTags *BlobCopySourceTags
// Specifies the date time when the blobs immutability policy is set to expire.
ImmutabilityPolicyExpiry *time.Time
// Specifies the immutability policy mode to set on the blob.
@ -554,6 +556,14 @@ type BlobItem struct {
VersionID *string `xml:"VersionId"`
}
type BlobName struct {
// The name of the blob.
Content *string `xml:",chardata"`
// Indicates if the blob name is encoded.
Encoded *bool `xml:"Encoded,attr"`
}
type BlobPrefix struct {
// REQUIRED
Name *string `xml:"Name"`
@ -689,6 +699,8 @@ type BlockBlobClientPutBlobFromURLOptions struct {
CopySourceAuthorization *string
// Optional, default is true. Indicates if properties from the source blob should be copied.
CopySourceBlobProperties *bool
// Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags.
CopySourceTags *BlobCopySourceTags
// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
// operation will copy the metadata from the source blob or file to the destination
// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
@ -767,6 +779,8 @@ type BlockBlobClientUploadOptions struct {
// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
Timeout *int32
// Specify the transactional crc64 for the body, to be validated by the service.
TransactionalContentCRC64 []byte
// Specify the transactional md5 for the body, to be validated by the service.
TransactionalContentMD5 []byte
}
@ -860,6 +874,30 @@ type ContainerClientDeleteOptions struct {
Timeout *int32
}
// ContainerClientFilterBlobsOptions contains the optional parameters for the ContainerClient.FilterBlobs method.
type ContainerClientFilterBlobsOptions struct {
// Include this parameter to specify one or more datasets to include in the response.
Include []FilterBlobsIncludeItem
// A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
// operation returns the NextMarker value within the response body if the listing
// operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
// as the value for the marker parameter in a subsequent call to request the next
// page of list items. The marker value is opaque to the client.
Marker *string
// Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
// greater than 5000, the server will return up to 5000 items. Note that if the
// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
// of the results. For this reason, it is possible that the service will
// return fewer results than specified by maxresults, or than the default of 5000.
Maxresults *int32
// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
// analytics logging is enabled.
RequestID *string
// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
Timeout *int32
}
// ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy method.
type ContainerClientGetAccessPolicyOptions struct {
// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
@ -1140,10 +1178,12 @@ type FilterBlobItem struct {
ContainerName *string `xml:"ContainerName"`
// REQUIRED
Name *string `xml:"Name"`
Name *string `xml:"Name"`
IsCurrentVersion *bool `xml:"IsCurrentVersion"`
// Blob tags
Tags *BlobTags `xml:"Tags"`
Tags *BlobTags `xml:"Tags"`
VersionID *string `xml:"VersionId"`
}
// FilterBlobSegment - The result of a Filter Blobs API call
@ -1533,6 +1573,8 @@ type SequenceNumberAccessConditions struct {
// ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method.
type ServiceClientFilterBlobsOptions struct {
// Include this parameter to specify one or more datasets to include in the response.
Include []FilterBlobsIncludeItem
// A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
// operation returns the NextMarker value within the response body if the listing
// operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
@ -1674,7 +1716,7 @@ type StaticWebsite struct {
}
type StorageError struct {
Message *string `json:"Message,omitempty"`
Message *string
}
// StorageServiceProperties - Storage Service Properties.


@ -101,24 +101,6 @@ func (b BlobHierarchyListSegment) MarshalXML(enc *xml.Encoder, start xml.StartEl
return enc.EncodeElement(aux, start)
}
// UnmarshalXML implements the xml.Unmarshaller interface for type BlobItem.
func (b *BlobItem) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error {
type alias BlobItem
aux := &struct {
*alias
Metadata additionalProperties `xml:"Metadata"`
OrMetadata additionalProperties `xml:"OrMetadata"`
}{
alias: (*alias)(b),
}
if err := dec.DecodeElement(aux, &start); err != nil {
return err
}
b.Metadata = (map[string]*string)(aux.Metadata)
b.OrMetadata = (map[string]*string)(aux.OrMetadata)
return nil
}
// MarshalXML implements the xml.Marshaller interface for type BlobProperties.
func (b BlobProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) error {
type alias BlobProperties
@ -470,6 +452,16 @@ func populate(m map[string]any, k string, v any) {
}
}
func populateAny(m map[string]any, k string, v any) {
if v == nil {
return
} else if azcore.IsNullValue(v) {
m[k] = nil
} else {
m[k] = v
}
}
func unpopulate(data json.RawMessage, fn string, v any) error {
if data == nil {
return nil
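The serde helpers also gain `populateAny`, a counterpart to `populate` that distinguishes "not set" (skipped) from "explicitly null" (stored as nil and later marshaled as JSON null) via `azcore.IsNullValue`. A small self-contained sketch of the three cases, mirroring the helper shown above; the map keys are illustrative:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
)

// populateAny mirrors the generated helper: nil is skipped entirely, an
// azcore null sentinel is stored as an explicit nil, anything else is
// stored as-is.
func populateAny(m map[string]any, k string, v any) {
	if v == nil {
		return
	} else if azcore.IsNullValue(v) {
		m[k] = nil
	} else {
		m[k] = v
	}
}

func main() {
	m := map[string]any{}
	populateAny(m, "skipped", nil)                        // omitted from the map
	populateAny(m, "nulled", azcore.NullValue[*string]()) // later marshals as JSON null
	populateAny(m, "set", "value")                        // stored normally
	fmt.Println(m) // map[nulled:<nil> set:value]
}
```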


@ -22,27 +22,16 @@ import (
)
// PageBlobClient contains the methods for the PageBlob group.
// Don't use this type directly, use NewPageBlobClient() instead.
// Don't use this type directly, use a constructor function instead.
type PageBlobClient struct {
internal *azcore.Client
endpoint string
pl runtime.Pipeline
}
// NewPageBlobClient creates a new instance of PageBlobClient with the specified values.
// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation.
// - pl - the pipeline used for sending requests and handling responses.
func NewPageBlobClient(endpoint string, pl runtime.Pipeline) *PageBlobClient {
client := &PageBlobClient{
endpoint: endpoint,
pl: pl,
}
return client
}
// ClearPages - The Clear Pages operation clears a set of pages from a page blob
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - contentLength - The length of the request.
// - options - PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
@ -56,7 +45,7 @@ func (client *PageBlobClient) ClearPages(ctx context.Context, contentLength int6
if err != nil {
return PageBlobClientClearPagesResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return PageBlobClientClearPagesResponse{}, err
}
@ -122,7 +111,7 @@ func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, conte
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -190,7 +179,7 @@ func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag
// 2016-05-31.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies
// a page blob snapshot. The value should be URL-encoded as it would appear in a request
// URI. The source blob must either be public or must be authenticated via a shared access signature.
@ -202,7 +191,7 @@ func (client *PageBlobClient) CopyIncremental(ctx context.Context, copySource st
if err != nil {
return PageBlobClientCopyIncrementalResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return PageBlobClientCopyIncrementalResponse{}, err
}
@ -240,7 +229,7 @@ func (client *PageBlobClient) copyIncrementalCreateRequest(ctx context.Context,
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
req.Raw().Header["x-ms-copy-source"] = []string{copySource}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -289,7 +278,7 @@ func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response)
// Create - The Create operation creates a new page blob.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - contentLength - The length of the request.
// - blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned
// to a 512-byte boundary.
@ -304,7 +293,7 @@ func (client *PageBlobClient) Create(ctx context.Context, contentLength int64, b
if err != nil {
return PageBlobClientCreateResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return PageBlobClientCreateResponse{}, err
}
@ -389,7 +378,7 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe
if options != nil && options.BlobSequenceNumber != nil {
req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -467,7 +456,7 @@ func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlo
// NewGetPageRangesPager - The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot
// of a page blob
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager
// method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
@ -488,7 +477,7 @@ func (client *PageBlobClient) NewGetPageRangesPager(options *PageBlobClientGetPa
if err != nil {
return PageBlobClientGetPageRangesResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return PageBlobClientGetPageRangesResponse{}, err
}
@ -542,7 +531,7 @@ func (client *PageBlobClient) GetPageRangesCreateRequest(ctx context.Context, op
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -595,7 +584,7 @@ func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) (
// NewGetPageRangesDiffPager - The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that
// were changed between target blob and previous snapshot.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager
// method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
@ -616,7 +605,7 @@ func (client *PageBlobClient) NewGetPageRangesDiffPager(options *PageBlobClientG
if err != nil {
return PageBlobClientGetPageRangesDiffResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return PageBlobClientGetPageRangesDiffResponse{}, err
}
@ -676,7 +665,7 @@ func (client *PageBlobClient) GetPageRangesDiffCreateRequest(ctx context.Context
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -729,7 +718,7 @@ func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Respons
// Resize - Resize the Blob
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned
// to a 512-byte boundary.
// - options - PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method.
@ -742,7 +731,7 @@ func (client *PageBlobClient) Resize(ctx context.Context, blobContentLength int6
if err != nil {
return PageBlobClientResizeResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return PageBlobClientResizeResponse{}, err
}
@ -795,7 +784,7 @@ func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobConte
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -845,7 +834,7 @@ func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlo
// UpdateSequenceNumber - Update the sequence number of the blob
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - sequenceNumberAction - Required if the x-ms-blob-sequence-number header is set for the request. This property applies to
// page blobs only. This property indicates how the service should modify the blob's sequence number
// - options - PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber
@ -857,7 +846,7 @@ func (client *PageBlobClient) UpdateSequenceNumber(ctx context.Context, sequence
if err != nil {
return PageBlobClientUpdateSequenceNumberResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return PageBlobClientUpdateSequenceNumberResponse{}, err
}
@ -901,7 +890,7 @@ func (client *PageBlobClient) updateSequenceNumberCreateRequest(ctx context.Cont
if options != nil && options.BlobSequenceNumber != nil {
req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -951,7 +940,7 @@ func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Resp
// UploadPages - The Upload Pages operation writes a range of pages to a page blob
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - contentLength - The length of the request.
// - body - Initial data
// - options - PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method.
@ -966,7 +955,7 @@ func (client *PageBlobClient) UploadPages(ctx context.Context, contentLength int
if err != nil {
return PageBlobClientUploadPagesResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return PageBlobClientUploadPagesResponse{}, err
}
@ -1038,12 +1027,15 @@ func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, cont
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["Accept"] = []string{"application/xml"}
return req, req.SetBody(body, "application/octet-stream")
if err := req.SetBody(body, "application/octet-stream"); err != nil {
return nil, err
}
return req, nil
}
// uploadPagesHandleResponse handles the UploadPages response.
@ -1116,7 +1108,7 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa
// a URL
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - sourceURL - Specify a URL to the copy source.
// - sourceRange - Bytes of source data in the specified range. The length of this range should match the ContentLength header
// and x-ms-range/Range destination range header.
@ -1138,7 +1130,7 @@ func (client *PageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL
if err != nil {
return PageBlobClientUploadPagesFromURLResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return PageBlobClientUploadPagesFromURLResponse{}, err
}
@ -1222,7 +1214,7 @@ func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}


@ -266,6 +266,9 @@ type BlobClientCopyFromURLResponse struct {
// ETag contains the information returned from the ETag header response.
ETag *azcore.ETag
// EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
EncryptionScope *string
// LastModified contains the information returned from the Last-Modified header response.
LastModified *time.Time
@ -407,6 +410,9 @@ type BlobClientDownloadResponse struct {
// CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response.
CopyStatusDescription *string
// CreationTime contains the information returned from the x-ms-creation-time header response.
CreationTime *time.Time
// Date contains the information returned from the Date header response.
Date *time.Time
@ -1310,6 +1316,22 @@ type ContainerClientDeleteResponse struct {
Version *string
}
// ContainerClientFilterBlobsResponse contains the response from method ContainerClient.FilterBlobs.
type ContainerClientFilterBlobsResponse struct {
FilterBlobSegment
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string `xml:"ClientRequestID"`
// Date contains the information returned from the Date header response.
Date *time.Time `xml:"Date"`
// RequestID contains the information returned from the x-ms-request-id header response.
RequestID *string `xml:"RequestID"`
// Version contains the information returned from the x-ms-version header response.
Version *string `xml:"Version"`
}
// ContainerClientGetAccessPolicyResponse contains the response from method ContainerClient.GetAccessPolicy.
type ContainerClientGetAccessPolicyResponse struct {
// BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response.


@ -12,6 +12,7 @@ package generated
import (
"context"
"fmt"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"io"
@ -22,21 +23,10 @@ import (
)
// ServiceClient contains the methods for the Service group.
// Don't use this type directly, use NewServiceClient() instead.
// Don't use this type directly, use a constructor function instead.
type ServiceClient struct {
internal *azcore.Client
endpoint string
pl runtime.Pipeline
}
// NewServiceClient creates a new instance of ServiceClient with the specified values.
// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation.
// - pl - the pipeline used for sending requests and handling responses.
func NewServiceClient(endpoint string, pl runtime.Pipeline) *ServiceClient {
client := &ServiceClient{
endpoint: endpoint,
pl: pl,
}
return client
}
// FilterBlobs - The Filter Blobs operation enables callers to list blobs across all containers whose tags match a given search
@ -44,7 +34,7 @@ func NewServiceClient(endpoint string, pl runtime.Pipeline) *ServiceClient {
// be scoped within the expression to a single container.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - where - Filters the results to return only blobs whose tags match the specified expression.
// - options - ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method.
func (client *ServiceClient) FilterBlobs(ctx context.Context, where string, options *ServiceClientFilterBlobsOptions) (ServiceClientFilterBlobsResponse, error) {
@ -52,7 +42,7 @@ func (client *ServiceClient) FilterBlobs(ctx context.Context, where string, opti
if err != nil {
return ServiceClientFilterBlobsResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return ServiceClientFilterBlobsResponse{}, err
}
@ -80,8 +70,11 @@ func (client *ServiceClient) filterBlobsCreateRequest(ctx context.Context, where
if options != nil && options.Maxresults != nil {
reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10))
}
if options != nil && options.Include != nil {
reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ","))
}
req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
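The new `include` query parameter is rendered by flattening the slice with `fmt.Sprint` and re-joining it with commas. A tiny standalone illustration of that formatting, with plain strings standing in for `FilterBlobsIncludeItem` values:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Mirrors the expression in filterBlobsCreateRequest above: a slice of
	// include items becomes a comma-separated query value.
	include := []string{"none", "versions"} // stand-ins for FilterBlobsIncludeItem values
	v := strings.Join(strings.Fields(strings.Trim(fmt.Sprint(include), "[]")), ",")
	fmt.Println(v) // Output: none,versions
}
```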
@ -117,14 +110,14 @@ func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (Ser
// GetAccountInfo - Returns the sku name and account kind
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method.
func (client *ServiceClient) GetAccountInfo(ctx context.Context, options *ServiceClientGetAccountInfoOptions) (ServiceClientGetAccountInfoResponse, error) {
req, err := client.getAccountInfoCreateRequest(ctx, options)
if err != nil {
return ServiceClientGetAccountInfoResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return ServiceClientGetAccountInfoResponse{}, err
}
@ -144,7 +137,7 @@ func (client *ServiceClient) getAccountInfoCreateRequest(ctx context.Context, op
reqQP.Set("restype", "account")
reqQP.Set("comp", "properties")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
req.Raw().Header["Accept"] = []string{"application/xml"}
return req, nil
}
@ -188,14 +181,14 @@ func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) (
// CORS (Cross-Origin Resource Sharing) rules.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method.
func (client *ServiceClient) GetProperties(ctx context.Context, options *ServiceClientGetPropertiesOptions) (ServiceClientGetPropertiesResponse, error) {
req, err := client.getPropertiesCreateRequest(ctx, options)
if err != nil {
return ServiceClientGetPropertiesResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return ServiceClientGetPropertiesResponse{}, err
}
@ -218,7 +211,7 @@ func (client *ServiceClient) getPropertiesCreateRequest(ctx context.Context, opt
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -248,14 +241,14 @@ func (client *ServiceClient) getPropertiesHandleResponse(resp *http.Response) (S
// location endpoint when read-access geo-redundant replication is enabled for the storage account.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method.
func (client *ServiceClient) GetStatistics(ctx context.Context, options *ServiceClientGetStatisticsOptions) (ServiceClientGetStatisticsResponse, error) {
req, err := client.getStatisticsCreateRequest(ctx, options)
if err != nil {
return ServiceClientGetStatisticsResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return ServiceClientGetStatisticsResponse{}, err
}
@ -278,7 +271,7 @@ func (client *ServiceClient) getStatisticsCreateRequest(ctx context.Context, opt
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -315,7 +308,7 @@ func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (S
// bearer token authentication.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - keyInfo - Key information
// - options - ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey
// method.
@ -324,7 +317,7 @@ func (client *ServiceClient) GetUserDelegationKey(ctx context.Context, keyInfo K
if err != nil {
return ServiceClientGetUserDelegationKeyResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return ServiceClientGetUserDelegationKeyResponse{}, err
}
@ -347,12 +340,15 @@ func (client *ServiceClient) getUserDelegationKeyCreateRequest(ctx context.Conte
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["Accept"] = []string{"application/xml"}
return req, runtime.MarshalAsXML(req, keyInfo)
if err := runtime.MarshalAsXML(req, keyInfo); err != nil {
return nil, err
}
return req, nil
}
// getUserDelegationKeyHandleResponse handles the GetUserDelegationKey response.
@ -383,7 +379,7 @@ func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Respo
// NewListContainersSegmentPager - The List Containers Segment operation returns a list of the containers under the specified
// account
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - options - ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager
// method.
//
@ -411,7 +407,7 @@ func (client *ServiceClient) ListContainersSegmentCreateRequest(ctx context.Cont
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@ -441,7 +437,7 @@ func (client *ServiceClient) ListContainersSegmentHandleResponse(resp *http.Resp
// and CORS (Cross-Origin Resource Sharing) rules
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - storageServiceProperties - The StorageService properties.
// - options - ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method.
func (client *ServiceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (ServiceClientSetPropertiesResponse, error) {
@ -449,7 +445,7 @@ func (client *ServiceClient) SetProperties(ctx context.Context, storageServicePr
if err != nil {
return ServiceClientSetPropertiesResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return ServiceClientSetPropertiesResponse{}, err
}
@ -472,12 +468,15 @@ func (client *ServiceClient) setPropertiesCreateRequest(ctx context.Context, sto
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["Accept"] = []string{"application/xml"}
return req, runtime.MarshalAsXML(req, storageServiceProperties)
if err := runtime.MarshalAsXML(req, storageServiceProperties); err != nil {
return nil, err
}
return req, nil
}
// setPropertiesHandleResponse handles the SetProperties response.
@ -498,7 +497,7 @@ func (client *ServiceClient) setPropertiesHandleResponse(resp *http.Response) (S
// SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// Generated from API version 2023-08-03
// - contentLength - The length of the request.
// - multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header
// value: multipart/mixed; boundary=batch_
@ -509,7 +508,7 @@ func (client *ServiceClient) SubmitBatch(ctx context.Context, contentLength int6
if err != nil {
return ServiceClientSubmitBatchResponse{}, err
}
resp, err := client.pl.Do(req)
resp, err := client.internal.Pipeline().Do(req)
if err != nil {
return ServiceClientSubmitBatchResponse{}, err
}
@ -534,12 +533,15 @@ func (client *ServiceClient) submitBatchCreateRequest(ctx context.Context, conte
runtime.SkipBodyDownload(req)
req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
req.Raw().Header["Content-Type"] = []string{multipartContentType}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["Accept"] = []string{"application/xml"}
return req, req.SetBody(body, multipartContentType)
if err := req.SetBody(body, multipartContentType); err != nil {
return nil, err
}
return req, nil
}
// submitBatchHandleResponse handles the SubmitBatch response.


@ -11,10 +11,15 @@ import (
"errors"
)
const (
DefaultConcurrency = 5
)
// BatchTransferOptions identifies options used by doBatchTransfer.
type BatchTransferOptions struct {
TransferSize int64
ChunkSize int64
NumChunks uint16
Concurrency uint16
Operation func(ctx context.Context, offset int64, chunkSize int64) error
OperationName string
@ -28,13 +33,12 @@ func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error {
}
if o.Concurrency == 0 {
o.Concurrency = 5 // default concurrency
o.Concurrency = DefaultConcurrency // default concurrency
}
// Prepare and do parallel operations.
numChunks := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1)
operationChannel := make(chan func() error, o.Concurrency) // Create the channel that release 'concurrency' goroutines concurrently
operationResponseChannel := make(chan error, numChunks) // Holds each response
operationResponseChannel := make(chan error, o.NumChunks) // Holds each response
ctx, cancel := context.WithCancel(ctx)
defer cancel()
@ -50,10 +54,10 @@ func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error {
}
// Add each chunk's operation to the channel.
for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ {
for chunkNum := uint16(0); chunkNum < o.NumChunks; chunkNum++ {
curChunkSize := o.ChunkSize
if chunkNum == numChunks-1 { // Last chunk
if chunkNum == o.NumChunks-1 { // Last chunk
curChunkSize = o.TransferSize - (int64(chunkNum) * o.ChunkSize) // Remove size of all transferred chunks from total
}
offset := int64(chunkNum) * o.ChunkSize
@ -65,7 +69,7 @@ func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error {
// Wait for the operations to complete.
var firstErr error = nil
for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ {
for chunkNum := uint16(0); chunkNum < o.NumChunks; chunkNum++ {
responseError := <-operationResponseChannel
// record the first error (the original error which should cause the other chunks to fail with canceled context)
if responseError != nil && firstErr == nil {

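`DoBatchTransfer` now takes the chunk count from the caller via the new `NumChunks` field instead of recomputing it, and the default concurrency moves to the exported `DefaultConcurrency` constant. A hedged caller sketch against the options struct shown above; the package name, path, and operation body are illustrative, and `shared` is internal to the azblob module:

```go
package transfer // illustrative caller; real ones live inside the azblob module

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
)

func doTransfer(ctx context.Context, size, chunkSize int64) error {
	numChunks := uint16(((size - 1) / chunkSize) + 1) // callers now precompute the chunk count
	return shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{
		TransferSize:  size,
		ChunkSize:     chunkSize,
		NumChunks:     numChunks,
		Concurrency:   shared.DefaultConcurrency, // replaces the inline literal 5
		OperationName: "doTransfer",
		Operation: func(ctx context.Context, offset, count int64) error {
			// move the bytes in [offset, offset+count)
			return nil
		},
	})
}
```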

@ -0,0 +1,70 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package shared
type BufferManager[T ~[]byte] interface {
// Acquire returns the channel that contains the pool of buffers.
Acquire() <-chan T
// Release releases the buffer back to the pool for reuse/cleanup.
Release(T)
// Grow grows the number of buffers, up to the predefined max.
// It returns the total number of buffers or an error.
// No error is returned if the number of buffers has reached max.
// This is called only from the reading goroutine.
Grow() (int, error)
// Free cleans up all buffers.
Free()
}
// mmbPool implements the bufferManager interface.
// it uses anonymous memory mapped files for buffers.
// don't use this type directly, use newMMBPool() instead.
type mmbPool struct {
buffers chan Mmb
count int
max int
size int64
}
func NewMMBPool(maxBuffers int, bufferSize int64) BufferManager[Mmb] {
return &mmbPool{
buffers: make(chan Mmb, maxBuffers),
max: maxBuffers,
size: bufferSize,
}
}
func (pool *mmbPool) Acquire() <-chan Mmb {
return pool.buffers
}
func (pool *mmbPool) Grow() (int, error) {
if pool.count < pool.max {
buffer, err := NewMMB(pool.size)
if err != nil {
return 0, err
}
pool.buffers <- buffer
pool.count++
}
return pool.count, nil
}
func (pool *mmbPool) Release(buffer Mmb) {
pool.buffers <- buffer
}
func (pool *mmbPool) Free() {
for i := 0; i < pool.count; i++ {
buffer := <-pool.buffers
buffer.Delete()
}
pool.count = 0
}
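This new `BufferManager` interface and its `mmbPool` implementation generalize the memory-mapped buffers that used to be private to the blockblob package (see the `Mmb` renames below). A short lifecycle sketch using only the methods declared above; the sizes, package name, and import path are illustrative:

```go
package transfer // illustrative; the shared package is internal to azblob

import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"

func useBuffers() error {
	pool := shared.NewMMBPool(4, 8*1024*1024) // at most 4 buffers of 8 MiB each
	defer pool.Free()                         // unmap everything on exit

	if _, err := pool.Grow(); err != nil { // allocate one more buffer, capped at max
		return err
	}
	buf := <-pool.Acquire() // take an available buffer from the channel
	// ... fill buf and hand it off to an upload goroutine ...
	pool.Release(buf) // return it to the pool for reuse
	return nil
}
```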


@ -5,7 +5,7 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package blockblob
package shared
import (
"fmt"
@ -14,20 +14,20 @@ import (
)
// mmb is a memory mapped buffer
type mmb []byte
type Mmb []byte
// newMMB creates a new memory mapped buffer with the specified size
func newMMB(size int64) (mmb, error) {
func NewMMB(size int64) (Mmb, error) {
prot, flags := syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANON|syscall.MAP_PRIVATE
addr, err := syscall.Mmap(-1, 0, int(size), prot, flags)
if err != nil {
return nil, os.NewSyscallError("Mmap", err)
}
return mmb(addr), nil
return Mmb(addr), nil
}
// delete cleans up the memory mapped buffer
func (m *mmb) delete() {
func (m *Mmb) Delete() {
err := syscall.Munmap(*m)
*m = nil
if err != nil {


@ -4,7 +4,7 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package blockblob
package shared
import (
"fmt"
@ -14,11 +14,11 @@ import (
"unsafe"
)
// mmb is a memory mapped buffer
type mmb []byte
// Mmb is a memory mapped buffer
type Mmb []byte
// newMMB creates a new memory mapped buffer with the specified size
func newMMB(size int64) (mmb, error) {
// NewMMB creates a new memory mapped buffer with the specified size
func NewMMB(size int64) (Mmb, error) {
const InvalidHandleValue = ^uintptr(0) // -1
prot, access := uint32(syscall.PAGE_READWRITE), uint32(syscall.FILE_MAP_WRITE)
@ -35,7 +35,7 @@ func newMMB(size int64) (mmb, error) {
return nil, os.NewSyscallError("MapViewOfFile", err)
}
m := mmb{}
m := Mmb{}
h := (*reflect.SliceHeader)(unsafe.Pointer(&m))
h.Data = addr
h.Len = int(size)
@ -43,10 +43,10 @@ func newMMB(size int64) (mmb, error) {
return m, nil
}
// delete cleans up the memory mapped buffer
func (m *mmb) delete() {
// Delete cleans up the memory mapped buffer
func (m *Mmb) Delete() {
addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0])))
*m = mmb{}
*m = Mmb{}
err := syscall.UnmapViewOfFile(addr)
if err != nil {
// if we get here, there is likely memory corruption.


@ -44,6 +44,15 @@ const (
const crc64Polynomial uint64 = 0x9A6C9329AC4BC9B5
const (
AppendBlobClient = "azblob/appendblob.Client"
BlobClient = "azblob/blob.Client"
BlockBlobClient = "azblob/blockblob.Client"
ContainerClient = "azblob/container.Client"
PageBlobClient = "azblob/pageblob.Client"
ServiceClient = "azblob/service.Client"
)
var CRC64Table = crc64.MakeTable(crc64Polynomial)
// CopyOptions returns a zero-value T if opts is nil.


@ -37,10 +37,13 @@ type Client base.CompositeClient[generated.BlobClient, generated.PageBlobClient]
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
authPolicy := shared.NewStorageChallengePolicy(cred)
conOptions := shared.GetClientOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
return (*Client)(base.NewPageBlobClient(blobURL, pl, nil)), nil
azClient, err := azcore.NewClient(shared.PageBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
return (*Client)(base.NewPageBlobClient(blobURL, azClient, nil)), nil
}
// NewClientWithNoCredential creates an instance of Client with the specified values.
@ -49,9 +52,12 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio
// - options - client options; pass nil to accept the default values
func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) {
conOptions := shared.GetClientOptions(options)
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
return (*Client)(base.NewPageBlobClient(blobURL, pl, nil)), nil
azClient, err := azcore.NewClient(shared.PageBlobClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
return (*Client)(base.NewPageBlobClient(blobURL, azClient, nil)), nil
}
// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
@ -61,10 +67,13 @@ func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client,
func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) {
authPolicy := exported.NewSharedKeyCredPolicy(cred)
conOptions := shared.GetClientOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
return (*Client)(base.NewPageBlobClient(blobURL, pl, cred)), nil
azClient, err := azcore.NewClient(shared.PageBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
return (*Client)(base.NewPageBlobClient(blobURL, azClient, cred)), nil
}
// NewClientFromConnectionString creates an instance of Client with the specified values.
@ -119,7 +128,7 @@ func (pb *Client) WithSnapshot(snapshot string) (*Client, error) {
}
p.Snapshot = snapshot
return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().Pipeline(), pb.sharedKey())), nil
return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().InternalClient(), pb.sharedKey())), nil
}
// WithVersionID creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp.
@ -131,7 +140,7 @@ func (pb *Client) WithVersionID(versionID string) (*Client, error) {
}
p.VersionID = versionID
return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().Pipeline(), pb.sharedKey())), nil
return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().InternalClient(), pb.sharedKey())), nil
}
// Create creates a page blob of the specified length. Call PutPage to upload data to a page blob.
@ -228,7 +237,7 @@ func (pb *Client) NewGetPageRangesPager(o *GetPageRangesOptions) *runtime.Pager[
if err != nil {
return GetPageRangesResponse{}, err
}
resp, err := pb.generated().Pipeline().Do(req)
resp, err := pb.generated().InternalClient().Pipeline().Do(req)
if err != nil {
return GetPageRangesResponse{}, err
}
@ -261,7 +270,7 @@ func (pb *Client) NewGetPageRangesDiffPager(o *GetPageRangesDiffOptions) *runtim
if err != nil {
return GetPageRangesDiffResponse{}, err
}
resp, err := pb.generated().Pipeline().Do(req)
resp, err := pb.generated().InternalClient().Pipeline().Do(req)
if err != nil {
return GetPageRangesDiffResponse{}, err
}
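Consumer-facing construction does not change: the pageblob constructors keep their signatures and now build an `*azcore.Client` (named with the `shared.PageBlobClient` constant added above) instead of assembling a `runtime.Pipeline` directly. Creating a client still looks like this, for example; the URL is a placeholder:

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob"
)

func main() {
	// Placeholder URL; in practice append a SAS token or use one of the
	// credential-based constructors.
	client, err := pageblob.NewClientWithNoCredential(
		"https://<account>.blob.core.windows.net/<container>/<blob>", nil)
	if err != nil {
		log.Fatal(err)
	}
	_ = client // ready for Create, UploadPages, NewGetPageRangesPager, ...
}
```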


@ -25,13 +25,14 @@ type UserDelegationCredential = exported.UserDelegationCredential
// AccountSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas
type AccountSignatureValues struct {
Version string `param:"sv"` // If not specified, this format to SASVersion
Protocol Protocol `param:"spr"` // See the SASProtocol* constants
StartTime time.Time `param:"st"` // Not specified if IsZero
ExpiryTime time.Time `param:"se"` // Not specified if IsZero
Permissions string `param:"sp"` // Create by initializing AccountPermissions and then call String()
IPRange IPRange `param:"sip"`
ResourceTypes string `param:"srt"` // Create by initializing AccountResourceTypes and then call String()
Version string `param:"sv"` // If not specified, this format to SASVersion
Protocol Protocol `param:"spr"` // See the SASProtocol* constants
StartTime time.Time `param:"st"` // Not specified if IsZero
ExpiryTime time.Time `param:"se"` // Not specified if IsZero
Permissions string `param:"sp"` // Create by initializing AccountPermissions and then call String()
IPRange IPRange `param:"sip"`
ResourceTypes string `param:"srt"` // Create by initializing AccountResourceTypes and then call String()
EncryptionScope string `param:"ses"`
}
// SignWithSharedKey uses an account's shared key credential to sign this signature values to produce
@ -68,6 +69,7 @@ func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKey
v.IPRange.String(),
string(v.Protocol),
v.Version,
v.EncryptionScope,
""}, // That is right, the account SAS requires a terminating extra newline
"\n")
@ -77,12 +79,13 @@ func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKey
}
p := QueryParameters{
// Common SAS parameters
version: v.Version,
protocol: v.Protocol,
startTime: v.StartTime,
expiryTime: v.ExpiryTime,
permissions: v.Permissions,
ipRange: v.IPRange,
version: v.Version,
protocol: v.Protocol,
startTime: v.StartTime,
expiryTime: v.ExpiryTime,
permissions: v.Permissions,
ipRange: v.IPRange,
encryptionScope: v.EncryptionScope,
// Account-specific SAS parameters
services: "b", // will always be "b"

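Account SAS values pick up an `EncryptionScope` field (`ses`), included in both the string-to-sign and the emitted query parameters. A hedged sketch of setting it; the scope name is a placeholder, and the permission/resource-type strings would normally come from the `AccountPermissions`/`AccountResourceTypes` helpers mentioned in the field comments:

```go
package sample

import (
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)

func accountSASWithScope(cred *sas.SharedKeyCredential) (string, error) {
	values := sas.AccountSignatureValues{
		ExpiryTime:      time.Now().UTC().Add(time.Hour),
		Permissions:     "r",       // e.g. AccountPermissions{Read: true}.String()
		ResourceTypes:   "o",       // e.g. AccountResourceTypes{Object: true}.String()
		EncryptionScope: "myscope", // placeholder encryption scope name
	}
	qp, err := values.SignWithSharedKey(cred)
	if err != nil {
		return "", err
	}
	return qp.Encode(), nil // encoded query now carries ses=myscope
}
```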

@ -23,7 +23,7 @@ const (
var (
// Version is the default version encoded in the SAS token.
Version = "2020-02-10"
Version = "2021-12-02"
)
// TimeFormats ISO 8601 format.
@ -143,6 +143,7 @@ type QueryParameters struct {
authorizedObjectID string `param:"saoid"`
unauthorizedObjectID string `param:"suoid"`
correlationID string `param:"scid"`
encryptionScope string `param:"ses"`
// private member used for startTime and expiryTime formatting.
stTimeFormat string
seTimeFormat string
@ -163,6 +164,11 @@ func (p *QueryParameters) SignedCorrelationID() string {
return p.correlationID
}
// EncryptionScope returns encryptionScope
func (p *QueryParameters) EncryptionScope() string {
return p.encryptionScope
}
// SignedOID returns signedOID.
func (p *QueryParameters) SignedOID() string {
return p.signedOID
@ -355,6 +361,9 @@ func (p *QueryParameters) Encode() string {
if p.correlationID != "" {
v.Add("scid", p.correlationID)
}
if p.encryptionScope != "" {
v.Add("ses", p.encryptionScope)
}
return v.Encode()
}
@ -429,6 +438,8 @@ func NewQueryParameters(values url.Values, deleteSASParametersFromValues bool) Q
p.unauthorizedObjectID = val
case "scid":
p.correlationID = val
case "ses":
p.encryptionScope = val
default:
isSASKey = false // We didn't recognize the query parameter
}


@ -40,6 +40,7 @@ type BlobSignatureValues struct {
AuthorizedObjectID string // saoid
UnauthorizedObjectID string // suoid
CorrelationID string // scid
EncryptionScope string `param:"ses"`
}
func getDirectoryDepth(path string) string {
@ -51,17 +52,11 @@ func getDirectoryDepth(path string) string {
// SignWithSharedKey uses an account's SharedKeyCredential to sign this signature values to produce the proper SAS query parameters.
func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) {
if v.ExpiryTime.IsZero() || v.Permissions == "" {
if v.Identifier == "" && (v.ExpiryTime.IsZero() || v.Permissions == "") {
return QueryParameters{}, errors.New("service SAS is missing at least one of these: ExpiryTime or Permissions")
}
//Make sure the permission characters are in the correct order
perms, err := parseBlobPermissions(v.Permissions)
if err != nil {
return QueryParameters{}, err
}
v.Permissions = perms.String()
// Parse the resource
resource := "c"
if !v.SnapshotTime.IsZero() {
resource = "bs"
@ -76,6 +71,21 @@ func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCre
resource = "b"
}
// make sure the permission characters are in the correct order
if resource == "c" {
perms, err := parseContainerPermissions(v.Permissions)
if err != nil {
return QueryParameters{}, err
}
v.Permissions = perms.String()
} else {
perms, err := parseBlobPermissions(v.Permissions)
if err != nil {
return QueryParameters{}, err
}
v.Permissions = perms.String()
}
if v.Version == "" {
v.Version = Version
}
@ -94,7 +104,8 @@ func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCre
string(v.Protocol),
v.Version,
resource,
snapshotTime, // signed timestamp
snapshotTime, // signed timestamp
v.EncryptionScope,
v.CacheControl, // rscc
v.ContentDisposition, // rscd
v.ContentEncoding, // rsce
@ -109,12 +120,13 @@ func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCre
p := QueryParameters{
// Common SAS parameters
version: v.Version,
protocol: v.Protocol,
startTime: v.StartTime,
expiryTime: v.ExpiryTime,
permissions: v.Permissions,
ipRange: v.IPRange,
version: v.Version,
protocol: v.Protocol,
startTime: v.StartTime,
expiryTime: v.ExpiryTime,
permissions: v.Permissions,
ipRange: v.IPRange,
encryptionScope: v.EncryptionScope,
// Container/Blob-specific SAS parameters
resource: resource,
@ -202,7 +214,8 @@ func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *Us
string(v.Protocol),
v.Version,
resource,
snapshotTime, // signed timestamp
snapshotTime, // signed timestamp
v.EncryptionScope,
v.CacheControl, // rscc
v.ContentDisposition, // rscd
v.ContentEncoding, // rsce
@ -217,12 +230,13 @@ func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *Us
p := QueryParameters{
// Common SAS parameters
version: v.Version,
protocol: v.Protocol,
startTime: v.StartTime,
expiryTime: v.ExpiryTime,
permissions: v.Permissions,
ipRange: v.IPRange,
version: v.Version,
protocol: v.Protocol,
startTime: v.StartTime,
expiryTime: v.ExpiryTime,
permissions: v.Permissions,
ipRange: v.IPRange,
encryptionScope: v.EncryptionScope,
// Container/Blob-specific SAS parameters
resource: resource,
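
For the service SAS, the hunks above thread `EncryptionScope` into the string-to-sign, relax the ExpiryTime/Permissions check when an `Identifier` (stored access policy) is supplied, and validate container-level permissions against the container permission set instead of the blob set. A hedged sketch of a container SAS under v1.2.0 (names and key are placeholders):

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)

func main() {
	cred, err := azblob.NewSharedKeyCredential("myaccount", "bXlrZXk=") // placeholder key
	if err != nil {
		log.Fatal(err)
	}

	// Container-scoped SAS: with no BlobName the resource is "c", so the
	// permissions string is now parsed as container permissions.
	perms := sas.ContainerPermissions{Read: true, List: true}
	qp, err := sas.BlobSignatureValues{
		Protocol:        sas.ProtocolHTTPS,
		ExpiryTime:      time.Now().UTC().Add(2 * time.Hour),
		Permissions:     perms.String(),
		ContainerName:   "mycontainer",
		EncryptionScope: "my-encryption-scope", // hypothetical scope name
	}.SignWithSharedKey(cred)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("https://myaccount.blob.core.windows.net/mycontainer?%s\n", qp.Encode())
}
```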

View file

@ -42,10 +42,13 @@ type Client base.Client[generated.ServiceClient]
func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
authPolicy := shared.NewStorageChallengePolicy(cred)
conOptions := shared.GetClientOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
return (*Client)(base.NewServiceClient(serviceURL, pl, &cred)), nil
azClient, err := azcore.NewClient(shared.ServiceClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
return (*Client)(base.NewServiceClient(serviceURL, azClient, &cred)), nil
}
// NewClientWithNoCredential creates an instance of Client with the specified values.
@ -54,9 +57,12 @@ func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOp
// - options - client options; pass nil to accept the default values
func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) {
conOptions := shared.GetClientOptions(options)
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
return (*Client)(base.NewServiceClient(serviceURL, pl, nil)), nil
azClient, err := azcore.NewClient(shared.ServiceClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
return (*Client)(base.NewServiceClient(serviceURL, azClient, nil)), nil
}
// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
@ -66,10 +72,14 @@ func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Clie
func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) {
authPolicy := exported.NewSharedKeyCredPolicy(cred)
conOptions := shared.GetClientOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
return (*Client)(base.NewServiceClient(serviceURL, pl, cred)), nil
azClient, err := azcore.NewClient(shared.ServiceClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
if err != nil {
return nil, err
}
return (*Client)(base.NewServiceClient(serviceURL, azClient, cred)), nil
}
// NewClientFromConnectionString creates an instance of Client with the specified values.
@ -135,7 +145,7 @@ func (s *Client) URL() string {
// this Client's URL. The new container.Client uses the same request policy pipeline as the Client.
func (s *Client) NewContainerClient(containerName string) *container.Client {
containerURL := runtime.JoinPaths(s.generated().Endpoint(), containerName)
return (*container.Client)(base.NewContainerClient(containerURL, s.generated().Pipeline(), s.credential()))
return (*container.Client)(base.NewContainerClient(containerURL, s.generated().InternalClient().WithClientName(shared.ContainerClient), s.credential()))
}
// CreateContainer is a lifecycle method to creates a new container under the specified account.
@ -184,6 +194,9 @@ func (s *Client) NewListContainersPager(o *ListContainersOptions) *runtime.Pager
if o.Include.Metadata {
listOptions.Include = append(listOptions.Include, generated.ListContainersIncludeTypeMetadata)
}
if o.Include.System {
listOptions.Include = append(listOptions.Include, generated.ListContainersIncludeTypeSystem)
}
listOptions.Marker = o.Marker
listOptions.Maxresults = o.MaxResults
listOptions.Prefix = o.Prefix
@ -204,7 +217,7 @@ func (s *Client) NewListContainersPager(o *ListContainersOptions) *runtime.Pager
if err != nil {
return ListContainersResponse{}, err
}
resp, err := s.generated().Pipeline().Do(req)
resp, err := s.generated().InternalClient().Pipeline().Do(req)
if err != nil {
return ListContainersResponse{}, err
}

View file

@ -156,6 +156,9 @@ type ListContainersInclude struct {
// Tells the service whether to return soft-deleted containers.
Deleted bool
// Tells the service whether to return system containers.
System bool
}
// ---------------------------------------------------------------------------------------------------------------------
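
Together with the pager change above, the new `System` flag lets callers ask the service to include system containers (for example `$logs`) in listings. A rough sketch; the endpoint is a placeholder, and the `azidentity` credential is shown only for illustration and is not part of this update group:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := service.NewClient("https://myaccount.blob.core.windows.net/", cred, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Include system containers in the listing (new option in v1.2.0).
	pager := client.NewListContainersPager(&service.ListContainersOptions{
		Include: service.ListContainersInclude{System: true},
	})
	for pager.More() {
		page, err := pager.NextPage(context.TODO())
		if err != nil {
			log.Fatal(err)
		}
		for _, c := range page.ContainerItems {
			fmt.Println(*c.Name)
		}
	}
}
```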

View file

@ -5,7 +5,7 @@
// Package cmp determines equality of values.
//
// This package is intended to be a more powerful and safer alternative to
// reflect.DeepEqual for comparing whether two values are semantically equal.
// [reflect.DeepEqual] for comparing whether two values are semantically equal.
// It is intended to only be used in tests, as performance is not a goal and
// it may panic if it cannot compare the values. Its propensity towards
// panicking means that its unsuitable for production environments where a
@ -18,16 +18,17 @@
// For example, an equality function may report floats as equal so long as
// they are within some tolerance of each other.
//
// - Types with an Equal method may use that method to determine equality.
// This allows package authors to determine the equality operation
// for the types that they define.
// - Types with an Equal method (e.g., [time.Time.Equal]) may use that method
// to determine equality. This allows package authors to determine
// the equality operation for the types that they define.
//
// - If no custom equality functions are used and no Equal method is defined,
// equality is determined by recursively comparing the primitive kinds on
// both values, much like reflect.DeepEqual. Unlike reflect.DeepEqual,
// both values, much like [reflect.DeepEqual]. Unlike [reflect.DeepEqual],
// unexported fields are not compared by default; they result in panics
// unless suppressed by using an Ignore option (see cmpopts.IgnoreUnexported)
// or explicitly compared using the Exporter option.
// unless suppressed by using an [Ignore] option
// (see [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported])
// or explicitly compared using the [Exporter] option.
package cmp
import (
@ -45,14 +46,14 @@ import (
// Equal reports whether x and y are equal by recursively applying the
// following rules in the given order to x and y and all of their sub-values:
//
// - Let S be the set of all Ignore, Transformer, and Comparer options that
// - Let S be the set of all [Ignore], [Transformer], and [Comparer] options that
// remain after applying all path filters, value filters, and type filters.
// If at least one Ignore exists in S, then the comparison is ignored.
// If the number of Transformer and Comparer options in S is non-zero,
// If at least one [Ignore] exists in S, then the comparison is ignored.
// If the number of [Transformer] and [Comparer] options in S is non-zero,
// then Equal panics because it is ambiguous which option to use.
// If S contains a single Transformer, then use that to transform
// If S contains a single [Transformer], then use that to transform
// the current values and recursively call Equal on the output values.
// If S contains a single Comparer, then use that to compare the current values.
// If S contains a single [Comparer], then use that to compare the current values.
// Otherwise, evaluation proceeds to the next rule.
//
// - If the values have an Equal method of the form "(T) Equal(T) bool" or
@ -66,21 +67,22 @@ import (
// Functions are only equal if they are both nil, otherwise they are unequal.
//
// Structs are equal if recursively calling Equal on all fields report equal.
// If a struct contains unexported fields, Equal panics unless an Ignore option
// (e.g., cmpopts.IgnoreUnexported) ignores that field or the Exporter option
// explicitly permits comparing the unexported field.
// If a struct contains unexported fields, Equal panics unless an [Ignore] option
// (e.g., [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) ignores that field
// or the [Exporter] option explicitly permits comparing the unexported field.
//
// Slices are equal if they are both nil or both non-nil, where recursively
// calling Equal on all non-ignored slice or array elements report equal.
// Empty non-nil slices and nil slices are not equal; to equate empty slices,
// consider using cmpopts.EquateEmpty.
// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty].
//
// Maps are equal if they are both nil or both non-nil, where recursively
// calling Equal on all non-ignored map entries report equal.
// Map keys are equal according to the == operator.
// To use custom comparisons for map keys, consider using cmpopts.SortMaps.
// To use custom comparisons for map keys, consider using
// [github.com/google/go-cmp/cmp/cmpopts.SortMaps].
// Empty non-nil maps and nil maps are not equal; to equate empty maps,
// consider using cmpopts.EquateEmpty.
// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty].
//
// Pointers and interfaces are equal if they are both nil or both non-nil,
// where they have the same underlying concrete type and recursively
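
The go-cmp changes above are documentation link-ification, but the rules they describe are easy to demonstrate; a small sketch of the nil-versus-empty behavior the comment calls out:

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	// Nil and empty slices are unequal by default...
	fmt.Println(cmp.Equal([]int{}, []int(nil))) // false

	// ...unless cmpopts.EquateEmpty is supplied, as the doc comment suggests.
	fmt.Println(cmp.Equal([]int{}, []int(nil), cmpopts.EquateEmpty())) // true

	// cmp.Diff reports a human-readable difference (empty string when equal).
	fmt.Println(cmp.Diff(map[string]int{"a": 1}, map[string]int{"a": 2}))
}
```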

View file

@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !purego
// +build !purego
package cmp
import (
@ -12,8 +9,6 @@ import (
"unsafe"
)
const supportExporters = true
// retrieveUnexportedField uses unsafe to forcibly retrieve any field from
// a struct such that the value has read-write permissions.
//

View file

@ -1,16 +0,0 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build purego
// +build purego
package cmp
import "reflect"
const supportExporters = false
func retrieveUnexportedField(reflect.Value, reflect.StructField, bool) reflect.Value {
panic("no support for forcibly accessing unexported fields")
}

View file

@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !purego
// +build !purego
package value
import (

View file

@ -1,34 +0,0 @@
// Copyright 2018, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build purego
// +build purego
package value
import "reflect"
// Pointer is an opaque typed pointer and is guaranteed to be comparable.
type Pointer struct {
p uintptr
t reflect.Type
}
// PointerOf returns a Pointer from v, which must be a
// reflect.Ptr, reflect.Slice, or reflect.Map.
func PointerOf(v reflect.Value) Pointer {
// NOTE: Storing a pointer as an uintptr is technically incorrect as it
// assumes that the GC implementation does not use a moving collector.
return Pointer{v.Pointer(), v.Type()}
}
// IsNil reports whether the pointer is nil.
func (p Pointer) IsNil() bool {
return p.p == 0
}
// Uintptr returns the pointer as a uintptr.
func (p Pointer) Uintptr() uintptr {
return p.p
}

View file

@ -13,15 +13,15 @@ import (
"github.com/google/go-cmp/cmp/internal/function"
)
// Option configures for specific behavior of Equal and Diff. In particular,
// the fundamental Option functions (Ignore, Transformer, and Comparer),
// Option configures for specific behavior of [Equal] and [Diff]. In particular,
// the fundamental Option functions ([Ignore], [Transformer], and [Comparer]),
// configure how equality is determined.
//
// The fundamental options may be composed with filters (FilterPath and
// FilterValues) to control the scope over which they are applied.
// The fundamental options may be composed with filters ([FilterPath] and
// [FilterValues]) to control the scope over which they are applied.
//
// The cmp/cmpopts package provides helper functions for creating options that
// may be used with Equal and Diff.
// The [github.com/google/go-cmp/cmp/cmpopts] package provides helper functions
// for creating options that may be used with [Equal] and [Diff].
type Option interface {
// filter applies all filters and returns the option that remains.
// Each option may only read s.curPath and call s.callTTBFunc.
@ -56,9 +56,9 @@ type core struct{}
func (core) isCore() {}
// Options is a list of Option values that also satisfies the Option interface.
// Options is a list of [Option] values that also satisfies the [Option] interface.
// Helper comparison packages may return an Options value when packing multiple
// Option values into a single Option. When this package processes an Options,
// [Option] values into a single [Option]. When this package processes an Options,
// it will be implicitly expanded into a flat list.
//
// Applying a filter on an Options is equivalent to applying that same filter
@ -105,16 +105,16 @@ func (opts Options) String() string {
return fmt.Sprintf("Options{%s}", strings.Join(ss, ", "))
}
// FilterPath returns a new Option where opt is only evaluated if filter f
// returns true for the current Path in the value tree.
// FilterPath returns a new [Option] where opt is only evaluated if filter f
// returns true for the current [Path] in the value tree.
//
// This filter is called even if a slice element or map entry is missing and
// provides an opportunity to ignore such cases. The filter function must be
// symmetric such that the filter result is identical regardless of whether the
// missing value is from x or y.
//
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
// a previously filtered Option.
// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or
// a previously filtered [Option].
func FilterPath(f func(Path) bool, opt Option) Option {
if f == nil {
panic("invalid path filter function")
@ -142,7 +142,7 @@ func (f pathFilter) String() string {
return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt)
}
// FilterValues returns a new Option where opt is only evaluated if filter f,
// FilterValues returns a new [Option] where opt is only evaluated if filter f,
// which is a function of the form "func(T, T) bool", returns true for the
// current pair of values being compared. If either value is invalid or
// the type of the values is not assignable to T, then this filter implicitly
@ -154,8 +154,8 @@ func (f pathFilter) String() string {
// If T is an interface, it is possible that f is called with two values with
// different concrete types that both implement T.
//
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
// a previously filtered Option.
// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or
// a previously filtered [Option].
func FilterValues(f interface{}, opt Option) Option {
v := reflect.ValueOf(f)
if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() {
@ -192,9 +192,9 @@ func (f valuesFilter) String() string {
return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt)
}
// Ignore is an Option that causes all comparisons to be ignored.
// This value is intended to be combined with FilterPath or FilterValues.
// It is an error to pass an unfiltered Ignore option to Equal.
// Ignore is an [Option] that causes all comparisons to be ignored.
// This value is intended to be combined with [FilterPath] or [FilterValues].
// It is an error to pass an unfiltered Ignore option to [Equal].
func Ignore() Option { return ignore{} }
type ignore struct{ core }
@ -234,6 +234,8 @@ func (validator) apply(s *state, vx, vy reflect.Value) {
name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType
if _, ok := reflect.New(t).Interface().(error); ok {
help = "consider using cmpopts.EquateErrors to compare error values"
} else if t.Comparable() {
help = "consider using cmpopts.EquateComparable to compare comparable Go types"
}
} else {
// Unnamed type with unexported fields. Derive PkgPath from field.
@ -254,7 +256,7 @@ const identRx = `[_\p{L}][_\p{L}\p{N}]*`
var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`)
// Transformer returns an Option that applies a transformation function that
// Transformer returns an [Option] that applies a transformation function that
// converts values of a certain type into that of another.
//
// The transformer f must be a function "func(T) R" that converts values of
@ -265,13 +267,14 @@ var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`)
// same transform to the output of itself (e.g., in the case where the
// input and output types are the same), an implicit filter is added such that
// a transformer is applicable only if that exact transformer is not already
// in the tail of the Path since the last non-Transform step.
// in the tail of the [Path] since the last non-[Transform] step.
// For situations where the implicit filter is still insufficient,
// consider using cmpopts.AcyclicTransformer, which adds a filter
// to prevent the transformer from being recursively applied upon itself.
// consider using [github.com/google/go-cmp/cmp/cmpopts.AcyclicTransformer],
// which adds a filter to prevent the transformer from
// being recursively applied upon itself.
//
// The name is a user provided label that is used as the Transform.Name in the
// transformation PathStep (and eventually shown in the Diff output).
// The name is a user provided label that is used as the [Transform.Name] in the
// transformation [PathStep] (and eventually shown in the [Diff] output).
// The name must be a valid identifier or qualified identifier in Go syntax.
// If empty, an arbitrary name is used.
func Transformer(name string, f interface{}) Option {
@ -329,7 +332,7 @@ func (tr transformer) String() string {
return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc))
}
// Comparer returns an Option that determines whether two values are equal
// Comparer returns an [Option] that determines whether two values are equal
// to each other.
//
// The comparer f must be a function "func(T, T) bool" and is implicitly
@ -377,35 +380,32 @@ func (cm comparer) String() string {
return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc))
}
// Exporter returns an Option that specifies whether Equal is allowed to
// Exporter returns an [Option] that specifies whether [Equal] is allowed to
// introspect into the unexported fields of certain struct types.
//
// Users of this option must understand that comparing on unexported fields
// from external packages is not safe since changes in the internal
// implementation of some external package may cause the result of Equal
// implementation of some external package may cause the result of [Equal]
// to unexpectedly change. However, it may be valid to use this option on types
// defined in an internal package where the semantic meaning of an unexported
// field is in the control of the user.
//
// In many cases, a custom Comparer should be used instead that defines
// In many cases, a custom [Comparer] should be used instead that defines
// equality as a function of the public API of a type rather than the underlying
// unexported implementation.
//
// For example, the reflect.Type documentation defines equality to be determined
// For example, the [reflect.Type] documentation defines equality to be determined
// by the == operator on the interface (essentially performing a shallow pointer
// comparison) and most attempts to compare *regexp.Regexp types are interested
// comparison) and most attempts to compare *[regexp.Regexp] types are interested
// in only checking that the regular expression strings are equal.
// Both of these are accomplished using Comparers:
// Both of these are accomplished using [Comparer] options:
//
// Comparer(func(x, y reflect.Type) bool { return x == y })
// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })
//
// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore
// all unexported fields on specified struct types.
// In other cases, the [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]
// option can be used to ignore all unexported fields on specified struct types.
func Exporter(f func(reflect.Type) bool) Option {
if !supportExporters {
panic("Exporter is not supported on purego builds")
}
return exporter(f)
}
@ -415,10 +415,10 @@ func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableO
panic("not implemented")
}
// AllowUnexported returns an Options that allows Equal to forcibly introspect
// AllowUnexported returns an [Option] that allows [Equal] to forcibly introspect
// unexported fields of the specified struct types.
//
// See Exporter for the proper use of this option.
// See [Exporter] for the proper use of this option.
func AllowUnexported(types ...interface{}) Option {
m := make(map[reflect.Type]bool)
for _, typ := range types {
@ -432,7 +432,7 @@ func AllowUnexported(types ...interface{}) Option {
}
// Result represents the comparison result for a single node and
// is provided by cmp when calling Report (see Reporter).
// is provided by cmp when calling Report (see [Reporter]).
type Result struct {
_ [0]func() // Make Result incomparable
flags resultFlags
@ -445,7 +445,7 @@ func (r Result) Equal() bool {
}
// ByIgnore reports whether the node is equal because it was ignored.
// This never reports true if Equal reports false.
// This never reports true if [Result.Equal] reports false.
func (r Result) ByIgnore() bool {
return r.flags&reportByIgnore != 0
}
@ -455,7 +455,7 @@ func (r Result) ByMethod() bool {
return r.flags&reportByMethod != 0
}
// ByFunc reports whether a Comparer function determined equality.
// ByFunc reports whether a [Comparer] function determined equality.
func (r Result) ByFunc() bool {
return r.flags&reportByFunc != 0
}
@ -478,7 +478,7 @@ const (
reportByCycle
)
// Reporter is an Option that can be passed to Equal. When Equal traverses
// Reporter is an [Option] that can be passed to [Equal]. When [Equal] traverses
// the value trees, it calls PushStep as it descends into each node in the
// tree and PopStep as it ascend out of the node. The leaves of the tree are
// either compared (determined to be equal or not equal) or ignored and reported
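
The updated validator message suggests `cmpopts.EquateComparable` for comparable types with unexported fields. A hedged sketch using `net/netip.Addr`, a typical such type:

```go
package main

import (
	"fmt"
	"net/netip"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	x := map[string]netip.Addr{"gw": netip.MustParseAddr("10.0.0.1")}
	y := map[string]netip.Addr{"gw": netip.MustParseAddr("10.0.0.1")}

	// netip.Addr has unexported fields, so plain cmp.Equal(x, y) would panic.
	// EquateComparable tells cmp to fall back to == for the listed types.
	fmt.Println(cmp.Equal(x, y, cmpopts.EquateComparable(netip.Addr{}))) // true
}
```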

View file

@ -14,9 +14,9 @@ import (
"github.com/google/go-cmp/cmp/internal/value"
)
// Path is a list of PathSteps describing the sequence of operations to get
// Path is a list of [PathStep] describing the sequence of operations to get
// from some root type to the current position in the value tree.
// The first Path element is always an operation-less PathStep that exists
// The first Path element is always an operation-less [PathStep] that exists
// simply to identify the initial type.
//
// When traversing structs with embedded structs, the embedded struct will
@ -29,8 +29,13 @@ type Path []PathStep
// a value's tree structure. Users of this package never need to implement
// these types as values of this type will be returned by this package.
//
// Implementations of this interface are
// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform.
// Implementations of this interface:
// - [StructField]
// - [SliceIndex]
// - [MapIndex]
// - [Indirect]
// - [TypeAssertion]
// - [Transform]
type PathStep interface {
String() string
@ -70,8 +75,9 @@ func (pa *Path) pop() {
*pa = (*pa)[:len(*pa)-1]
}
// Last returns the last PathStep in the Path.
// If the path is empty, this returns a non-nil PathStep that reports a nil Type.
// Last returns the last [PathStep] in the Path.
// If the path is empty, this returns a non-nil [PathStep]
// that reports a nil [PathStep.Type].
func (pa Path) Last() PathStep {
return pa.Index(-1)
}
@ -79,7 +85,8 @@ func (pa Path) Last() PathStep {
// Index returns the ith step in the Path and supports negative indexing.
// A negative index starts counting from the tail of the Path such that -1
// refers to the last step, -2 refers to the second-to-last step, and so on.
// If index is invalid, this returns a non-nil PathStep that reports a nil Type.
// If index is invalid, this returns a non-nil [PathStep]
// that reports a nil [PathStep.Type].
func (pa Path) Index(i int) PathStep {
if i < 0 {
i = len(pa) + i
@ -168,7 +175,8 @@ func (ps pathStep) String() string {
return fmt.Sprintf("{%s}", s)
}
// StructField represents a struct field access on a field called Name.
// StructField is a [PathStep] that represents a struct field access
// on a field called [StructField.Name].
type StructField struct{ *structField }
type structField struct {
pathStep
@ -204,10 +212,11 @@ func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) }
func (sf StructField) Name() string { return sf.name }
// Index is the index of the field in the parent struct type.
// See reflect.Type.Field.
// See [reflect.Type.Field].
func (sf StructField) Index() int { return sf.idx }
// SliceIndex is an index operation on a slice or array at some index Key.
// SliceIndex is a [PathStep] that represents an index operation on
// a slice or array at some index [SliceIndex.Key].
type SliceIndex struct{ *sliceIndex }
type sliceIndex struct {
pathStep
@ -247,12 +256,12 @@ func (si SliceIndex) Key() int {
// all of the indexes to be shifted. If an index is -1, then that
// indicates that the element does not exist in the associated slice.
//
// Key is guaranteed to return -1 if and only if the indexes returned
// by SplitKeys are not the same. SplitKeys will never return -1 for
// [SliceIndex.Key] is guaranteed to return -1 if and only if the indexes
// returned by SplitKeys are not the same. SplitKeys will never return -1 for
// both indexes.
func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey }
// MapIndex is an index operation on a map at some index Key.
// MapIndex is a [PathStep] that represents an index operation on a map at some index Key.
type MapIndex struct{ *mapIndex }
type mapIndex struct {
pathStep
@ -266,7 +275,7 @@ func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]",
// Key is the value of the map key.
func (mi MapIndex) Key() reflect.Value { return mi.key }
// Indirect represents pointer indirection on the parent type.
// Indirect is a [PathStep] that represents pointer indirection on the parent type.
type Indirect struct{ *indirect }
type indirect struct {
pathStep
@ -276,7 +285,7 @@ func (in Indirect) Type() reflect.Type { return in.typ }
func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy }
func (in Indirect) String() string { return "*" }
// TypeAssertion represents a type assertion on an interface.
// TypeAssertion is a [PathStep] that represents a type assertion on an interface.
type TypeAssertion struct{ *typeAssertion }
type typeAssertion struct {
pathStep
@ -286,7 +295,8 @@ func (ta TypeAssertion) Type() reflect.Type { return ta.typ }
func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy }
func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) }
// Transform is a transformation from the parent type to the current type.
// Transform is a [PathStep] that represents a transformation
// from the parent type to the current type.
type Transform struct{ *transform }
type transform struct {
pathStep
@ -297,13 +307,13 @@ func (tf Transform) Type() reflect.Type { return tf.typ }
func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy }
func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) }
// Name is the name of the Transformer.
// Name is the name of the [Transformer].
func (tf Transform) Name() string { return tf.trans.name }
// Func is the function pointer to the transformer function.
func (tf Transform) Func() reflect.Value { return tf.trans.fnc }
// Option returns the originally constructed Transformer option.
// Option returns the originally constructed [Transformer] option.
// The == operator can be used to detect the exact option used.
func (tf Transform) Option() Option { return tf.trans }

View file

@ -199,7 +199,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind,
break
}
sf := t.Field(i)
if supportExporters && !isExported(sf.Name) {
if !isExported(sf.Name) {
vv = retrieveUnexportedField(v, sf, true)
}
s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs)

View file

@ -1,5 +1,21 @@
# Changelog
## v4.11.2 - 2023-10-11
**Security**
* Bump golang.org/x/net to prevent CVE-2023-39325 / CVE-2023-44487 HTTP/2 Rapid Reset Attack [#2527](https://github.com/labstack/echo/pull/2527)
* fix(sec): randomString bias introduced by #2490 [#2492](https://github.com/labstack/echo/pull/2492)
* CSRF/RequestID mw: switch math/random usage to crypto/random [#2490](https://github.com/labstack/echo/pull/2490)
**Enhancements**
* Delete unused context in body_limit.go [#2483](https://github.com/labstack/echo/pull/2483)
* Use Go 1.21 in CI [#2505](https://github.com/labstack/echo/pull/2505)
* Fix some typos [#2511](https://github.com/labstack/echo/pull/2511)
* Allow CORS middleware to send Access-Control-Max-Age: 0 [#2518](https://github.com/labstack/echo/pull/2518)
* Bump dependancies [#2522](https://github.com/labstack/echo/pull/2522)
## v4.11.1 - 2023-07-16
**Fixes**

View file

@ -3,7 +3,7 @@
[![Sourcegraph](https://sourcegraph.com/github.com/labstack/echo/-/badge.svg?style=flat-square)](https://sourcegraph.com/github.com/labstack/echo?badge)
[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/labstack/echo/v4)
[![Go Report Card](https://goreportcard.com/badge/github.com/labstack/echo?style=flat-square)](https://goreportcard.com/report/github.com/labstack/echo)
[![Build Status](http://img.shields.io/travis/labstack/echo.svg?style=flat-square)](https://travis-ci.org/labstack/echo)
[![GitHub Workflow Status (with event)](https://img.shields.io/github/actions/workflow/status/labstack/echo/echo.yml?style=flat-square)](https://github.com/labstack/echo/actions)
[![Codecov](https://img.shields.io/codecov/c/github/labstack/echo.svg?style=flat-square)](https://codecov.io/gh/labstack/echo)
[![Forum](https://img.shields.io/badge/community-forum-00afd1.svg?style=flat-square)](https://github.com/labstack/echo/discussions)
[![Twitter](https://img.shields.io/badge/twitter-@labstack-55acee.svg?style=flat-square)](https://twitter.com/labstack)

View file

@ -259,7 +259,7 @@ const (
const (
// Version of Echo
Version = "4.11.1"
Version = "4.11.2"
website = "https://echo.labstack.com"
// http://patorjk.com/software/taag/#p=display&f=Small%20Slant&t=Echo
banner = `

View file

@ -23,9 +23,8 @@ type (
limitedReader struct {
BodyLimitConfig
reader io.ReadCloser
read int64
context echo.Context
reader io.ReadCloser
read int64
}
)
@ -80,7 +79,7 @@ func BodyLimitWithConfig(config BodyLimitConfig) echo.MiddlewareFunc {
// Based on content read
r := pool.Get().(*limitedReader)
r.Reset(req.Body, c)
r.Reset(req.Body)
defer pool.Put(r)
req.Body = r
@ -102,9 +101,8 @@ func (r *limitedReader) Close() error {
return r.reader.Close()
}
func (r *limitedReader) Reset(reader io.ReadCloser, context echo.Context) {
func (r *limitedReader) Reset(reader io.ReadCloser) {
r.reader = reader
r.context = context
r.read = 0
}

View file

@ -13,7 +13,7 @@ type ContextTimeoutConfig struct {
// Skipper defines a function to skip middleware.
Skipper Skipper
// ErrorHandler is a function when error aries in middeware execution.
// ErrorHandler is a function when error aries in middleware execution.
ErrorHandler func(err error, c echo.Context) error
// Timeout configures a timeout for the middleware, defaults to 0 for no timeout

View file

@ -99,8 +99,9 @@ type (
// MaxAge determines the value of the Access-Control-Max-Age response header.
// This header indicates how long (in seconds) the results of a preflight
// request can be cached.
// The header is set only if MaxAge != 0, negative value sends "0" which instructs browsers not to cache that response.
//
// Optional. Default value 0. The header is set only if MaxAge > 0.
// Optional. Default value 0 - meaning header is not sent.
//
// See also: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Max-Age
MaxAge int `yaml:"max_age"`
@ -159,7 +160,11 @@ func CORSWithConfig(config CORSConfig) echo.MiddlewareFunc {
allowMethods := strings.Join(config.AllowMethods, ",")
allowHeaders := strings.Join(config.AllowHeaders, ",")
exposeHeaders := strings.Join(config.ExposeHeaders, ",")
maxAge := strconv.Itoa(config.MaxAge)
maxAge := "0"
if config.MaxAge > 0 {
maxAge = strconv.Itoa(config.MaxAge)
}
return func(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
@ -282,7 +287,7 @@ func CORSWithConfig(config CORSConfig) echo.MiddlewareFunc {
res.Header().Set(echo.HeaderAccessControlAllowHeaders, h)
}
}
if config.MaxAge > 0 {
if config.MaxAge != 0 {
res.Header().Set(echo.HeaderAccessControlMaxAge, maxAge)
}
return c.NoContent(http.StatusNoContent)
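
The net effect of the CORS change: `MaxAge == 0` (the default) still omits the header, a positive value sends it as before, and a negative value now sends `Access-Control-Max-Age: 0` to disable preflight caching. A minimal sketch:

```go
package main

import (
	"net/http"

	"github.com/labstack/echo/v4"
	"github.com/labstack/echo/v4/middleware"
)

func main() {
	e := echo.New()

	// MaxAge == 0 (default): no Access-Control-Max-Age header is sent.
	// MaxAge > 0: header is sent with that value, as before.
	// MaxAge < 0 (new in 4.11.2): header is sent as "0", telling browsers
	// not to cache preflight responses.
	e.Use(middleware.CORSWithConfig(middleware.CORSConfig{
		AllowOrigins: []string{"https://example.com"},
		MaxAge:       -1,
	}))

	e.GET("/", func(c echo.Context) error {
		return c.String(http.StatusOK, "ok")
	})
	e.Logger.Fatal(e.Start(":8080"))
}
```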

View file

@ -6,7 +6,6 @@ import (
"time"
"github.com/labstack/echo/v4"
"github.com/labstack/gommon/random"
)
type (
@ -103,6 +102,7 @@ func CSRFWithConfig(config CSRFConfig) echo.MiddlewareFunc {
if config.TokenLength == 0 {
config.TokenLength = DefaultCSRFConfig.TokenLength
}
if config.TokenLookup == "" {
config.TokenLookup = DefaultCSRFConfig.TokenLookup
}
@ -132,7 +132,7 @@ func CSRFWithConfig(config CSRFConfig) echo.MiddlewareFunc {
token := ""
if k, err := c.Cookie(config.CookieName); err != nil {
token = random.String(config.TokenLength) // Generate token
token = randomString(config.TokenLength)
} else {
token = k.Value // Reuse token
}

View file

@ -2,7 +2,6 @@ package middleware
import (
"github.com/labstack/echo/v4"
"github.com/labstack/gommon/random"
)
type (
@ -12,7 +11,7 @@ type (
Skipper Skipper
// Generator defines a function to generate an ID.
// Optional. Default value random.String(32).
// Optional. Defaults to generator for random string of length 32.
Generator func() string
// RequestIDHandler defines a function which is executed for a request id.
@ -73,5 +72,5 @@ func RequestIDWithConfig(config RequestIDConfig) echo.MiddlewareFunc {
}
func generator() string {
return random.String(32)
return randomString(32)
}

View file

@ -1,7 +1,11 @@
package middleware
import (
"bufio"
"crypto/rand"
"io"
"strings"
"sync"
)
func matchScheme(domain, pattern string) bool {
@ -52,3 +56,45 @@ func matchSubdomain(domain, pattern string) bool {
}
return false
}
// https://tip.golang.org/doc/go1.19#:~:text=Read%20no%20longer%20buffers%20random%20data%20obtained%20from%20the%20operating%20system%20between%20calls
var randomReaderPool = sync.Pool{New: func() interface{} {
return bufio.NewReader(rand.Reader)
}}
const randomStringCharset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
const randomStringCharsetLen = 52 // len(randomStringCharset)
const randomStringMaxByte = 255 - (256 % randomStringCharsetLen)
func randomString(length uint8) string {
reader := randomReaderPool.Get().(*bufio.Reader)
defer randomReaderPool.Put(reader)
b := make([]byte, length)
r := make([]byte, length+(length/4)) // perf: avoid read from rand.Reader many times
var i uint8 = 0
// security note:
// we can't just simply do b[i]=randomStringCharset[rb%len(randomStringCharset)],
// len(len(randomStringCharset)) is 52, and rb is [0, 255], 256 = 52 * 4 + 48.
// make the first 48 characters more possibly to be generated then others.
// So we have to skip bytes when rb > randomStringMaxByte
for {
_, err := io.ReadFull(reader, r)
if err != nil {
panic("unexpected error happened when reading from bufio.NewReader(crypto/rand.Reader)")
}
for _, rb := range r {
if rb > randomStringMaxByte {
// Skip this number to avoid bias.
continue
}
b[i] = randomStringCharset[rb%randomStringCharsetLen]
i++
if i == length {
return string(b)
}
}
}
}
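
As a quick standalone illustration of why the skip threshold exists (not part of the diff): 256 is not a multiple of 52, so a plain modulo would favor the first 48 characters. The arithmetic:

```go
package main

import "fmt"

func main() {
	const charsetLen = 52 // len(randomStringCharset) in the middleware above
	naive := make([]int, charsetLen)
	unbiased := make([]int, charsetLen)

	for b := 0; b < 256; b++ {
		// Naive approach: every byte is used, so the first 256%52 = 48
		// characters each receive one extra byte value (5 instead of 4).
		naive[b%charsetLen]++

		// Rejection sampling as in randomString: bytes above 207
		// (255 - 256%52) are skipped, leaving exactly 4 bytes per character.
		if b <= 255-(256%charsetLen) {
			unbiased[b%charsetLen]++
		}
	}

	fmt.Println(naive[0], naive[charsetLen-1])       // 5 4  -> biased
	fmt.Println(unbiased[0], unbiased[charsetLen-1]) // 4 4  -> uniform
}
```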

View file

@ -1,48 +0,0 @@
package random
import (
"math/rand"
"strings"
"time"
)
type (
Random struct {
}
)
// Charsets
const (
Uppercase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
Lowercase = "abcdefghijklmnopqrstuvwxyz"
Alphabetic = Uppercase + Lowercase
Numeric = "0123456789"
Alphanumeric = Alphabetic + Numeric
Symbols = "`" + `~!@#$%^&*()-_+={}[]|\;:"<>,./?`
Hex = Numeric + "abcdef"
)
var (
global = New()
)
func New() *Random {
rand.Seed(time.Now().UnixNano())
return new(Random)
}
func (r *Random) String(length uint8, charsets ...string) string {
charset := strings.Join(charsets, "")
if charset == "" {
charset = Alphanumeric
}
b := make([]byte, length)
for i := range b {
b[i] = charset[rand.Int63()%int64(len(charset))]
}
return string(b)
}
func String(length uint8, charsets ...string) string {
return global.String(length, charsets...)
}

View file

@ -20,6 +20,7 @@ import (
"time"
dto "github.com/prometheus/client_model/go"
"google.golang.org/protobuf/types/known/timestamppb"
)
// Counter is a Metric that represents a single numerical value that only ever
@ -66,7 +67,7 @@ type CounterVecOpts struct {
CounterOpts
// VariableLabels are used to partition the metric vector by the given set
// of labels. Each label value will be constrained with the optional Contraint
// of labels. Each label value will be constrained with the optional Constraint
// function, if provided.
VariableLabels ConstrainableLabels
}
@ -90,8 +91,12 @@ func NewCounter(opts CounterOpts) Counter {
nil,
opts.ConstLabels,
)
result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now}
if opts.now == nil {
opts.now = time.Now
}
result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: opts.now}
result.init(result) // Init self-collection.
result.createdTs = timestamppb.New(opts.now())
return result
}
@ -106,10 +111,12 @@ type counter struct {
selfCollector
desc *Desc
createdTs *timestamppb.Timestamp
labelPairs []*dto.LabelPair
exemplar atomic.Value // Containing nil or a *dto.Exemplar.
now func() time.Time // To mock out time.Now() for testing.
// now is for testing purposes, by default it's time.Now.
now func() time.Time
}
func (c *counter) Desc() *Desc {
@ -159,8 +166,7 @@ func (c *counter) Write(out *dto.Metric) error {
exemplar = e.(*dto.Exemplar)
}
val := c.get()
return populateMetric(CounterValue, val, c.labelPairs, exemplar, out)
return populateMetric(CounterValue, val, c.labelPairs, exemplar, out, c.createdTs)
}
func (c *counter) updateExemplar(v float64, l Labels) {
@ -200,13 +206,17 @@ func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec {
opts.VariableLabels,
opts.ConstLabels,
)
if opts.now == nil {
opts.now = time.Now
}
return &CounterVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
if len(lvs) != len(desc.variableLabels) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs))
if len(lvs) != len(desc.variableLabels.names) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
}
result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now}
result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: opts.now}
result.init(result) // Init self-collection.
result.createdTs = timestamppb.New(opts.now())
return result
}),
}
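
The counter now records its creation time and passes it through `populateMetric`. A hedged sketch of observing that via `Write`, assuming the `client_model` dto types in use are new enough to carry `CreatedTimestamp` (client_golang 1.17.0 requires such a version):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "demo_requests_total",
		Help: "Demo counter.",
	})
	c.Add(3)

	var m dto.Metric
	if err := c.Write(&m); err != nil {
		log.Fatal(err)
	}

	// As of 1.17.0 the counter carries the time it was created, which
	// exposition formats can surface as a created timestamp.
	fmt.Println(m.GetCounter().GetValue())            // 3
	fmt.Println(m.GetCounter().GetCreatedTimestamp()) // roughly the time of NewCounter
}
```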

View file

@ -52,7 +52,7 @@ type Desc struct {
constLabelPairs []*dto.LabelPair
// variableLabels contains names of labels and normalization function for
// which the metric maintains variable values.
variableLabels ConstrainedLabels
variableLabels *compiledLabels
// id is a hash of the values of the ConstLabels and fqName. This
// must be unique among all registered descriptors and can therefore be
// used as an identifier of the descriptor.
@ -93,7 +93,7 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const
d := &Desc{
fqName: fqName,
help: help,
variableLabels: variableLabels.constrainedLabels(),
variableLabels: variableLabels.compile(),
}
if !model.IsValidMetricName(model.LabelValue(fqName)) {
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
@ -103,7 +103,7 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const
// their sorted label names) plus the fqName (at position 0).
labelValues := make([]string, 1, len(constLabels)+1)
labelValues[0] = fqName
labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels))
labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels.names))
labelNameSet := map[string]struct{}{}
// First add only the const label names and sort them...
for labelName := range constLabels {
@ -128,13 +128,13 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const
// Now add the variable label names, but prefix them with something that
// cannot be in a regular label name. That prevents matching the label
// dimension with a different mix between preset and variable labels.
for _, label := range d.variableLabels {
if !checkLabelName(label.Name) {
d.err = fmt.Errorf("%q is not a valid label name for metric %q", label.Name, fqName)
for _, label := range d.variableLabels.names {
if !checkLabelName(label) {
d.err = fmt.Errorf("%q is not a valid label name for metric %q", label, fqName)
return d
}
labelNames = append(labelNames, "$"+label.Name)
labelNameSet[label.Name] = struct{}{}
labelNames = append(labelNames, "$"+label)
labelNameSet[label] = struct{}{}
}
if len(labelNames) != len(labelNameSet) {
d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName)
@ -189,11 +189,19 @@ func (d *Desc) String() string {
fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
)
}
vlStrings := make([]string, 0, len(d.variableLabels.names))
for _, vl := range d.variableLabels.names {
if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil {
vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl))
} else {
vlStrings = append(vlStrings, vl)
}
}
return fmt.Sprintf(
"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: {%s}}",
d.fqName,
d.help,
strings.Join(lpStrings, ","),
d.variableLabels,
strings.Join(vlStrings, ","),
)
}

View file

@ -48,7 +48,7 @@ func (e *expvarCollector) Collect(ch chan<- Metric) {
continue
}
var v interface{}
labels := make([]string, len(desc.variableLabels))
labels := make([]string, len(desc.variableLabels.names))
if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
ch <- NewInvalidMetric(desc, err)
continue

View file

@ -62,7 +62,7 @@ type GaugeVecOpts struct {
GaugeOpts
// VariableLabels are used to partition the metric vector by the given set
// of labels. Each label value will be constrained with the optional Contraint
// of labels. Each label value will be constrained with the optional Constraint
// function, if provided.
VariableLabels ConstrainableLabels
}
@ -135,7 +135,7 @@ func (g *gauge) Sub(val float64) {
func (g *gauge) Write(out *dto.Metric) error {
val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
return populateMetric(GaugeValue, val, g.labelPairs, nil, out)
return populateMetric(GaugeValue, val, g.labelPairs, nil, out, nil)
}
// GaugeVec is a Collector that bundles a set of Gauges that all share the same
@ -166,8 +166,8 @@ func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec {
)
return &GaugeVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
if len(lvs) != len(desc.variableLabels) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs))
if len(lvs) != len(desc.variableLabels.names) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
}
result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
result.init(result) // Init self-collection.

View file

@ -25,6 +25,7 @@ import (
dto "github.com/prometheus/client_model/go"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/timestamppb"
)
// nativeHistogramBounds for the frac of observed values. Only relevant for
@ -391,7 +392,7 @@ type HistogramOpts struct {
// zero, it is replaced by default buckets. The default buckets are
// DefBuckets if no buckets for a native histogram (see below) are used,
// otherwise the default is no buckets. (In other words, if you want to
// use both reguler buckets and buckets for a native histogram, you have
// use both regular buckets and buckets for a native histogram, you have
// to define the regular buckets here explicitly.)
Buckets []float64
@ -413,8 +414,8 @@ type HistogramOpts struct {
// and 2, same as between 2 and 4, and 4 and 8, etc.).
//
// Details about the actually used factor: The factor is calculated as
// 2^(2^n), where n is an integer number between (and including) -8 and
// 4. n is chosen so that the resulting factor is the largest that is
// 2^(2^-n), where n is an integer number between (and including) -4 and
// 8. n is chosen so that the resulting factor is the largest that is
// still smaller or equal to NativeHistogramBucketFactor. Note that the
// smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8)
// ). If NativeHistogramBucketFactor is greater than 1 but smaller than
@ -428,12 +429,12 @@ type HistogramOpts struct {
// a major version bump.
NativeHistogramBucketFactor float64
// All observations with an absolute value of less or equal
// NativeHistogramZeroThreshold are accumulated into a “zero”
// bucket. For best results, this should be close to a bucket
// boundary. This is usually the case if picking a power of two. If
// NativeHistogramZeroThreshold are accumulated into a “zero” bucket.
// For best results, this should be close to a bucket boundary. This is
// usually the case if picking a power of two. If
// NativeHistogramZeroThreshold is left at zero,
// DefNativeHistogramZeroThreshold is used as the threshold. To configure
// a zero bucket with an actual threshold of zero (i.e. only
// DefNativeHistogramZeroThreshold is used as the threshold. To
// configure a zero bucket with an actual threshold of zero (i.e. only
// observations of precisely zero will go into the zero bucket), set
// NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
// constant (or any negative float value).
@ -446,26 +447,34 @@ type HistogramOpts struct {
// Histogram are sufficiently wide-spread. In particular, this could be
// used as a DoS attack vector. Where the observed values depend on
// external inputs, it is highly recommended to set a
// NativeHistogramMaxBucketNumber.) Once the set
// NativeHistogramMaxBucketNumber.) Once the set
// NativeHistogramMaxBucketNumber is exceeded, the following strategy is
// enacted: First, if the last reset (or the creation) of the histogram
// is at least NativeHistogramMinResetDuration ago, then the whole
// histogram is reset to its initial state (including regular
// buckets). If less time has passed, or if
// NativeHistogramMinResetDuration is zero, no reset is
// performed. Instead, the zero threshold is increased sufficiently to
// reduce the number of buckets to or below
// NativeHistogramMaxBucketNumber, but not to more than
// NativeHistogramMaxZeroThreshold. Thus, if
// NativeHistogramMaxZeroThreshold is already at or below the current
// zero threshold, nothing happens at this step. After that, if the
// number of buckets still exceeds NativeHistogramMaxBucketNumber, the
// resolution of the histogram is reduced by doubling the width of the
// sparse buckets (up to a growth factor between one bucket to the next
// of 2^(2^4) = 65536, see above).
// enacted:
// - First, if the last reset (or the creation) of the histogram is at
// least NativeHistogramMinResetDuration ago, then the whole
// histogram is reset to its initial state (including regular
// buckets).
// - If less time has passed, or if NativeHistogramMinResetDuration is
// zero, no reset is performed. Instead, the zero threshold is
// increased sufficiently to reduce the number of buckets to or below
// NativeHistogramMaxBucketNumber, but not to more than
// NativeHistogramMaxZeroThreshold. Thus, if
// NativeHistogramMaxZeroThreshold is already at or below the current
// zero threshold, nothing happens at this step.
// - After that, if the number of buckets still exceeds
// NativeHistogramMaxBucketNumber, the resolution of the histogram is
// reduced by doubling the width of the sparse buckets (up to a
// growth factor between one bucket to the next of 2^(2^4) = 65536,
// see above).
// - Any increased zero threshold or reduced resolution is reset back
// to their original values once NativeHistogramMinResetDuration has
// passed (since the last reset or the creation of the histogram).
NativeHistogramMaxBucketNumber uint32
NativeHistogramMinResetDuration time.Duration
NativeHistogramMaxZeroThreshold float64
// now is for testing purposes, by default it's time.Now.
now func() time.Time
}
// HistogramVecOpts bundles the options to create a HistogramVec metric.
@ -475,7 +484,7 @@ type HistogramVecOpts struct {
HistogramOpts
// VariableLabels are used to partition the metric vector by the given set
// of labels. Each label value will be constrained with the optional Contraint
// of labels. Each label value will be constrained with the optional Constraint
// function, if provided.
VariableLabels ConstrainableLabels
}
@ -499,12 +508,12 @@ func NewHistogram(opts HistogramOpts) Histogram {
}
func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
if len(desc.variableLabels) != len(labelValues) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues))
if len(desc.variableLabels.names) != len(labelValues) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
}
for _, n := range desc.variableLabels {
if n.Name == bucketLabel {
for _, n := range desc.variableLabels.names {
if n == bucketLabel {
panic(errBucketLabelNotAllowed)
}
}
@ -514,6 +523,10 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
}
}
if opts.now == nil {
opts.now = time.Now
}
h := &histogram{
desc: desc,
upperBounds: opts.Buckets,
@ -521,8 +534,8 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
nativeHistogramMaxBuckets: opts.NativeHistogramMaxBucketNumber,
nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold,
nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration,
lastResetTime: time.Now(),
now: time.Now,
lastResetTime: opts.now(),
now: opts.now,
}
if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 {
h.upperBounds = DefBuckets
@ -701,9 +714,11 @@ type histogram struct {
nativeHistogramMaxZeroThreshold float64
nativeHistogramMaxBuckets uint32
nativeHistogramMinResetDuration time.Duration
lastResetTime time.Time // Protected by mtx.
// lastResetTime is protected by mtx. It is also used as created timestamp.
lastResetTime time.Time
now func() time.Time // To mock out time.Now() for testing.
// now is for testing purposes, by default it's time.Now.
now func() time.Time
}
func (h *histogram) Desc() *Desc {
@ -742,9 +757,10 @@ func (h *histogram) Write(out *dto.Metric) error {
waitForCooldown(count, coldCounts)
his := &dto.Histogram{
Bucket: make([]*dto.Bucket, len(h.upperBounds)),
SampleCount: proto.Uint64(count),
SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
Bucket: make([]*dto.Bucket, len(h.upperBounds)),
SampleCount: proto.Uint64(count),
SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
CreatedTimestamp: timestamppb.New(h.lastResetTime),
}
out.Histogram = his
out.Label = h.labelPairs
@ -782,6 +798,16 @@ func (h *histogram) Write(out *dto.Metric) error {
his.ZeroCount = proto.Uint64(zeroBucket)
his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative)
his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive)
// Add a no-op span to a histogram without observations and with
// a zero threshold of zero. Otherwise, a native histogram would
// look like a classic histogram to scrapers.
if *his.ZeroThreshold == 0 && *his.ZeroCount == 0 && len(his.PositiveSpan) == 0 && len(his.NegativeSpan) == 0 {
his.PositiveSpan = []*dto.BucketSpan{{
Offset: proto.Int32(0),
Length: proto.Uint32(0),
}}
}
}
addAndResetCounts(hotCounts, coldCounts)
return nil
@ -854,20 +880,23 @@ func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket
h.doubleBucketWidth(hotCounts, coldCounts)
}
// maybeReset resests the whole histogram if at least h.nativeHistogramMinResetDuration
// maybeReset resets the whole histogram if at least h.nativeHistogramMinResetDuration
// has been passed. It returns true if the histogram has been reset. The caller
// must have locked h.mtx.
func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int) bool {
func (h *histogram) maybeReset(
hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int,
) bool {
// We are using the possibly mocked h.now() rather than
// time.Since(h.lastResetTime) to enable testing.
if h.nativeHistogramMinResetDuration == 0 || h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
if h.nativeHistogramMinResetDuration == 0 ||
h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
return false
}
// Completely reset coldCounts.
h.resetCounts(cold)
// Repeat the latest observation to not lose it completely.
cold.observe(value, bucket, true)
// Make coldCounts the new hot counts while ressetting countAndHotIdx.
// Make coldCounts the new hot counts while resetting countAndHotIdx.
n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1)
count := n & ((1 << 63) - 1)
waitForCooldown(count, hot)
@ -1176,6 +1205,7 @@ type constHistogram struct {
sum float64
buckets map[float64]uint64
labelPairs []*dto.LabelPair
createdTs *timestamppb.Timestamp
}
func (h *constHistogram) Desc() *Desc {
@ -1183,7 +1213,9 @@ func (h *constHistogram) Desc() *Desc {
}
func (h *constHistogram) Write(out *dto.Metric) error {
his := &dto.Histogram{}
his := &dto.Histogram{
CreatedTimestamp: h.createdTs,
}
buckets := make([]*dto.Bucket, 0, len(h.buckets))
@ -1230,7 +1262,7 @@ func NewConstHistogram(
if desc.err != nil {
return nil, desc.err
}
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
return nil, err
}
return &constHistogram{
@ -1324,7 +1356,7 @@ func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) {
// Multiple spans with only small gaps in between are probably
// encoded more efficiently as one larger span with a few empty
// buckets. Needs some research to find the sweet spot. For now,
// we assume that gaps of one ore two buckets should not create
// we assume that gaps of one or two buckets should not create
// a new span.
iDelta := int32(i - nextI)
if n == 0 || iDelta > 2 {


@ -14,7 +14,7 @@
// It provides tools to compare sequences of strings and generate textual diffs.
//
// Maintaining `GetUnifiedDiffString` here because original repository
// (https://github.com/pmezard/go-difflib) is no loger maintained.
// (https://github.com/pmezard/go-difflib) is no longer maintained.
package internal
import (


@ -32,19 +32,15 @@ import (
// create a Desc.
type Labels map[string]string
// LabelConstraint normalizes label values.
type LabelConstraint func(string) string
// ConstrainedLabels represents a label name and its constrain function
// to normalize label values. This type is commonly used when constructing
// metric vector Collectors.
type ConstrainedLabel struct {
Name string
Constraint func(string) string
}
func (cl ConstrainedLabel) Constrain(v string) string {
if cl.Constraint == nil {
return v
}
return cl.Constraint(v)
Constraint LabelConstraint
}
// ConstrainableLabels is an interface that allows creating of labels that can
@ -58,7 +54,7 @@ func (cl ConstrainedLabel) Constrain(v string) string {
// },
// })
type ConstrainableLabels interface {
constrainedLabels() ConstrainedLabels
compile() *compiledLabels
labelNames() []string
}
@ -67,8 +63,20 @@ type ConstrainableLabels interface {
// metric vector Collectors.
type ConstrainedLabels []ConstrainedLabel
func (cls ConstrainedLabels) constrainedLabels() ConstrainedLabels {
return cls
func (cls ConstrainedLabels) compile() *compiledLabels {
compiled := &compiledLabels{
names: make([]string, len(cls)),
labelConstraints: map[string]LabelConstraint{},
}
for i, label := range cls {
compiled.names[i] = label.Name
if label.Constraint != nil {
compiled.labelConstraints[label.Name] = label.Constraint
}
}
return compiled
}
func (cls ConstrainedLabels) labelNames() []string {
@ -92,18 +100,36 @@ func (cls ConstrainedLabels) labelNames() []string {
// }
type UnconstrainedLabels []string
func (uls UnconstrainedLabels) constrainedLabels() ConstrainedLabels {
constrainedLabels := make([]ConstrainedLabel, len(uls))
for i, l := range uls {
constrainedLabels[i] = ConstrainedLabel{Name: l}
func (uls UnconstrainedLabels) compile() *compiledLabels {
return &compiledLabels{
names: uls,
}
return constrainedLabels
}
func (uls UnconstrainedLabels) labelNames() []string {
return uls
}
type compiledLabels struct {
names []string
labelConstraints map[string]LabelConstraint
}
func (cls *compiledLabels) compile() *compiledLabels {
return cls
}
func (cls *compiledLabels) labelNames() []string {
return cls.names
}
func (cls *compiledLabels) constrain(labelName, value string) string {
if fn, ok := cls.labelConstraints[labelName]; ok && fn != nil {
return fn(value)
}
return value
}
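The compiled form above is built once per `Desc`; callers keep using the exported `ConstrainedLabels` and `LabelConstraint` types. A hedged sketch of a constraint in action, assuming the experimental `prometheus.V2.NewCounterVec(CounterVecOpts{...})` constructor that accepts `ConstrainableLabels` (metric and label names below are illustrative):

```go
package main

import (
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Values for the "method" label are normalized to lower case before the
	// child metric is looked up, so "GET" and "get" share one time series.
	reqs := prometheus.V2.NewCounterVec(prometheus.CounterVecOpts{
		CounterOpts: prometheus.CounterOpts{
			Name: "demo_requests_total",
			Help: "Demo counter with a constrained label.",
		},
		VariableLabels: prometheus.ConstrainedLabels{
			{Name: "method", Constraint: strings.ToLower},
		},
	})

	reqs.WithLabelValues("GET").Inc()
	reqs.WithLabelValues("get").Inc() // same series as the line above
}
```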
// reservedLabelPrefix is a prefix which is not legal in user-supplied
// label names.
const reservedLabelPrefix = "__"


@ -92,6 +92,9 @@ type Opts struct {
// machine_role metric). See also
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
ConstLabels Labels
// now is for testing purposes, by default it's time.Now.
now func() time.Time
}
// BuildFQName joins the given three name components by "_". Empty name


@ -17,7 +17,7 @@
// constructors register the Collectors with a registry before returning them.
// There are two sets of constructors. The constructors in the first set are
// top-level functions, while the constructors in the other set are methods of
// the Factory type. The top-level function return Collectors registered with
// the Factory type. The top-level functions return Collectors registered with
// the global registry (prometheus.DefaultRegisterer), while the methods return
// Collectors registered with the registry the Factory was constructed with. All
// constructors panic if the registration fails.
@ -85,7 +85,7 @@
// }
//
// A Factory is created with the With(prometheus.Registerer) function, which
// enables two usage pattern. With(prometheus.Registerer) can be called once per
// enables two usage patterns. With(prometheus.Registerer) can be called once per
// line:
//
// var (
@ -153,7 +153,7 @@
// importing a package.
//
// A separate package allows conservative users to entirely ignore it. And
// whoever wants to use it, will do so explicitly, with an opportunity to read
// whoever wants to use it will do so explicitly, with an opportunity to read
// this warning.
//
// Enjoy promauto responsibly!
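For reference, a small sketch of the two usage patterns the doc comment above describes — the global registry via the top-level functions, and an explicit registry via a Factory (metric names are made up):

```go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// Registered with the global prometheus.DefaultRegisterer.
var opsTotal = promauto.NewCounter(prometheus.CounterOpts{
	Name: "demo_ops_total",
	Help: "Demo counter registered globally.",
})

func main() {
	// Factory pattern: metrics registered with an explicit registry.
	reg := prometheus.NewRegistry()
	factory := promauto.With(reg)
	errsTotal := factory.NewCounter(prometheus.CounterOpts{
		Name: "demo_errors_total",
		Help: "Demo counter registered with a custom registry.",
	})

	opsTotal.Inc()
	errsTotal.Inc()
}
```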


@ -389,16 +389,13 @@ func isLabelCurried(c prometheus.Collector, label string) bool {
return true
}
// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
// unnecessary allocations on each request.
var emptyLabels = prometheus.Labels{}
func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels {
if !(code || method) {
return emptyLabels
}
labels := prometheus.Labels{}
if !(code || method) {
return labels
}
if code {
labels["code"] = sanitizeCode(status)
}
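The `labels` helper above feeds the promhttp middleware's partitioning by `code` and `method`. A short, hedged usage sketch of that middleware with a counter partitioned by both labels (names and port are illustrative):

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Counter partitioned by status code and method, matching the label
	// names the helper above fills in.
	reqs := promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "demo_http_requests_total",
		Help: "HTTP requests, partitioned by status code and method.",
	}, []string{"code", "method"})

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("ok"))
	})

	http.Handle("/", promhttp.InstrumentHandlerCounter(reqs, hello))
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```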


@ -548,7 +548,7 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
goroutineBudget--
runtime.Gosched()
}
// Once both checkedMetricChan and uncheckdMetricChan are closed
// Once both checkedMetricChan and uncheckedMetricChan are closed
// and drained, the contraption above will nil out cmc and umc,
// and then we can leave the collect loop here.
if cmc == nil && umc == nil {
@ -963,9 +963,9 @@ func checkDescConsistency(
// Is the desc consistent with the content of the metric?
lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label))
copy(lpsFromDesc, desc.constLabelPairs)
for _, l := range desc.variableLabels {
for _, l := range desc.variableLabels.names {
lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
Name: proto.String(l.Name),
Name: proto.String(l),
})
}
if len(lpsFromDesc) != len(dtoMetric.Label) {


@ -26,6 +26,7 @@ import (
"github.com/beorn7/perks/quantile"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/timestamppb"
)
// quantileLabel is used for the label that defines the quantile in a
@ -145,6 +146,9 @@ type SummaryOpts struct {
// is the internal buffer size of the underlying package
// "github.com/bmizerany/perks/quantile").
BufCap uint32
// now is for testing purposes, by default it's time.Now.
now func() time.Time
}
// SummaryVecOpts bundles the options to create a SummaryVec metric.
@ -154,7 +158,7 @@ type SummaryVecOpts struct {
SummaryOpts
// VariableLabels are used to partition the metric vector by the given set
// of labels. Each label value will be constrained with the optional Contraint
// of labels. Each label value will be constrained with the optional Constraint
// function, if provided.
VariableLabels ConstrainableLabels
}
@ -188,12 +192,12 @@ func NewSummary(opts SummaryOpts) Summary {
}
func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
if len(desc.variableLabels) != len(labelValues) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues))
if len(desc.variableLabels.names) != len(labelValues) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
}
for _, n := range desc.variableLabels {
if n.Name == quantileLabel {
for _, n := range desc.variableLabels.names {
if n == quantileLabel {
panic(errQuantileLabelNotAllowed)
}
}
@ -222,6 +226,9 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
opts.BufCap = DefBufCap
}
if opts.now == nil {
opts.now = time.Now
}
if len(opts.Objectives) == 0 {
// Use the lock-free implementation of a Summary without objectives.
s := &noObjectivesSummary{
@ -230,6 +237,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
counts: [2]*summaryCounts{{}, {}},
}
s.init(s) // Init self-collection.
s.createdTs = timestamppb.New(opts.now())
return s
}
@ -245,7 +253,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
coldBuf: make([]float64, 0, opts.BufCap),
streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
}
s.headStreamExpTime = time.Now().Add(s.streamDuration)
s.headStreamExpTime = opts.now().Add(s.streamDuration)
s.hotBufExpTime = s.headStreamExpTime
for i := uint32(0); i < opts.AgeBuckets; i++ {
@ -259,6 +267,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
sort.Float64s(s.sortedObjectives)
s.init(s) // Init self-collection.
s.createdTs = timestamppb.New(opts.now())
return s
}
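With the change above, each Summary records a creation time via the mockable `opts.now` and exposes it in the protobuf payload. A small sketch of reading it back, relying on the `GetCreatedTimestamp` accessor added by the bumped client_model code further below (metric name is made up):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	s := prometheus.NewSummary(prometheus.SummaryOpts{
		Name: "demo_duration_seconds",
		Help: "Demo summary with a created timestamp.",
	})
	s.Observe(0.42)

	var m dto.Metric
	if err := s.Write(&m); err != nil { // Summary embeds the Metric interface
		panic(err)
	}
	// The created timestamp is set once, at construction time.
	fmt.Println(m.GetSummary().GetCreatedTimestamp().AsTime())
}
```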
@ -286,6 +295,8 @@ type summary struct {
headStream *quantile.Stream
headStreamIdx int
headStreamExpTime, hotBufExpTime time.Time
createdTs *timestamppb.Timestamp
}
func (s *summary) Desc() *Desc {
@ -307,7 +318,9 @@ func (s *summary) Observe(v float64) {
}
func (s *summary) Write(out *dto.Metric) error {
sum := &dto.Summary{}
sum := &dto.Summary{
CreatedTimestamp: s.createdTs,
}
qs := make([]*dto.Quantile, 0, len(s.objectives))
s.bufMtx.Lock()
@ -440,6 +453,8 @@ type noObjectivesSummary struct {
counts [2]*summaryCounts
labelPairs []*dto.LabelPair
createdTs *timestamppb.Timestamp
}
func (s *noObjectivesSummary) Desc() *Desc {
@ -490,8 +505,9 @@ func (s *noObjectivesSummary) Write(out *dto.Metric) error {
}
sum := &dto.Summary{
SampleCount: proto.Uint64(count),
SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
SampleCount: proto.Uint64(count),
SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
CreatedTimestamp: s.createdTs,
}
out.Summary = sum
@ -681,6 +697,7 @@ type constSummary struct {
sum float64
quantiles map[float64]float64
labelPairs []*dto.LabelPair
createdTs *timestamppb.Timestamp
}
func (s *constSummary) Desc() *Desc {
@ -688,7 +705,9 @@ func (s *constSummary) Desc() *Desc {
}
func (s *constSummary) Write(out *dto.Metric) error {
sum := &dto.Summary{}
sum := &dto.Summary{
CreatedTimestamp: s.createdTs,
}
qs := make([]*dto.Quantile, 0, len(s.quantiles))
sum.SampleCount = proto.Uint64(s.count)
@ -737,7 +756,7 @@ func NewConstSummary(
if desc.err != nil {
return nil, desc.err
}
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
return nil, err
}
return &constSummary{


@ -14,6 +14,7 @@
package prometheus
import (
"errors"
"fmt"
"sort"
"time"
@ -91,7 +92,7 @@ func (v *valueFunc) Desc() *Desc {
}
func (v *valueFunc) Write(out *dto.Metric) error {
return populateMetric(v.valType, v.function(), v.labelPairs, nil, out)
return populateMetric(v.valType, v.function(), v.labelPairs, nil, out, nil)
}
// NewConstMetric returns a metric with one fixed value that cannot be
@ -105,12 +106,12 @@ func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues
if desc.err != nil {
return nil, desc.err
}
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
return nil, err
}
metric := &dto.Metric{}
if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric); err != nil {
if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, nil); err != nil {
return nil, err
}
@ -130,6 +131,43 @@ func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelVal
return m
}
// NewConstMetricWithCreatedTimestamp does the same thing as NewConstMetric, but generates Counters
// with created timestamp set and returns an error for other metric types.
func NewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) (Metric, error) {
if desc.err != nil {
return nil, desc.err
}
if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
return nil, err
}
switch valueType {
case CounterValue:
break
default:
return nil, errors.New("created timestamps are only supported for counters")
}
metric := &dto.Metric{}
if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, timestamppb.New(ct)); err != nil {
return nil, err
}
return &constMetric{
desc: desc,
metric: metric,
}, nil
}
// MustNewConstMetricWithCreatedTimestamp is a version of NewConstMetricWithCreatedTimestamp that panics where
// NewConstMetricWithCreatedTimestamp would have returned an error.
func MustNewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) Metric {
m, err := NewConstMetricWithCreatedTimestamp(desc, valueType, value, ct, labelValues...)
if err != nil {
panic(err)
}
return m
}
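The new constructor above accepts only `CounterValue`; other value types return an error. A brief usage sketch for the exporter-style case it targets — the descriptor, value, and start time below are hypothetical:

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var jobsDesc = prometheus.NewDesc(
	"demo_jobs_processed_total",
	"Jobs processed by a hypothetical backend.",
	[]string{"queue"}, nil,
)

func main() {
	// Hypothetical values an exporter would read from its backend: the
	// running total and the moment the backend's counter started.
	total := 1234.0
	started := time.Now().Add(-time.Hour)

	m := prometheus.MustNewConstMetricWithCreatedTimestamp(
		jobsDesc, prometheus.CounterValue, total, started, "default",
	)
	fmt.Println(m.Desc())
}
```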
type constMetric struct {
desc *Desc
metric *dto.Metric
@ -153,11 +191,12 @@ func populateMetric(
labelPairs []*dto.LabelPair,
e *dto.Exemplar,
m *dto.Metric,
ct *timestamppb.Timestamp,
) error {
m.Label = labelPairs
switch t {
case CounterValue:
m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e}
m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e, CreatedTimestamp: ct}
case GaugeValue:
m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
case UntypedValue:
@ -176,19 +215,19 @@ func populateMetric(
// This function is only needed for custom Metric implementations. See MetricVec
// example.
func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
totalLen := len(desc.variableLabels.names) + len(desc.constLabelPairs)
if totalLen == 0 {
// Super fast path.
return nil
}
if len(desc.variableLabels) == 0 {
if len(desc.variableLabels.names) == 0 {
// Moderately fast path.
return desc.constLabelPairs
}
labelPairs := make([]*dto.LabelPair, 0, totalLen)
for i, l := range desc.variableLabels {
for i, l := range desc.variableLabels.names {
labelPairs = append(labelPairs, &dto.LabelPair{
Name: proto.String(l.Name),
Name: proto.String(l),
Value: proto.String(labelValues[i]),
})
}


@ -20,24 +20,6 @@ import (
"github.com/prometheus/common/model"
)
var labelsPool = &sync.Pool{
New: func() interface{} {
return make(Labels)
},
}
func getLabelsFromPool() Labels {
return labelsPool.Get().(Labels)
}
func putLabelsToPool(labels Labels) {
for k := range labels {
delete(labels, k)
}
labelsPool.Put(labels)
}
// MetricVec is a Collector to bundle metrics of the same name that differ in
// their label values. MetricVec is not used directly but as a building block
// for implementations of vectors of a given metric type, like GaugeVec,
@ -91,6 +73,7 @@ func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
// See also the CounterVec example.
func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
lvs = constrainLabelValues(m.desc, lvs, m.curry)
h, err := m.hashLabelValues(lvs)
if err != nil {
return false
@ -110,8 +93,8 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
// This method is used for the same purpose as DeleteLabelValues(...string). See
// there for pros and cons of the two methods.
func (m *MetricVec) Delete(labels Labels) bool {
labels = constrainLabels(m.desc, labels)
defer putLabelsToPool(labels)
labels, closer := constrainLabels(m.desc, labels)
defer closer()
h, err := m.hashLabels(labels)
if err != nil {
@ -128,8 +111,8 @@ func (m *MetricVec) Delete(labels Labels) bool {
// Note that curried labels will never be matched if deleting from the curried vector.
// To match curried labels with DeletePartialMatch, it must be called on the base vector.
func (m *MetricVec) DeletePartialMatch(labels Labels) int {
labels = constrainLabels(m.desc, labels)
defer putLabelsToPool(labels)
labels, closer := constrainLabels(m.desc, labels)
defer closer()
return m.metricMap.deleteByLabels(labels, m.curry)
}
@ -169,11 +152,11 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
oldCurry = m.curry
iCurry int
)
for i, label := range m.desc.variableLabels {
val, ok := labels[label.Name]
for i, labelName := range m.desc.variableLabels.names {
val, ok := labels[labelName]
if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
if ok {
return nil, fmt.Errorf("label name %q is already curried", label.Name)
return nil, fmt.Errorf("label name %q is already curried", labelName)
}
newCurry = append(newCurry, oldCurry[iCurry])
iCurry++
@ -181,7 +164,10 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
if !ok {
continue // Label stays uncurried.
}
newCurry = append(newCurry, curriedLabelValue{i, label.Constrain(val)})
newCurry = append(newCurry, curriedLabelValue{
i,
m.desc.variableLabels.constrain(labelName, val),
})
}
}
if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
@ -250,8 +236,8 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
// around MetricVec, implementing a vector for a specific Metric implementation,
// for example GaugeVec.
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
labels = constrainLabels(m.desc, labels)
defer putLabelsToPool(labels)
labels, closer := constrainLabels(m.desc, labels)
defer closer()
h, err := m.hashLabels(labels)
if err != nil {
@ -262,7 +248,7 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
}
func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil {
if err := validateLabelValues(vals, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
return 0, err
}
@ -271,7 +257,7 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
curry = m.curry
iVals, iCurry int
)
for i := 0; i < len(m.desc.variableLabels); i++ {
for i := 0; i < len(m.desc.variableLabels.names); i++ {
if iCurry < len(curry) && curry[iCurry].index == i {
h = m.hashAdd(h, curry[iCurry].value)
iCurry++
@ -285,7 +271,7 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
}
func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil {
if err := validateValuesInLabels(labels, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
return 0, err
}
@ -294,17 +280,17 @@ func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
curry = m.curry
iCurry int
)
for i, label := range m.desc.variableLabels {
val, ok := labels[label.Name]
for i, labelName := range m.desc.variableLabels.names {
val, ok := labels[labelName]
if iCurry < len(curry) && curry[iCurry].index == i {
if ok {
return 0, fmt.Errorf("label name %q is already curried", label.Name)
return 0, fmt.Errorf("label name %q is already curried", labelName)
}
h = m.hashAdd(h, curry[iCurry].value)
iCurry++
} else {
if !ok {
return 0, fmt.Errorf("label name %q missing in label map", label.Name)
return 0, fmt.Errorf("label name %q missing in label map", labelName)
}
h = m.hashAdd(h, val)
}
@ -482,7 +468,7 @@ func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []
func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
for l, v := range labels {
// Check if the target label exists in our metrics and get the index.
varLabelIndex, validLabel := indexOf(l, desc.variableLabels.labelNames())
varLabelIndex, validLabel := indexOf(l, desc.variableLabels.names)
if validLabel {
// Check the value of that label against the target value.
// We don't consider curried values in partial matches.
@ -626,7 +612,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe
return false
}
iCurry := 0
for i, k := range desc.variableLabels {
for i, k := range desc.variableLabels.names {
if iCurry < len(curry) && curry[iCurry].index == i {
if values[i] != curry[iCurry].value {
return false
@ -634,7 +620,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe
iCurry++
continue
}
if values[i] != labels[k.Name] {
if values[i] != labels[k] {
return false
}
}
@ -644,13 +630,13 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe
func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string {
labelValues := make([]string, len(labels)+len(curry))
iCurry := 0
for i, k := range desc.variableLabels {
for i, k := range desc.variableLabels.names {
if iCurry < len(curry) && curry[iCurry].index == i {
labelValues[i] = curry[iCurry].value
iCurry++
continue
}
labelValues[i] = labels[k.Name]
labelValues[i] = labels[k]
}
return labelValues
}
@ -670,20 +656,37 @@ func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
return labelValues
}
func constrainLabels(desc *Desc, labels Labels) Labels {
constrainedLabels := getLabelsFromPool()
for l, v := range labels {
if i, ok := indexOf(l, desc.variableLabels.labelNames()); ok {
v = desc.variableLabels[i].Constrain(v)
}
var labelsPool = &sync.Pool{
New: func() interface{} {
return make(Labels)
},
}
constrainedLabels[l] = v
func constrainLabels(desc *Desc, labels Labels) (Labels, func()) {
if len(desc.variableLabels.labelConstraints) == 0 {
// Fast path when there's no constraints
return labels, func() {}
}
return constrainedLabels
constrainedLabels := labelsPool.Get().(Labels)
for l, v := range labels {
constrainedLabels[l] = desc.variableLabels.constrain(l, v)
}
return constrainedLabels, func() {
for k := range constrainedLabels {
delete(constrainedLabels, k)
}
labelsPool.Put(constrainedLabels)
}
}
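The rewritten `constrainLabels` above hands back the (possibly pooled) label map together with a release closure, so callers can `defer` the cleanup without caring whether the fast path or the pool was taken. A generic sketch of that acquire-and-release-via-closure pattern, illustrative only and not part of this package's API:

```go
package main

import (
	"fmt"
	"sync"
)

// Pool of reusable string->string maps, mirroring labelsPool above.
var mapPool = sync.Pool{
	New: func() interface{} { return make(map[string]string) },
}

// getScratchMap hands out a map plus a closer that clears it and returns it
// to the pool. Callers that hit a fast path receive a no-op closer instead.
func getScratchMap(needed bool) (map[string]string, func()) {
	if !needed {
		return nil, func() {} // fast path: nothing to release
	}
	m := mapPool.Get().(map[string]string)
	return m, func() {
		for k := range m {
			delete(m, k)
		}
		mapPool.Put(m)
	}
}

func main() {
	m, release := getScratchMap(true)
	defer release()
	m["code"] = "200"
	fmt.Println(m)
}
```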
func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string {
if len(desc.variableLabels.labelConstraints) == 0 {
// Fast path when there's no constraints
return lvs
}
constrainedValues := make([]string, len(lvs))
var iCurry, iLVs int
for i := 0; i < len(lvs)+len(curry); i++ {
@ -692,8 +695,11 @@ func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) [
continue
}
if i < len(desc.variableLabels) {
constrainedValues[iLVs] = desc.variableLabels[i].Constrain(lvs[iLVs])
if i < len(desc.variableLabels.names) {
constrainedValues[iLVs] = desc.variableLabels.constrain(
desc.variableLabels.names[i],
lvs[iLVs],
)
} else {
constrainedValues[iLVs] = lvs[iLVs]
}


@ -215,8 +215,9 @@ type Counter struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
}
func (x *Counter) Reset() {
@ -265,6 +266,13 @@ func (x *Counter) GetExemplar() *Exemplar {
return nil
}
func (x *Counter) GetCreatedTimestamp() *timestamppb.Timestamp {
if x != nil {
return x.CreatedTimestamp
}
return nil
}
type Quantile struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@ -325,9 +333,10 @@ type Summary struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
}
func (x *Summary) Reset() {
@ -383,6 +392,13 @@ func (x *Summary) GetQuantile() []*Quantile {
return nil
}
func (x *Summary) GetCreatedTimestamp() *timestamppb.Timestamp {
if x != nil {
return x.CreatedTimestamp
}
return nil
}
type Untyped struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@ -439,7 +455,8 @@ type Histogram struct {
SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0.
SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
// Buckets for the conventional histogram.
Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional.
Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional.
CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
// schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
// They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
// then each power of two is divided into 2^n logarithmic buckets.
@ -525,6 +542,13 @@ func (x *Histogram) GetBucket() []*Bucket {
return nil
}
func (x *Histogram) GetCreatedTimestamp() *timestamppb.Timestamp {
if x != nil {
return x.CreatedTimestamp
}
return nil
}
func (x *Histogram) GetSchema() int32 {
if x != nil && x.Schema != nil {
return *x.Schema
@ -972,137 +996,151 @@ var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x22, 0x1d, 0x0a, 0x05, 0x47, 0x61, 0x75, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76,
0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
0x65, 0x22, 0x5b, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05,
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, 0x02,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74,
0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d,
0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x22, 0x3c,
0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75,
0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75,
0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x87, 0x01, 0x0a,
0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70,
0x65, 0x22, 0xa4, 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a,
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61,
0x6c, 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18,
0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65,
0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12,
0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73,
0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54,
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x3c, 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e,
0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52,
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xd0, 0x01, 0x0a, 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61,
0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75,
0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65,
0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f,
0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c,
0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75,
0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
0x12, 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65,
0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74,
0x79, 0x70, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xac, 0x05, 0x0a, 0x09, 0x48,
0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70,
0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b,
0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73,
0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52,
0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75,
0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69,
0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75,
0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65,
0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01,
0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xe3, 0x04, 0x0a, 0x09, 0x48, 0x69, 0x73, 0x74,
0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f,
0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d,
0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73, 0x61, 0x6d, 0x70,
0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04,
0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e,
0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65,
0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70,
0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18,
0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63,
0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73,
0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68,
0x65, 0x6d, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65,
0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72,
0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65,
0x72, 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09,
0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72,
0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20,
0x01, 0x28, 0x01, 0x52, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c,
0x6f, 0x61, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f,
0x73, 0x70, 0x61, 0x6e, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e,
0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65,
0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65,
0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03,
0x28, 0x12, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74,
0x61, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f,
0x75, 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74,
0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69,
0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32,
0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e,
0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61,
0x6e, 0x52, 0x0c, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12,
0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74,
0x61, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76,
0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69,
0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d,
0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01,
0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75,
0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01,
0x28, 0x04, 0x52, 0x0f, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f,
0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76,
0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20,
0x01, 0x28, 0x01, 0x52, 0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43,
0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70,
0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a,
0x75, 0x70, 0x70, 0x65, 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78,
0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69,
0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78,
0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
0x53, 0x70, 0x61, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01,
0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06,
0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65,
0x6e, 0x67, 0x74, 0x68, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61,
0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69,
0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38,
0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74,
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74,
0x72, 0x69, 0x63, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03,
0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65,
0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50,
0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61,
0x75, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70,
0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
0x2e, 0x47, 0x61, 0x75, 0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a,
0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d,
0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63,
0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63,
0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72,
0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f,
0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53,
0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12,
0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52,
0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74,
0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f,
0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73,
0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61,
0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43,
0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d,
0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73,
0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b,
0x65, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x47,
0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74,
0x61, 0x6d, 0x70, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69,
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d,
0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12,
0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c,
0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72,
0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63,
0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x7a, 0x65, 0x72, 0x6f,
0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, 0x6f,
0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52,
0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12,
0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e,
0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75,
0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69,
0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69,
0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d,
0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a,
0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43,
0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65,
0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f,
0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65,
0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69,
0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73,
0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74,
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d,
0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e,
0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68,
0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75,
0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54,
0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74,
0x72, 0x69, 0x63, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70,
0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a,
0x62, 0x0a, 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a,
0x07, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41,
0x55, 0x47, 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59,
0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12,
0x0d, 0x0a, 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13,
0x0a, 0x0f, 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41,
0x4d, 0x10, 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74,
0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74,
0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65,
0x75, 0x73, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f,
0x67, 0x6f, 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x70,
0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x70,
0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0d, 0x20,
0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c,
0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63,
0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69,
0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75,
0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69,
0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f,
0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12,
0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f,
0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52,
0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74,
0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62,
0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65,
0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c,
0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c,
0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e,
0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11,
0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67,
0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68,
0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a,
0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69,
0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c,
0x61, 0x62, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69,
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73,
0x74, 0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12,
0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f,
0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63,
0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52,
0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18,
0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75,
0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75,
0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e,
0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
0x74, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74,
0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61,
0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75,
0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69,
0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74,
0x79, 0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61,
0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f,
0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48,
0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67,
0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73,
0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69,
0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68,
0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12,
0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e,
0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c,
0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52,
0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18,
0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74,
0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a, 0x62, 0x0a, 0x0a, 0x4d,
0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, 0x55,
0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45, 0x10,
0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02, 0x12, 0x0b,
0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x48,
0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x47, 0x41,
0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x05, 0x42,
0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, 0x63,
0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f, 0x3b, 0x69,
0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63, 0x6c, 0x69,
0x65, 0x6e, 0x74,
}
var (
@ -1137,26 +1175,29 @@ var file_io_prometheus_client_metrics_proto_goTypes = []interface{}{
}
var file_io_prometheus_client_metrics_proto_depIdxs = []int32{
10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar
4, // 1: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile
8, // 2: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket
9, // 3: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan
9, // 4: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan
10, // 5: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar
1, // 6: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair
13, // 7: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp
1, // 8: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair
2, // 9: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge
3, // 10: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter
5, // 11: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary
6, // 12: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped
7, // 13: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram
0, // 14: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType
11, // 15: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric
16, // [16:16] is the sub-list for method output_type
16, // [16:16] is the sub-list for method input_type
16, // [16:16] is the sub-list for extension type_name
16, // [16:16] is the sub-list for extension extendee
0, // [0:16] is the sub-list for field type_name
13, // 1: io.prometheus.client.Counter.created_timestamp:type_name -> google.protobuf.Timestamp
4, // 2: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile
13, // 3: io.prometheus.client.Summary.created_timestamp:type_name -> google.protobuf.Timestamp
8, // 4: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket
13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> google.protobuf.Timestamp
9, // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan
9, // 7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan
10, // 8: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar
1, // 9: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair
13, // 10: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp
1, // 11: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair
2, // 12: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge
3, // 13: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter
5, // 14: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary
6, // 15: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped
7, // 16: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram
0, // 17: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType
11, // 18: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric
19, // [19:19] is the sub-list for method output_type
19, // [19:19] is the sub-list for method input_type
19, // [19:19] is the sub-list for extension type_name
19, // [19:19] is the sub-list for extension extendee
0, // [0:19] is the sub-list for field type_name
}
func init() { file_io_prometheus_client_metrics_proto_init() }


@ -2,6 +2,7 @@
linters:
enable:
- godot
- misspell
- revive
linter-settings:
@ -10,3 +11,5 @@ linter-settings:
exclude:
# Ignore "See: URL"
- 'See:'
misspell:
locale: US


@@ -49,19 +49,19 @@ endif
GOTEST := $(GO) test
GOTEST_DIR :=
ifneq ($(CIRCLE_JOB),)
ifneq ($(shell which gotestsum),)
ifneq ($(shell command -v gotestsum > /dev/null),)
GOTEST_DIR := test-results
GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
endif
endif
PROMU_VERSION ?= 0.14.0
PROMU_VERSION ?= 0.15.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.51.2
GOLANGCI_LINT_VERSION ?= v1.53.3
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@@ -178,7 +178,7 @@ endif
.PHONY: common-yamllint
common-yamllint:
@echo ">> running yamllint on all YAML files in the repository"
ifeq (, $(shell which yamllint))
ifeq (, $(shell command -v yamllint > /dev/null))
@echo "yamllint not installed so skipping"
else
yamllint .


@@ -51,11 +51,11 @@ ensure the `fixtures` directory is up to date by removing the existing directory
extracting the ttar file using `make fixtures/.unpacked` or just `make test`.
```bash
rm -rf fixtures
rm -rf testdata/fixtures
make test
```
Next, make the required changes to the extracted files in the `fixtures` directory. When
the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
based on the updated `fixtures` directory. And finally, verify the changes using
`git diff fixtures.ttar`.
`git diff testdata/fixtures.ttar`.
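The fixture tree moving under `testdata/` presumably leans on the Go toolchain convention that directories named `testdata` are ignored when matching packages, so the extracted files cannot be mistaken for source; only the paths in the commands above change.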


@@ -55,7 +55,7 @@ type ARPEntry struct {
func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
data, err := os.ReadFile(fs.proc.Path("net/arp"))
if err != nil {
return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err)
return nil, fmt.Errorf("%s: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
}
return parseARPEntries(data)
@@ -78,11 +78,11 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) {
} else if width == expectedDataWidth {
entry, err := parseARPEntry(columns)
if err != nil {
return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %w", err)
return []ARPEntry{}, fmt.Errorf("%s: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err)
}
entries = append(entries, entry)
} else {
return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth)
return []ARPEntry{}, fmt.Errorf("%s: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err)
}
}


@@ -55,7 +55,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
parts := strings.Fields(line)
if len(parts) < 4 {
return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo")
return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts)
}
node := strings.TrimRight(parts[1], ",")
@@ -66,7 +66,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
bucketCount = arraySize
} else {
if bucketCount != arraySize {
return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize)
return nil, fmt.Errorf("%w: mismatch in number of buddyinfo buckets, previous count %d, new count %d", ErrFileParse, bucketCount, arraySize)
}
}
@@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
for i := 0; i < arraySize; i++ {
sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
if err != nil {
return nil, fmt.Errorf("invalid value in buddyinfo: %w", err)
return nil, fmt.Errorf("%s: Invalid value in buddyinfo: %f: %w", ErrFileParse, sizes[i], err)
}
}
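This and the neighbouring procfs hunks replace free-form error strings with the exported sentinels `ErrFileRead` and `ErrFileParse`. A minimal sketch of how a caller might branch on them after the bump; note that `errors.Is` only matches where the sentinel is wrapped with `%w` (as in the buddyinfo lines above), while hunks that format it with `%s` change only the message text:

```go
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	info, err := fs.BuddyInfo()
	switch {
	case errors.Is(err, procfs.ErrFileParse):
		// Malformed /proc/buddyinfo; the sentinel is %w-wrapped above.
		log.Fatalf("parse failure: %v", err)
	case err != nil:
		log.Fatalf("reading buddyinfo: %v", err)
	}
	fmt.Printf("parsed buddyinfo for %d zones\n", len(info))
}
```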


@@ -79,7 +79,7 @@ func parseCPUInfoX86(info []byte) ([]CPUInfo, error) {
// find the first "processor" line
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
@@ -192,9 +192,10 @@ func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
firstLine := firstNonEmptyLine(scanner)
match, _ := regexp.MatchString("^[Pp]rocessor", firstLine)
match, err := regexp.MatchString("^[Pp]rocessor", firstLine)
if !match || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
return nil, fmt.Errorf("%s: Cannot parse line: %q: %w", ErrFileParse, firstLine, err)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
@@ -258,7 +259,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
@@ -283,7 +284,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
if strings.HasPrefix(line, "processor") {
match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line)
if len(match) < 2 {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
}
cpu := commonCPUInfo
v, err := strconv.ParseUint(match[1], 0, 32)
@@ -343,7 +344,7 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
// find the first "processor" line
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
@@ -421,7 +422,7 @@ func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
@@ -466,7 +467,7 @@ func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) {
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)


@@ -55,12 +55,13 @@ func (fs FS) Crypto() ([]Crypto, error) {
path := fs.proc.Path("crypto")
b, err := util.ReadFileNoStat(path)
if err != nil {
return nil, fmt.Errorf("error reading crypto %q: %w", path, err)
return nil, fmt.Errorf("%s: Cannot read file %v: %w", ErrFileRead, b, err)
}
crypto, err := parseCrypto(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("error parsing crypto %q: %w", path, err)
return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, crypto, err)
}
return crypto, nil
@@ -83,7 +84,7 @@ func parseCrypto(r io.Reader) ([]Crypto, error) {
kv := strings.Split(text, ":")
if len(kv) != 2 {
return nil, fmt.Errorf("malformed crypto line: %q", text)
return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, text)
}
k := strings.TrimSpace(kv[0])


@@ -20,8 +20,8 @@ import (
// FS represents the pseudo-filesystem sys, which provides an interface to
// kernel data structures.
type FS struct {
proc fs.FS
real bool
proc fs.FS
isReal bool
}
// DefaultMountPoint is the common mount point of the proc filesystem.
@@ -41,10 +41,10 @@ func NewFS(mountPoint string) (FS, error) {
return FS{}, err
}
real, err := isRealProc(mountPoint)
isReal, err := isRealProc(mountPoint)
if err != nil {
return FS{}, err
}
return FS{fs, real}, nil
return FS{fs, isReal}, nil
}


@@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build netbsd || openbsd || solaris || windows
// +build netbsd openbsd solaris windows
//go:build netbsd || openbsd || solaris || windows || nostatfs
// +build netbsd openbsd solaris windows nostatfs
package procfs


@@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !netbsd && !openbsd && !solaris && !windows
// +build !netbsd,!openbsd,!solaris,!windows
//go:build !netbsd && !openbsd && !solaris && !windows && !nostatfs
// +build !netbsd,!openbsd,!solaris,!windows,!nostatfs
package procfs
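The two build-constraint hunks above add a `nostatfs` tag to both the positive and negative halves of the constraint, so consumers can presumably skip the statfs-based check that the mount point is a real procfs by building with `go build -tags nostatfs`; no source changes are required on the caller side.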


@@ -236,7 +236,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
m, err := parseFscacheinfo(bytes.NewReader(b))
if err != nil {
return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %w", err)
return Fscacheinfo{}, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, m, err)
}
return *m, nil
@@ -245,7 +245,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
func setFSCacheFields(fields []string, setFields ...*uint64) error {
var err error
if len(fields) < len(setFields) {
return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields))
return fmt.Errorf("%s: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err)
}
for i := range setFields {
@@ -263,7 +263,7 @@ func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) {
for s.Scan() {
fields := strings.Fields(s.Text())
if len(fields) < 2 {
return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text())
return nil, fmt.Errorf("%w: malformed Fscacheinfo line: %q", ErrFileParse, s.Text())
}
switch fields[0] {


@@ -221,15 +221,16 @@ func parseIPPort(s string) (net.IP, uint16, error) {
case 46:
ip = net.ParseIP(s[1:40])
if ip == nil {
return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40])
return nil, 0, fmt.Errorf("%s: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err)
}
default:
return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s)
return nil, 0, fmt.Errorf("%s: Unexpected IP:Port %s: %w", ErrFileParse, s, err)
}
portString := s[len(s)-4:]
if len(portString) != 4 {
return nil, 0, fmt.Errorf("unexpected port string format: %s", portString)
return nil, 0,
fmt.Errorf("%s: Unexpected port string format %s: %w", ErrFileParse, portString, err)
}
port, err := strconv.ParseUint(portString, 16, 16)
if err != nil {


@@ -44,14 +44,14 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
loads := make([]float64, 3)
parts := strings.Fields(string(loadavgBytes))
if len(parts) < 3 {
return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %q", string(loadavgBytes))
return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, string(loadavgBytes))
}
var err error
for i, load := range parts[0:3] {
loads[i], err = strconv.ParseFloat(load, 64)
if err != nil {
return nil, fmt.Errorf("could not parse load %q: %w", load, err)
return nil, fmt.Errorf("%s: Cannot parse load: %f: %w", ErrFileParse, loads[i], err)
}
}
return &LoadAvg{


@@ -70,7 +70,7 @@ func (fs FS) MDStat() ([]MDStat, error) {
}
mdstat, err := parseMDStat(data)
if err != nil {
return nil, fmt.Errorf("error parsing mdstat %q: %w", fs.proc.Path("mdstat"), err)
return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err)
}
return mdstat, nil
}
@@ -90,13 +90,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
deviceFields := strings.Fields(line)
if len(deviceFields) < 3 {
return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line)
return nil, fmt.Errorf("%s: Expected 3+ lines, got %q", ErrFileParse, line)
}
mdName := deviceFields[0] // mdx
state := deviceFields[2] // active or inactive
if len(lines) <= i+3 {
return nil, fmt.Errorf("error parsing %q: too few lines for md device", mdName)
return nil, fmt.Errorf("%w: Too few lines for md device: %q", ErrFileParse, mdName)
}
// Failed disks have the suffix (F) & Spare disks have the suffix (S).
@@ -105,7 +105,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
active, total, down, size, err := evalStatusLine(lines[i], lines[i+1])
if err != nil {
return nil, fmt.Errorf("error parsing md device lines: %w", err)
return nil, fmt.Errorf("%s: Cannot parse md device lines: %v: %w", ErrFileParse, active, err)
}
syncLineIdx := i + 2
@@ -140,7 +140,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
} else {
syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
if err != nil {
return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err)
return nil, fmt.Errorf("%s: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err)
}
}
}
@@ -168,13 +168,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
statusFields := strings.Fields(statusLine)
if len(statusFields) < 1 {
return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q", statusLine)
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
}
sizeStr := statusFields[0]
size, err = strconv.ParseInt(sizeStr, 10, 64)
if err != nil {
return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
}
if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
@@ -189,17 +189,17 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in
matches := statusLineRE.FindStringSubmatch(statusLine)
if len(matches) != 5 {
return 0, 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine)
return 0, 0, 0, 0, fmt.Errorf("%s: Could not find all substring matches %s: %w", ErrFileParse, statusLine, err)
}
total, err = strconv.ParseInt(matches[2], 10, 64)
if err != nil {
return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
}
active, err = strconv.ParseInt(matches[3], 10, 64)
if err != nil {
return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected active %d: %w", ErrFileParse, active, err)
}
down = int64(strings.Count(matches[4], "_"))
@@ -209,42 +209,42 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in
func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) {
matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return 0, 0, 0, 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine)
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected recoveryLine %s: %w", ErrFileParse, recoveryLine, err)
}
syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
if err != nil {
return 0, 0, 0, 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err)
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected parsing of recoveryLine %q: %w", ErrFileParse, recoveryLine, err)
}
// Get percentage complete
matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return syncedBlocks, 0, 0, 0, fmt.Errorf("unexpected recoveryLine matching percentage: %s", recoveryLine)
return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine)
}
pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64)
if err != nil {
return syncedBlocks, 0, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine)
}
// Get time expected left to complete
matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return syncedBlocks, pct, 0, 0, fmt.Errorf("unexpected recoveryLine matching est. finish time: %s", recoveryLine)
return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine)
}
finish, err = strconv.ParseFloat(matches[1], 64)
if err != nil {
return syncedBlocks, pct, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine)
}
// Get recovery speed
matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return syncedBlocks, pct, finish, 0, fmt.Errorf("unexpected recoveryLine matching speed: %s", recoveryLine)
return syncedBlocks, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine)
}
speed, err = strconv.ParseFloat(matches[1], 64)
if err != nil {
return syncedBlocks, pct, finish, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
return syncedBlocks, pct, finish, 0, fmt.Errorf("%s: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
}
return syncedBlocks, pct, finish, speed, nil
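The mdstat hunks only touch error construction, but a short usage sketch may help; it assumes the `MDStat` fields keep their current names (`Name`, `ActivityState`, `DisksActive`, `DisksTotal`):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	devices, err := fs.MDStat()
	if err != nil {
		// A malformed /proc/mdstat now mentions ErrFileParse in its message.
		log.Fatal(err)
	}
	for _, md := range devices {
		fmt.Printf("%s: %s, %d/%d disks active\n",
			md.Name, md.ActivityState, md.DisksActive, md.DisksTotal)
	}
}
```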

Some files were not shown because too many files have changed in this diff.