upload/azure: migrate from azure-storage-blob-go to azure-sdk-for-go

https://github.com/Azure/azure-storage-blob-go/ is deprecated, the main SDK
should now be used instead. Let's migrate the code. There should be no
functional changes.

Signed-off-by: Ondřej Budai <ondrej@budai.cz>
This commit is contained in:
Ondřej Budai 2023-04-03 14:57:57 +02:00 committed by Ondřej Budai
parent 9beddf626f
commit abe6ccfb50
226 changed files with 29224 additions and 30426 deletions

6
go.mod
View file

@ -8,9 +8,8 @@ require (
cloud.google.com/go/cloudbuild v1.2.0
cloud.google.com/go/compute v1.10.0
cloud.google.com/go/storage v1.27.0
github.com/Azure/azure-pipeline-go v0.2.3
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible
github.com/Azure/azure-storage-blob-go v0.13.0
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0
github.com/Azure/go-autorest/autorest v0.11.28
github.com/Azure/go-autorest/autorest/azure/auth v0.5.11
github.com/BurntSushi/toml v1.2.1
@ -56,6 +55,8 @@ require (
require (
cloud.google.com/go v0.104.0 // indirect
cloud.google.com/go/iam v0.3.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.18 // indirect
github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect
@ -115,7 +116,6 @@ require (
github.com/letsencrypt/boulder v0.0.0-20220331220046-b23ab962616e // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-ieproxy v0.0.1 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect

21
go.sum
View file

@ -74,13 +74,16 @@ contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EU
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Antonboom/errname v0.1.5/go.mod h1:DugbBstvPFQbv/5uLcRRzfrNqKE9tVdVCqWCLp6Cifo=
github.com/Antonboom/nilnil v0.1.0/go.mod h1:PhHLvRPSghY5Y7mX4TW+BHZQYo1A8flE5H20D3IPZBo=
github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U=
github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-storage-blob-go v0.13.0 h1:lgWHvFh+UYBNVQLFHXkvul2f6yOPA9PIH82RTG2cSwc=
github.com/Azure/azure-storage-blob-go v0.13.0/go.mod h1:pA9kNqtjUeQF2zOSu4s//nUdBD+e64lEuc4sVnuOfNs=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.0 h1:VuHAcMq8pU1IWNT/m5yRaGqbK0BiQKHT8X4DTp9CHdI=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.0/go.mod h1:tZoQYdDZNOiIjdSn0dVWVfl0NEPGOJqVLzSrcFk4Is0=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1 h1:Oj853U9kG+RLTCQXpjvOnrv0WaZHxgmZz1TlLywgOPY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 h1:u/LLAOFgsMv7HmNL4Qufg58y+qElGOt5qv0z1mURkRY=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
@ -90,7 +93,6 @@ github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsC
github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM=
github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
github.com/Azure/go-autorest/autorest/adal v0.9.18 h1:kLnPsRjzZZUF3K5REu/Kc+qMQrvuza2bwSnNdhmzLfQ=
github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
@ -113,6 +115,7 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 h1:BWe8a+f/t+7KY7zH2mqygeUD0t8hNFXe08p1Pb3/jKE=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
@ -391,6 +394,7 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8
github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c=
github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
@ -889,6 +893,7 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U=
github.com/kunwardeep/paralleltest v1.0.3/go.mod h1:vLydzomDFpk7yu5UX02RmP0H8QfRPOV/oFhWN85Mjb4=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg=
github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg=
@ -938,8 +943,6 @@ github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope
github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI=
github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
@ -1116,6 +1119,7 @@ github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCko
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@ -1524,7 +1528,6 @@ golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@ -1649,7 +1652,6 @@ golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1680,7 +1682,6 @@ golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

View file

@ -10,16 +10,17 @@ import (
"net/url"
"os"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
// NOTE these are deprecated and will need replacement, see issue #2977
//nolint:staticcheck
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-09-01/network"
//nolint:staticcheck
"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure/auth"
"github.com/osbuild/osbuild-composer/internal/common"
"github.com/osbuild/osbuild-composer/internal/upload/azure"
)
@ -111,21 +112,17 @@ func DeleteImageFromAzure(c *azureCredentials, imageName string) error {
return err
}
p := azblob.NewPipeline(credential, azblob.PipelineOptions{})
// get blob URL endpoint.
URL, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/%s/%s", c.StorageAccount, c.ContainerName, imageName))
// get storage account blob service URL endpoint.
URL, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/%s", c.StorageAccount, c.ContainerName))
client, err := blob.NewClientWithSharedKeyCredential(URL.String(), credential, nil)
if err != nil {
return fmt.Errorf("cannot create a new blob client: %w", err)
}
// Create a ContainerURL object that wraps the container URL and a request
// pipeline to make requests.
containerURL := azblob.NewContainerURL(*URL, p)
// Create the container, use a never-expiring context
ctx := context.Background()
blobURL := containerURL.NewPageBlobURL(imageName)
_, err = blobURL.Delete(ctx, azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{})
_, err = client.Delete(context.Background(), &blob.DeleteOptions{
DeleteSnapshots: common.ToPtr(blob.DeleteSnapshotsOptionTypeInclude),
})
if err != nil {
return fmt.Errorf("cannot delete the image: %v", err)

View file

@ -2,6 +2,7 @@ package common
import (
"fmt"
"io"
"regexp"
"runtime"
"sort"
@ -88,3 +89,15 @@ func DataSizeToUint64(size string) (uint64, error) {
// unknown units.
return 0, fmt.Errorf("unknown data size units in string: %s", size)
}
// NopSeekCloser returns an io.ReadSeekCloser with a no-op Close method
// wrapping the provided io.ReadSeeker r.
func NopSeekCloser(r io.ReadSeeker) io.ReadSeekCloser {
return nopSeekCloser{r}
}
type nopSeekCloser struct {
io.ReadSeeker
}
func (nopSeekCloser) Close() error { return nil }

View file

@ -16,15 +16,20 @@ import (
"strings"
"sync"
"github.com/Azure/azure-pipeline-go/pipeline"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob"
"github.com/google/uuid"
"github.com/osbuild/osbuild-composer/internal/common"
)
// StorageClient is a client for the Azure Storage API,
// see the docs: https://docs.microsoft.com/en-us/rest/api/storageservices/
type StorageClient struct {
pipeline pipeline.Pipeline
credential *azblob.SharedKeyCredential
}
// NewStorageClient creates a new client for Azure Storage API.
@ -37,9 +42,8 @@ func NewStorageClient(storageAccount, storageAccessKey string) (*StorageClient,
return nil, fmt.Errorf("cannot create shared key credential: %v", err)
}
p := azblob.NewPipeline(credential, azblob.PipelineOptions{})
return &StorageClient{
pipeline: p,
credential: credential,
}, nil
}
@ -54,6 +58,10 @@ type BlobMetadata struct {
// DefaultUploadThreads defines a tested default value for the UploadPageBlob method's threads parameter.
const DefaultUploadThreads = 16
// PageBlobMaxUploadPagesBytes defines how much bytes can we upload in a single UploadPages call.
// See https://learn.microsoft.com/en-us/rest/api/storageservices/put-page
const PageBlobMaxUploadPagesBytes = 4 * 1024 * 1024
// UploadPageBlob takes the metadata and credentials required to upload the image specified by `fileName`
// It can speed up the upload by using goroutines. The number of parallel goroutines is bounded by
// the `threads` argument.
@ -61,12 +69,12 @@ const DefaultUploadThreads = 16
// Note that if you want to create an image out of the page blob, make sure that metadata.BlobName
// has a .vhd extension, see EnsureVHDExtension.
func (c StorageClient) UploadPageBlob(metadata BlobMetadata, fileName string, threads int) error {
// get storage account blob service URL endpoint.
URL, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/%s", metadata.StorageAccount, metadata.ContainerName))
// Create a ContainerURL object that wraps the container URL and a request
// pipeline to make requests.
containerURL := azblob.NewContainerURL(*URL, c.pipeline)
// Create a page blob client.
URL, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/%s/%s", metadata.StorageAccount, metadata.ContainerName, metadata.BlobName))
client, err := pageblob.NewClientWithSharedKeyCredential(URL.String(), c.credential, nil)
if err != nil {
return fmt.Errorf("cannot create a pageblob client: %w", err)
}
// Create the container, use a never-expiring context
ctx := context.Background()
@ -100,16 +108,14 @@ func (c StorageClient) UploadPageBlob(metadata BlobMetadata, fileName string, th
return fmt.Errorf("cannot seek the image: %v", err)
}
// Create page blob URL. Page blob is required for VM images
blobURL := containerURL.NewPageBlobURL(metadata.BlobName)
_, err = blobURL.Create(ctx, stat.Size(), 0, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}, azblob.PremiumPageBlobAccessTierNone, azblob.BlobTagsMap{}, azblob.ClientProvidedKeyOptions{})
// Create page blob. Page blob is required for VM images
_, err = client.Create(ctx, stat.Size(), &pageblob.CreateOptions{
HTTPHeaders: &blob.HTTPHeaders{
BlobContentMD5: imageFileHash.Sum(nil),
},
})
if err != nil {
return fmt.Errorf("cannot create the blob URL: %v", err)
}
// Wrong MD5 does not seem to have any impact on the upload
_, err = blobURL.SetHTTPHeaders(ctx, azblob.BlobHTTPHeaders{ContentMD5: imageFileHash.Sum(nil)}, azblob.BlobAccessConditions{})
if err != nil {
return fmt.Errorf("cannot set the HTTP headers on the blob URL: %v", err)
return fmt.Errorf("cannot create a new page blob: %w", err)
}
// Create control variables
@ -125,7 +131,7 @@ func (c StorageClient) UploadPageBlob(metadata BlobMetadata, fileName string, th
run := true
var wg sync.WaitGroup
for run {
buffer := make([]byte, azblob.PageBlobMaxUploadPagesBytes)
buffer := make([]byte, PageBlobMaxUploadPagesBytes)
n, err := reader.Read(buffer)
if err != nil {
if err == io.EOF {
@ -141,7 +147,11 @@ func (c StorageClient) UploadPageBlob(metadata BlobMetadata, fileName string, th
semaphore <- 1
go func(counter int64, buffer []byte, n int) {
defer wg.Done()
_, err = blobURL.UploadPages(ctx, counter*azblob.PageBlobMaxUploadPagesBytes, bytes.NewReader(buffer[:n]), azblob.PageBlobAccessConditions{}, nil, azblob.ClientProvidedKeyOptions{})
uploadRange := blob.HTTPRange{
Offset: counter * PageBlobMaxUploadPagesBytes,
Count: int64(n),
}
_, err := client.UploadPages(ctx, common.NopSeekCloser(bytes.NewReader(buffer[:n])), uploadRange, nil)
if err != nil {
err = fmt.Errorf("uploading a page failed: %v", err)
// Send the error to the error channel in a non-blocking way. If there is already an error, just discard this one
@ -171,14 +181,19 @@ func (c StorageClient) UploadPageBlob(metadata BlobMetadata, fileName string, th
// this method is no-op.
func (c StorageClient) CreateStorageContainerIfNotExist(ctx context.Context, storageAccount, name string) error {
URL, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/%s", storageAccount, name))
containerURL := azblob.NewContainerURL(*URL, c.pipeline)
_, err := containerURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
cl, err := container.NewClientWithSharedKeyCredential(URL.String(), c.credential, nil)
if err != nil {
if storageErr, ok := err.(azblob.StorageError); ok && storageErr.ServiceCode() == azblob.ServiceCodeContainerAlreadyExists {
return fmt.Errorf("cannot create a storage container client: %w", err)
}
_, err = cl.Create(ctx, nil)
if err != nil {
if bloberror.HasCode(err, bloberror.ContainerAlreadyExists) {
return nil
}
return fmt.Errorf("cannot create a storage container: %v", err)
return fmt.Errorf("cannot create a storage container: %w", err)
}
return nil
@ -198,12 +213,14 @@ func (c StorageClient) TagBlob(ctx context.Context, metadata BlobMetadata, tags
}
}
URL, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/%s", metadata.StorageAccount, metadata.ContainerName))
containerURL := azblob.NewContainerURL(*URL, c.pipeline)
URL, _ := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net/%s/%s", metadata.StorageAccount, metadata.ContainerName, metadata.BlobName))
blobURL := containerURL.NewPageBlobURL(metadata.BlobName)
client, err := blob.NewClientWithSharedKeyCredential(URL.String(), c.credential, nil)
if err != nil {
return fmt.Errorf("cannot create a blob client: %w", err)
}
_, err := blobURL.SetTags(ctx, nil, nil, nil, nil, nil, nil, tags)
_, err = client.SetTags(ctx, tags, nil)
if err != nil {
return fmt.Errorf("cannot tag the blob: %v", err)
}

View file

@ -1,284 +0,0 @@
package pipeline
import (
"context"
"github.com/mattn/go-ieproxy"
"net"
"net/http"
"os"
"time"
)
// The Factory interface represents an object that can create its Policy object. Each HTTP request sent
// requires that this Factory create a new instance of its Policy object.
type Factory interface {
	New(next Policy, po *PolicyOptions) Policy
}

// FactoryFunc is an adapter that allows the use of an ordinary function as a Factory interface.
type FactoryFunc func(next Policy, po *PolicyOptions) PolicyFunc

// New calls f(next, po) and returns the resulting Policy.
func (f FactoryFunc) New(next Policy, po *PolicyOptions) Policy {
	return f(next, po)
}
// The Policy interface represents a mutable Policy object created by a Factory. The object can mutate/process
// the HTTP request and then forward it on to the next Policy object in the linked-list. The returned
// Response goes backward through the linked-list for additional processing.
// NOTE: Request is passed by value so changes do not change the caller's version of
// the request. However, Request has some fields that reference mutable objects (not strings).
// These references are copied; a deep copy is not performed. Specifically, this means that
// you should avoid modifying the objects referred to by these fields: URL, Header, Body,
// GetBody, TransferEncoding, Form, MultipartForm, Trailer, TLS, Cancel, and Response.
type Policy interface {
	Do(ctx context.Context, request Request) (Response, error)
}

// PolicyFunc is an adapter that allows the use of an ordinary function as a Policy interface.
type PolicyFunc func(ctx context.Context, request Request) (Response, error)

// Do calls f(ctx, request).
func (f PolicyFunc) Do(ctx context.Context, request Request) (Response, error) {
	return f(ctx, request)
}

// Options configures a Pipeline's behavior.
type Options struct {
	// HTTPSender is the Factory whose Policy ultimately sends each request.
	// If sender is nil, then the pipeline's default client is used to send the HTTP requests.
	HTTPSender Factory
	// Log configures the pipeline's logging mechanism and level filtering.
	Log LogOptions
}
// LogLevel tells a logger the minimum level to log. When code reports a log entry,
// the LogLevel indicates the level of the log entry. The logger only records entries
// whose level is at least the level it was told to log. See the Log* constants.
// For example, if a logger is configured with LogError, then LogError, LogPanic,
// and LogFatal entries will be logged; lower level entries are ignored.
type LogLevel uint32

const (
	// LogNone tells a logger not to log any entries passed to it.
	LogNone LogLevel = iota

	// LogFatal tells a logger to log all LogFatal entries passed to it.
	LogFatal

	// LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it.
	LogPanic

	// LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it.
	LogError

	// LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it.
	LogWarning

	// LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
	LogInfo

	// LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
	LogDebug
)

// LogOptions configures the pipeline's logging mechanism & level filtering.
type LogOptions struct {
	// Log receives each entry that passed the ShouldLog filter; NewPipeline
	// substitutes a no-op function when this is nil.
	Log func(level LogLevel, message string)

	// ShouldLog is called periodically allowing you to return whether the specified LogLevel should be logged or not.
	// An application can return different values over its lifetime; this allows the application to dynamically
	// alter what is logged. NOTE: This method can be called by multiple goroutines simultaneously so make sure
	// you implement it in a goroutine-safe way. If nil, nothing is logged (the equivalent of returning LogNone).
	// Usually, the function will be implemented simply like this: return level <= LogWarning
	ShouldLog func(level LogLevel) bool
}
// pipeline is the concrete implementation of the Pipeline interface; it holds
// the ordered Factory slice and the Options supplied to NewPipeline.
type pipeline struct {
	factories []Factory
	options   Options
}

// The Pipeline interface represents an ordered list of Factory objects and an object implementing the HTTPSender interface.
// You construct a Pipeline by calling the pipeline.NewPipeline function. To send an HTTP request, call pipeline.NewRequest
// and then call Pipeline's Do method passing a context, the request, and a method-specific Factory (or nil). Passing a
// method-specific Factory allows this one call to Do to inject a Policy into the linked-list. The policy is injected where
// the MethodFactoryMarker (see the pipeline.MethodFactoryMarker function) is in the slice of Factory objects.
//
// When Do is called, the Pipeline object asks each Factory object to construct its Policy object and adds each Policy to a linked-list.
// Then, Do sends the Context and Request through all the Policy objects. The final Policy object sends the request over the network
// (via the HTTPSender object passed to NewPipeline) and the response is returned backwards through all the Policy objects.
// Since Pipeline and Factory objects are goroutine-safe, you typically create 1 Pipeline object and reuse it to make many HTTP requests.
type Pipeline interface {
	Do(ctx context.Context, methodFactory Factory, request Request) (Response, error)
}
// NewPipeline creates a new goroutine-safe Pipeline object from the slice of Factory objects and the specified options.
func NewPipeline(factories []Factory, o Options) Pipeline {
	opts := o
	// Default the terminal sender when the caller did not supply one.
	if opts.HTTPSender == nil {
		opts.HTTPSender = newDefaultHTTPClientFactory()
	}
	// Guarantee a non-nil logger so policies never have to nil-check it.
	if opts.Log.Log == nil {
		opts.Log.Log = func(LogLevel, string) {} // No-op logger
	}
	return &pipeline{factories: factories, options: opts}
}
// Do is called for each and every HTTP request. It tells each Factory to create its own (mutable) Policy object
// replacing a MethodFactoryMarker factory (if it exists) with the methodFactory passed in. Then, the Context and Request
// are sent through the pipeline of Policy objects (which can transform the Request's URL/query parameters/headers) and
// ultimately sends the transformed HTTP request over the network.
func (p *pipeline) Do(ctx context.Context, methodFactory Factory, request Request) (Response, error) {
	head := p.newPolicies(methodFactory)
	resp, err := head.Do(ctx, request)
	request.close()
	return resp, err
}
// newPolicies builds the per-call linked-list of Policy objects by walking the
// Factory slice in reverse (wire end first), so the returned head Policy runs
// the factories in slice order. A MethodFactoryMarker entry, if present, is
// replaced with the supplied methodFactory.
func (p *pipeline) newPolicies(methodFactory Factory) Policy {
	// The last Policy is the one that actually sends the request over the wire and gets the response.
	// It is overridable via the Options' HTTPSender field.
	po := &PolicyOptions{pipeline: p} // One object shared by all policy objects
	next := p.options.HTTPSender.New(nil, po)

	// Walk over the slice of Factory objects in reverse (from wire to API)
	markers := 0
	for i := len(p.factories) - 1; i >= 0; i-- {
		factory := p.factories[i]
		if _, ok := factory.(methodFactoryMarker); ok {
			markers++
			if markers > 1 {
				panic("MethodFactoryMarker can only appear once in the pipeline")
			}
			if methodFactory != nil {
				// Replace MethodFactoryMarker with passed-in methodFactory
				next = methodFactory.New(next, po)
			}
		} else {
			// Use the slice's Factory to construct its Policy
			next = factory.New(next, po)
		}
	}

	// Each Factory has created its Policy
	if markers == 0 && methodFactory != nil {
		panic("Non-nil methodFactory requires MethodFactoryMarker in the pipeline")
	}
	return next // Return head of the Policy object linked-list
}
// A PolicyOptions represents optional information that can be used by a node in the
// linked-list of Policy objects. A PolicyOptions is passed to the Factory's New method
// which passes it (if desired) to the Policy object it creates. Today, the Policy object
// uses the options to perform logging. But, in the future, this could be used for more.
type PolicyOptions struct {
	// pipeline back-references the owning pipeline so logging helpers can
	// reach its Options (see ShouldLog and Log).
	pipeline *pipeline
}
// ShouldLog returns true if the specified log level should be logged.
// A nil ShouldLog callback means nothing is logged.
func (po *PolicyOptions) ShouldLog(level LogLevel) bool {
	shouldLog := po.pipeline.options.Log.ShouldLog
	if shouldLog == nil {
		return false
	}
	return shouldLog(level)
}
// Log logs a string to the Pipeline's Logger. Entries filtered out by
// ShouldLog are dropped before any formatting work. After logging, a LogFatal
// entry terminates the process and a LogPanic entry panics with the message.
func (po *PolicyOptions) Log(level LogLevel, msg string) {
	if !po.ShouldLog(level) {
		return // Short circuit message formatting if we're not logging it
	}

	// We are logging it, ensure trailing newline
	if len(msg) == 0 || msg[len(msg)-1] != '\n' {
		msg += "\n" // Ensure trailing newline
	}
	po.pipeline.options.Log.Log(level, msg)

	// If logger doesn't handle fatal/panic, we'll do it here.
	if level == LogFatal {
		os.Exit(1)
	} else if level == LogPanic {
		panic(msg)
	}
}
// pipelineHTTPClient is the process-wide HTTP client shared by every pipeline
// whose Options did not supply a custom HTTPSender.
var pipelineHTTPClient = newDefaultHTTPClient()

// newDefaultHTTPClient builds the default client: a Transport with a large
// idle-connection pool and system proxy detection via go-ieproxy.
func newDefaultHTTPClient() *http.Client {
	// We want the Transport to have a large connection pool
	return &http.Client{
		Transport: &http.Transport{
			Proxy: ieproxy.GetProxyFunc(),
			// We use Dial instead of DialContext as DialContext has been reported to cause slower performance.
			Dial /*Context*/ : (&net.Dialer{
				Timeout:   30 * time.Second,
				KeepAlive: 30 * time.Second,
				// NOTE(review): DualStack is deprecated in newer Go releases
				// (dual-stack is the default) — confirm before copying this.
				DualStack: true,
			}).Dial, /*Context*/
			MaxIdleConns:           0, // No limit
			MaxIdleConnsPerHost:    100,
			IdleConnTimeout:        90 * time.Second,
			TLSHandshakeTimeout:    10 * time.Second,
			ExpectContinueTimeout:  1 * time.Second,
			DisableKeepAlives:      false,
			DisableCompression:     false,
			MaxResponseHeaderBytes: 0,
			//ResponseHeaderTimeout: time.Duration{},
			//ExpectContinueTimeout: time.Duration{},
		},
	}
}
// newDefaultHTTPClientFactory creates a DefaultHTTPClientPolicyFactory object that sends HTTP requests to a Go's default http.Client.
// The returned Factory produces the terminal Policy of the linked-list: it
// performs the request with the shared pipelineHTTPClient and wraps any error.
func newDefaultHTTPClientFactory() Factory {
	return FactoryFunc(func(next Policy, po *PolicyOptions) PolicyFunc {
		return func(ctx context.Context, request Request) (Response, error) {
			r, err := pipelineHTTPClient.Do(request.WithContext(ctx))
			if err != nil {
				err = NewError(err, "HTTP request failed")
			}
			return NewHTTPResponse(r), err
		}
	})
}
// mfm is the singleton marker value handed out by MethodFactoryMarker.
var mfm = methodFactoryMarker{} // Singleton

// MethodFactoryMarker returns a special marker Factory object. When Pipeline's Do method is called, any
// MethodMarkerFactory object is replaced with the specified methodFactory object. If nil is passed for Do's
// methodFactory parameter, then the MethodFactoryMarker is ignored as the linked-list of Policy objects is created.
func MethodFactoryMarker() Factory {
	return mfm
}

// methodFactoryMarker is a placeholder Factory; newPolicies replaces it with
// the caller's method-specific Factory, so its New must never actually run.
type methodFactoryMarker struct {
}

// New always panics: reaching it means the marker was not replaced.
func (methodFactoryMarker) New(next Policy, po *PolicyOptions) Policy {
	panic("methodFactoryMarker policy should have been replaced with a method policy")
}
// LogSanitizer can be implemented to clean secrets from lines logged by ForceLog.
// By default no implementation is provided here, because pipeline may be used in many different
// contexts, so the correct implementation is context-dependent.
type LogSanitizer interface {
	SanitizeLogMessage(raw string) string
}

// sanitizer, when non-nil, is applied to every ForceLog message.
var sanitizer LogSanitizer

// enableForceLog globally enables/disables ForceLog output.
var enableForceLog bool = true

// SetLogSanitizer can be called to supply a custom LogSanitizer.
// There is no threadsafety or locking on the underlying variable,
// so call this function just once at startup of your application
// (Don't later try to change the sanitizer on the fly).
func SetLogSanitizer(s LogSanitizer) {
	sanitizer = s
}

// SetForceLogEnabled can be used to disable ForceLog.
// There is no threadsafety or locking on the underlying variable,
// so call this function just once at startup of your application
// (Don't later try to change the setting on the fly).
func SetForceLogEnabled(enable bool) {
	enableForceLog = enable
}

View file

@ -1,14 +0,0 @@
package pipeline
// ForceLog should rarely be used. It forcibly logs an entry to the
// Windows Event Log (on Windows) or to the SysLog (on Linux).
// The message is first passed through the configured LogSanitizer (if any),
// and the call is a no-op when force-logging has been disabled via
// SetForceLogEnabled(false).
func ForceLog(level LogLevel, msg string) {
	if !enableForceLog {
		return
	}
	if sanitizer != nil {
		msg = sanitizer.SanitizeLogMessage(msg)
	}
	forceLog(level, msg)
}

View file

@ -1,33 +0,0 @@
// +build !windows,!nacl,!plan9
package pipeline
import (
"log"
"log/syslog"
)
// forceLog should rarely be used. It forcibly logs an entry to the
// SysLog on non-Windows platforms (see the Windows variant in the
// sibling file for the Event Log implementation).
func forceLog(level LogLevel, msg string) {
	if defaultLogger == nil {
		return // Return fast if we failed to create the logger.
	}
	// We are logging it, ensure trailing newline
	if len(msg) == 0 || msg[len(msg)-1] != '\n' {
		msg += "\n" // Ensure trailing newline
	}
	switch level {
	case LogFatal:
		defaultLogger.Fatal(msg)
	case LogPanic:
		defaultLogger.Panic(msg)
	case LogError, LogWarning, LogInfo:
		defaultLogger.Print(msg)
	}
	// Note: any other LogLevel value is silently dropped.
}

// defaultLogger writes to the system log at LOG_USER|LOG_WARNING priority.
// The error from syslog.NewLogger is deliberately discarded: on failure the
// logger is nil and forceLog degrades to a no-op (see the fast return above).
var defaultLogger = func() *log.Logger {
	l, _ := syslog.NewLogger(syslog.LOG_USER|syslog.LOG_WARNING, log.LstdFlags)
	return l
}()

View file

@ -1,61 +0,0 @@
package pipeline
import (
"os"
"syscall"
"unsafe"
)
// forceLog should rarely be used. It forcibly logs an entry to the
// Windows Event Log, mapping the pipeline LogLevel onto an event type.
func forceLog(level LogLevel, msg string) {
	var el eventType
	switch level {
	case LogError, LogFatal, LogPanic:
		el = elError
	case LogWarning:
		el = elWarning
	case LogInfo:
		el = elInfo
	}
	// We are logging it, ensure trailing newline
	if len(msg) == 0 || msg[len(msg)-1] != '\n' {
		msg += "\n" // Ensure trailing newline
	}
	reportEvent(el, 0, msg)
}

// eventType mirrors the Windows Event Log wType values (winnt.h EVENTLOG_* constants).
type eventType int16

const (
	elSuccess eventType = 0
	elError   eventType = 1
	elWarning eventType = 2
	elInfo    eventType = 4
)
// reportEvent is bound once at init: it registers this executable as an Event
// Log source via advapi32!RegisterEventSourceW and returns a closure that
// writes a single-string event via ReportEventW.
//
// NOTE(review): syscall's Call always returns a non-nil error (errno 0 renders
// as "operation completed successfully"), so the `lastErr == nil` no-op branch
// below appears unreachable — confirm the intended failure handling.
var reportEvent = func() func(eventType eventType, eventID int32, msg string) {
	advAPI32 := syscall.MustLoadDLL("advapi32.dll") // lower case to tie in with Go's sysdll registration
	registerEventSource := advAPI32.MustFindProc("RegisterEventSourceW")
	sourceName, _ := os.Executable()
	sourceNameUTF16, _ := syscall.UTF16PtrFromString(sourceName)
	handle, _, lastErr := registerEventSource.Call(uintptr(0), uintptr(unsafe.Pointer(sourceNameUTF16)))
	if lastErr == nil { // On error, logging is a no-op
		return func(eventType eventType, eventID int32, msg string) {}
	}
	reportEvent := advAPI32.MustFindProc("ReportEventW")
	return func(eventType eventType, eventID int32, msg string) {
		s, _ := syscall.UTF16PtrFromString(msg)
		_, _, _ = reportEvent.Call(
			uintptr(handle),             // HANDLE  hEventLog
			uintptr(eventType),          // WORD    wType
			uintptr(0),                  // WORD    wCategory
			uintptr(eventID),            // DWORD   dwEventID
			uintptr(0),                  // PSID    lpUserSid
			uintptr(1),                  // WORD    wNumStrings
			uintptr(0),                  // DWORD   dwDataSize
			uintptr(unsafe.Pointer(&s)), // LPCTSTR *lpStrings
			uintptr(0))                  // LPVOID  lpRawData
	}
}()

View file

@ -1,161 +0,0 @@
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
// license that can be found in the LICENSE file.
/*
Package pipeline implements an HTTP request/response middleware pipeline whose
policy objects mutate an HTTP request's URL, query parameters, and/or headers before
the request is sent over the wire.
Not all policy objects mutate an HTTP request; some policy objects simply impact the
flow of requests/responses by performing operations such as logging, retry policies,
timeouts, failure injection, and deserialization of response payloads.
Implementing the Policy Interface
To implement a policy, define a struct that implements the pipeline.Policy interface's Do method. Your Do
method is called when an HTTP request wants to be sent over the network. Your Do method can perform any
operation(s) it desires. For example, it can log the outgoing request, mutate the URL, headers, and/or query
parameters, inject a failure, etc. Your Do method must then forward the HTTP request to next Policy object
in a linked-list ensuring that the remaining Policy objects perform their work. Ultimately, the last Policy
object sends the HTTP request over the network (by calling the HTTPSender's Do method).
When an HTTP response comes back, each Policy object in the linked-list gets a chance to process the response
(in reverse order). The Policy object can log the response, retry the operation if due to a transient failure
or timeout, deserialize the response body, etc. Ultimately, the last Policy object returns the HTTP response
to the code that initiated the original HTTP request.
Here is a template for how to define a pipeline.Policy object:
type myPolicy struct {
node PolicyNode
// TODO: Add configuration/setting fields here (if desired)...
}
func (p *myPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
// TODO: Mutate/process the HTTP request here...
response, err := p.node.Do(ctx, request) // Forward HTTP request to next Policy & get HTTP response
// TODO: Mutate/process the HTTP response here...
return response, err // Return response/error to previous Policy
}
Implementing the Factory Interface
Each Policy struct definition requires a factory struct definition that implements the pipeline.Factory interface's New
method. The New method is called when application code wants to initiate a new HTTP request. Factory's New method is
passed a pipeline.PolicyNode object which contains a reference to the owning pipeline.Pipeline object (discussed later) and
a reference to the next Policy object in the linked list. The New method should create its corresponding Policy object
passing it the PolicyNode and any other configuration/settings fields appropriate for the specific Policy object.
Here is a template for how to define a pipeline.Factory object:
// NOTE: Once created & initialized, Factory objects should be goroutine-safe (ex: immutable);
// this allows reuse (efficient use of memory) and makes these objects usable by multiple goroutines concurrently.
type myPolicyFactory struct {
// TODO: Add any configuration/setting fields if desired...
}
func (f *myPolicyFactory) New(node pipeline.PolicyNode) Policy {
return &myPolicy{node: node} // TODO: Also initialize any configuration/setting fields here (if desired)...
}
Using your Factory and Policy objects via a Pipeline
To use the Factory and Policy objects, an application constructs a slice of Factory objects and passes
this slice to the pipeline.NewPipeline function.
func NewPipeline(factories []pipeline.Factory, sender pipeline.HTTPSender) Pipeline
This function also requires an object implementing the HTTPSender interface. For simple scenarios,
passing nil for HTTPSender causes a standard Go http.Client object to be created and used to actually
send the HTTP response over the network. For more advanced scenarios, you can pass your own HTTPSender
object in. This allows sharing of http.Client objects or the use of custom-configured http.Client objects
or other objects that can simulate the network requests for testing purposes.
Now that you have a pipeline.Pipeline object, you can create a pipeline.Request object (which is a simple
wrapper around Go's standard http.Request object) and pass it to Pipeline's Do method along with passing a
context.Context for cancelling the HTTP request (if desired).
type Pipeline interface {
Do(ctx context.Context, methodFactory pipeline.Factory, request pipeline.Request) (pipeline.Response, error)
}
Do iterates over the slice of Factory objects and tells each one to create its corresponding
Policy object. After the linked-list of Policy objects have been created, Do calls the first
Policy object passing it the Context & HTTP request parameters. These parameters now flow through
all the Policy objects giving each object a chance to look at and/or mutate the HTTP request.
The last Policy object sends the message over the network.
When the network operation completes, the HTTP response and error return values pass
back through the same Policy objects in reverse order. Most Policy objects ignore the
response/error but some log the result, retry the operation (depending on the exact
reason the operation failed), or deserialize the response's body. Your own Policy
objects can do whatever they like when processing outgoing requests or incoming responses.
Note that after an I/O request runs to completion, the Policy objects for that request
are garbage collected. However, Pipeline object (like Factory objects) are goroutine-safe allowing
them to be created once and reused over many I/O operations. This allows for efficient use of
memory and also makes them safely usable by multiple goroutines concurrently.
Inserting a Method-Specific Factory into the Linked-List of Policy Objects
While Pipeline and Factory objects can be reused over many different operations, it is
common to have special behavior for a specific operation/method. For example, a method
may need to deserialize the response's body to an instance of a specific data type.
To accommodate this, the Pipeline's Do method takes an additional method-specific
Factory object. The Do method tells this Factory to create a Policy object and
injects this method-specific Policy object into the linked-list of Policy objects.
When creating a Pipeline object, the slice of Factory objects passed must have 1
(and only 1) entry marking where the method-specific Factory should be injected.
The Factory marker is obtained by calling the pipeline.MethodFactoryMarker() function:
func MethodFactoryMarker() pipeline.Factory
Creating an HTTP Request Object
The HTTP request object passed to Pipeline's Do method is not Go's http.Request struct.
Instead, it is a pipeline.Request struct which is a simple wrapper around Go's standard
http.Request. You create a pipeline.Request object by calling the pipeline.NewRequest function:
func NewRequest(method string, url url.URL, options pipeline.RequestOptions) (request pipeline.Request, err error)
To this function, you must pass a pipeline.RequestOptions that looks like this:
type RequestOptions struct {
// The readable and seekable stream to be sent to the server as the request's body.
Body io.ReadSeeker
// The callback method (if not nil) to be invoked to report progress as the stream is uploaded in the HTTP request.
Progress ProgressReceiver
}
The method and struct ensure that the request's body stream is a read/seekable stream.
A seekable stream is required so that upon retry, the final Policy object can seek
the stream back to the beginning before retrying the network request and re-uploading the
body. In addition, you can associate a ProgressReceiver callback function which will be
invoked periodically to report progress while bytes are being read from the body stream
and sent over the network.
Processing the HTTP Response
When an HTTP response comes in from the network, a reference to Go's http.Response struct is
embedded in a struct that implements the pipeline.Response interface:
type Response interface {
Response() *http.Response
}
This interface is returned through all the Policy objects. Each Policy object can call the Response
interface's Response method to examine (or mutate) the embedded http.Response object.
A Policy object can internally define another struct (implementing the pipeline.Response interface)
that embeds an http.Response and adds additional fields and return this structure to other Policy
objects. This allows a Policy object to deserialize the body to some other struct and return the
original http.Response and the additional struct back through the Policy chain. Other Policy objects
can see the Response but cannot see the additional struct with the deserialized body. After all the
Policy objects have returned, the pipeline.Response interface is returned by Pipeline's Do method.
The caller of this method can perform a type assertion attempting to get back to the struct type
really returned by the Policy object. If the type assertion is successful, the caller now has
access to both the http.Response and the deserialized struct object.*/
package pipeline

View file

@ -1,184 +0,0 @@
package pipeline
import (
"fmt"
"runtime"
)
// causer matches error values that expose the preceding (wrapped) error,
// letting Cause/Temporary/Timeout walk an error chain.
type causer interface {
	Cause() error
}
// errorWithPC renders msg prefixed with the symbol, file and line of the given
// program counter. When pc resolves to no known function, only msg is rendered.
// The result always ends with a blank line so chained causes stay separated.
func errorWithPC(msg string, pc uintptr) string {
	fn := runtime.FuncForPC(pc)
	if fn == nil {
		return msg + "\n\n"
	}
	file, line := fn.FileLine(pc)
	return fmt.Sprintf("-> %v, %v:%v\n", fn.Name(), file, line) + msg + "\n\n"
}
// getPC returns one program counter from the current call stack, skipping
// callersToSkip frames (see runtime.Callers for the skip semantics). A zero
// return means the requested frame does not exist.
func getPC(callersToSkip int) uintptr {
	var frames [1]uintptr
	runtime.Callers(callersToSkip, frames[:])
	return frames[0]
}
// ErrorNode can be an embedded field in a private error object. This field
// adds Program Counter support and a 'cause' (reference to a preceding error).
// When initializing an error type with this embedded field, initialize the
// ErrorNode field by calling ErrorNode{}.Initialize(cause).
type ErrorNode struct {
	pc    uintptr // Represents a Program Counter that you can get symbols for.
	cause error   // Refers to the preceding error (or nil)
}

// Error returns a string with the PC's symbols or "" if the PC is invalid.
// When defining a new error type, have its Error method call this one passing
// it the string representation of the error.
func (e *ErrorNode) Error(msg string) string {
	s := errorWithPC(msg, e.pc)
	if e.cause != nil {
		// Append the cause's own message so the full chain is visible.
		s += e.cause.Error() + "\n"
	}
	return s
}

// Cause returns the error that preceded this error.
func (e *ErrorNode) Cause() error { return e.cause }

// Unwrap provides compatibility for Go 1.13 error chains.
func (e *ErrorNode) Unwrap() error { return e.cause }

// Temporary returns true if the error occurred due to a temporary condition.
// It walks the cause chain until some error reports Temporary(), or reports
// false when the chain ends without one.
func (e ErrorNode) Temporary() bool {
	type temporary interface {
		Temporary() bool
	}
	for err := e.cause; err != nil; {
		if t, ok := err.(temporary); ok {
			return t.Temporary()
		}
		// Not temporary-aware; step to the next cause (or terminate the walk).
		if cause, ok := err.(causer); ok {
			err = cause.Cause()
		} else {
			err = nil
		}
	}
	return false
}

// Timeout returns true if the error occurred due to time expiring.
// It walks the cause chain until some error reports Timeout(), or reports
// false when the chain ends without one.
func (e ErrorNode) Timeout() bool {
	type timeout interface {
		Timeout() bool
	}
	for err := e.cause; err != nil; {
		if t, ok := err.(timeout); ok {
			return t.Timeout()
		}
		if cause, ok := err.(causer); ok {
			err = cause.Cause()
		} else {
			err = nil
		}
	}
	return false
}

// Initialize is used to initialize an embedded ErrorNode field.
// It captures the caller's program counter and saves the cause (preceding error).
// To initialize the field, use "ErrorNode{}.Initialize(cause, 3)". A callersToSkip
// value of 3 is very common; but, depending on your code nesting, you may need
// a different value.
func (ErrorNode) Initialize(cause error, callersToSkip int) ErrorNode {
	pc := getPC(callersToSkip)
	return ErrorNode{pc: pc, cause: cause}
}
// Cause walks the chain of preceding errors and returns the originating error:
// the first value in the chain that does not implement causer (or nil when err
// is nil, or when a causer's Cause() yields nil).
func Cause(err error) error {
	for {
		c, ok := err.(causer)
		if !ok {
			return err
		}
		err = c.Cause()
	}
}
// ErrorNodeNoCause can be an embedded field in a private error object. This field
// adds Program Counter support.
// When initializing an error type with this embedded field, initialize the
// ErrorNodeNoCause field by calling ErrorNodeNoCause{}.Initialize().
type ErrorNodeNoCause struct {
	pc uintptr // Represents a Program Counter that you can get symbols for.
}

// Error returns a string with the PC's symbols or "" if the PC is invalid.
// When defining a new error type, have its Error method call this one passing
// it the string representation of the error.
func (e *ErrorNodeNoCause) Error(msg string) string {
	return errorWithPC(msg, e.pc)
}

// Temporary returns true if the error occurred due to a temporary condition.
// With no cause chain to inspect, this always reports false.
func (e ErrorNodeNoCause) Temporary() bool {
	return false
}

// Timeout returns true if the error occurred due to time expiring.
// With no cause chain to inspect, this always reports false.
func (e ErrorNodeNoCause) Timeout() bool {
	return false
}

// Initialize is used to initialize an embedded ErrorNodeNoCause field.
// It captures the caller's program counter.
// To initialize the field, use "ErrorNodeNoCause{}.Initialize(3)". A callersToSkip
// value of 3 is very common; but, depending on your code nesting, you may need
// a different value.
func (ErrorNodeNoCause) Initialize(callersToSkip int) ErrorNodeNoCause {
	pc := getPC(callersToSkip)
	return ErrorNodeNoCause{pc: pc}
}
// NewError creates a simple string error (like errors.New). But, this
// error also captures the caller's Program Counter and the preceding error (if provided).
// With a non-nil cause the result supports Cause/Unwrap chain walking;
// without one a lighter PC-only error is returned.
func NewError(cause error, msg string) error {
	if cause != nil {
		return &pcError{
			ErrorNode: ErrorNode{}.Initialize(cause, 3),
			msg:       msg,
		}
	}
	return &pcErrorNoCause{
		ErrorNodeNoCause: ErrorNodeNoCause{}.Initialize(3),
		msg:              msg,
	}
}
// pcError is a simple string error (like errors.New) with an ErrorNode (PC & cause).
type pcError struct {
	ErrorNode
	msg string
}

// Error satisfies the error interface. It shows the error with Program Counter
// symbols and calls Error on the preceding error so you can see the full error chain.
func (e *pcError) Error() string { return e.ErrorNode.Error(e.msg) }

// pcErrorNoCause is a simple string error (like errors.New) with an ErrorNodeNoCause (PC only).
type pcErrorNoCause struct {
	ErrorNodeNoCause
	msg string
}

// Error satisfies the error interface. It shows the error with Program Counter symbols.
func (e *pcErrorNoCause) Error() string { return e.ErrorNodeNoCause.Error(e.msg) }

View file

@ -1,82 +0,0 @@
package pipeline
import "io"
// ********** The following is common between the request body AND the response body.

// ProgressReceiver defines the signature of a callback function invoked as progress is reported.
type ProgressReceiver func(bytesTransferred int64)

// ********** The following are specific to the request body (a ReadSeekCloser)

// requestBodyProgress wraps an outgoing request body stream and reports upload
// progress through a ProgressReceiver as the body is read by the transport.
type requestBodyProgress struct {
	requestBody io.ReadSeeker // Seeking is required to support retries
	pr          ProgressReceiver
}

// NewRequestBodyProgress adds progress reporting to an HTTP request's body stream.
// It panics if pr is nil.
func NewRequestBodyProgress(requestBody io.ReadSeeker, pr ProgressReceiver) io.ReadSeeker {
	if pr == nil {
		panic("pr must not be nil")
	}
	return &requestBodyProgress{requestBody: requestBody, pr: pr}
}

// Read reads a block of data from an inner stream and reports progress.
// Progress is the stream's absolute offset (obtained by seeking), so positions
// stay correct even after a retry rewinds the stream. A failing progress seek
// panics rather than silently misreporting.
func (rbp *requestBodyProgress) Read(p []byte) (n int, err error) {
	n, err = rbp.requestBody.Read(p)
	if err != nil {
		return
	}
	// Invokes the user's callback method to report progress
	position, err := rbp.requestBody.Seek(0, io.SeekCurrent)
	if err != nil {
		panic(err)
	}
	rbp.pr(position)
	return
}

// Seek delegates to the wrapped stream (required to support retries).
func (rbp *requestBodyProgress) Seek(offset int64, whence int) (offsetFromStart int64, err error) {
	return rbp.requestBody.Seek(offset, whence)
}

// requestBodyProgress supports Close but the underlying stream may not; if it does, Close will close it.
func (rbp *requestBodyProgress) Close() error {
	if c, ok := rbp.requestBody.(io.Closer); ok {
		return c.Close()
	}
	return nil
}
// ********** The following are specific to the response body (a ReadCloser)

// responseBodyProgress wraps an incoming response body stream and reports
// download progress through a ProgressReceiver, tracking a running byte count.
type responseBodyProgress struct {
	responseBody io.ReadCloser
	pr           ProgressReceiver
	offset       int64 // total bytes read so far; passed to pr on every Read
}

// NewResponseBodyProgress adds progress reporting to an HTTP response's body stream.
// It panics if pr is nil.
func NewResponseBodyProgress(responseBody io.ReadCloser, pr ProgressReceiver) io.ReadCloser {
	if pr == nil {
		panic("pr must not be nil")
	}
	return &responseBodyProgress{responseBody: responseBody, pr: pr, offset: 0}
}

// Read reads a block of data from an inner stream and reports progress.
// Note: unlike the request-side wrapper, pr is invoked even when the inner
// Read returns an error (including io.EOF).
func (rbp *responseBodyProgress) Read(p []byte) (n int, err error) {
	n, err = rbp.responseBody.Read(p)
	rbp.offset += int64(n)
	// Invokes the user's callback method to report progress
	rbp.pr(rbp.offset)
	return
}

// Close closes the wrapped response body.
func (rbp *responseBodyProgress) Close() error {
	return rbp.responseBody.Close()
}

View file

@ -1,147 +0,0 @@
package pipeline
import (
"io"
"net/http"
"net/url"
"strconv"
)
// Request is a thin wrapper over an http.Request. The wrapper provides several helper methods.
type Request struct {
	*http.Request
}

// NewRequest initializes a new HTTP request object with any desired options.
// It returns a non-nil error only when setting a non-nil body fails (see SetBody).
func NewRequest(method string, url url.URL, body io.ReadSeeker) (request Request, err error) {
	// Note: the url is passed by value so that any pipeline operations that modify it do so on a copy.
	// This code to construct an http.Request is copied from http.NewRequest(); we intentionally omitted removeEmptyPort for now.
	request.Request = &http.Request{
		Method:     method,
		URL:        &url,
		Proto:      "HTTP/1.1",
		ProtoMajor: 1,
		ProtoMinor: 1,
		Header:     make(http.Header),
		Host:       url.Host,
	}
	if body != nil {
		err = request.SetBody(body)
	}
	return
}
// SetBody sets the request's body stream and Content-Length header from the
// given seekable stream; body must not be nil.
//
// The length is discovered by seeking to the stream's end, after which the
// stream is rewound to the start so the full body is transmitted. A non-empty
// body is wrapped in a retryableRequestBody so transports cannot close the
// underlying stream on transient failures; an empty body becomes http.NoBody
// (explicitly "no content") and any user-provided closer is closed.
func (r Request) SetBody(body io.ReadSeeker) error {
	size, err := body.Seek(0, io.SeekEnd)
	if err != nil {
		return err
	}
	// Rewind to the start. Previously this error was silently discarded, which
	// could transmit a truncated or mispositioned body if the rewind failed.
	if _, err := body.Seek(0, io.SeekStart); err != nil {
		return err
	}
	r.ContentLength = size
	r.Header["Content-Length"] = []string{strconv.FormatInt(size, 10)}
	if size != 0 {
		r.Body = &retryableRequestBody{body: body}
		// GetBody rewinds and re-hands out the same stream for HTTP/2 retries
		// and redirects.
		r.GetBody = func() (io.ReadCloser, error) {
			_, err := body.Seek(0, io.SeekStart)
			if err != nil {
				return nil, err
			}
			return r.Body, nil
		}
	} else {
		// in case the body is an empty stream, we need to use http.NoBody to explicitly provide no content
		r.Body = http.NoBody
		r.GetBody = func() (io.ReadCloser, error) {
			return http.NoBody, nil
		}
		// close the user-provided empty body
		if c, ok := body.(io.Closer); ok {
			c.Close()
		}
	}
	return nil
}
// Copy makes a copy of an http.Request. Specifically, it makes a deep copy
// of its Method, URL, Host, Proto(Major/Minor), Header. ContentLength, Close,
// RemoteAddr, RequestURI. Copy makes a shallow copy of the Body, GetBody, TLS,
// Cancel, Response, and ctx fields. Copy panics if any of these fields are
// not nil: TransferEncoding, Form, PostForm, MultipartForm, or Trailer.
func (r Request) Copy() Request {
	if r.TransferEncoding != nil || r.Form != nil || r.PostForm != nil || r.MultipartForm != nil || r.Trailer != nil {
		panic("Can't make a deep copy of the http.Request because at least one of the following is not nil:" +
			"TransferEncoding, Form, PostForm, MultipartForm, or Trailer.")
	}
	// Shallow-copy the request, then deep-copy URL and Header. ("clone" avoids
	// shadowing the copy builtin.)
	clone := *r.Request
	clonedURL := *r.Request.URL
	clone.URL = &clonedURL
	clone.Header = make(http.Header, len(r.Header))
	for name, values := range r.Header {
		for _, v := range values {
			clone.Header.Add(name, v)
		}
	}
	return Request{Request: &clone}
}
// close releases the request's real body, if any. The body is stored as a
// *retryableRequestBody whose public Close is a no-op; realClose closes the
// underlying stream for real once the pipeline is done with the request.
func (r Request) close() error {
	if r.Body != nil && r.Body != http.NoBody {
		c, ok := r.Body.(*retryableRequestBody)
		if !ok {
			// NOTE(review): the message names retryableReadSeekerCloser but the
			// actual type is retryableRequestBody — likely a stale identifier.
			panic("unexpected request body type (should be *retryableReadSeekerCloser)")
		}
		return c.realClose()
	}
	return nil
}

// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation.
func (r Request) RewindBody() error {
	if r.Body != nil && r.Body != http.NoBody {
		s, ok := r.Body.(io.Seeker)
		if !ok {
			panic("unexpected request body type (should be io.Seeker)")
		}
		// Reset the stream back to the beginning
		_, err := s.Seek(0, io.SeekStart)
		return err
	}
	return nil
}
// ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser)

// retryableRequestBody wraps the body stream sent to the network. Its Close is
// deliberately a no-op so retries can rewind and resend the same stream; use
// realClose to actually release the underlying stream.
type retryableRequestBody struct {
	body io.ReadSeeker // Seeking is required to support retries
}

// Read reads a block of data from the inner stream.
func (b *retryableRequestBody) Read(p []byte) (n int, err error) {
	return b.body.Read(p)
}

// Seek delegates to the inner stream (used to rewind before a retry).
func (b *retryableRequestBody) Seek(offset int64, whence int) (offsetFromStart int64, err error) {
	return b.body.Seek(offset, whence)
}

func (b *retryableRequestBody) Close() error {
	// We don't want the underlying transport to close the request body on transient failures so this is a nop.
	// The pipeline closes the request body upon success.
	return nil
}

// realClose closes the underlying stream if (and only if) it is closable.
func (b *retryableRequestBody) realClose() error {
	if c, ok := b.body.(io.Closer); ok {
		return c.Close()
	}
	return nil
}

View file

@ -1,74 +0,0 @@
package pipeline
import (
"bytes"
"fmt"
"net/http"
"sort"
"strings"
)
// The Response interface exposes an http.Response object as it returns through the pipeline of Policy objects.
// This ensures that Policy objects have access to the HTTP response. However, the object this interface encapsulates
// might be a struct with additional fields that is created by a Policy object (typically a method-specific Factory).
// The method that injected the method-specific Factory gets this returned Response and performs a type assertion
// to the expected struct and returns the struct to its caller.
type Response interface {
	Response() *http.Response
}

// httpResponse is the default struct that has the http.Response.
// A method can replace this struct with its own struct containing an http.Response
// field and any other additional fields.
type httpResponse struct {
	response *http.Response
}

// NewHTTPResponse is typically called by a Policy object to return a Response object.
func NewHTTPResponse(response *http.Response) Response {
	return &httpResponse{response: response}
}

// Response satisfies the public Response interface's Response method.
func (r httpResponse) Response() *http.Response {
	return r.response
}
// WriteRequestWithResponse appends a formatted HTTP request into a Buffer. If request and/or err are
// not nil, then these are also written into the Buffer.
func WriteRequestWithResponse(b *bytes.Buffer, request *http.Request, response *http.Response, err error) {
// Write the request into the buffer.
fmt.Fprint(b, " "+request.Method+" "+request.URL.String()+"\n")
writeHeader(b, request.Header)
if response != nil {
fmt.Fprintln(b, " --------------------------------------------------------------------------------")
fmt.Fprint(b, " RESPONSE Status: "+response.Status+"\n")
writeHeader(b, response.Header)
}
if err != nil {
fmt.Fprintln(b, " --------------------------------------------------------------------------------")
fmt.Fprint(b, " ERROR:\n"+err.Error()+"\n")
}
}
// formatHeaders appends an HTTP request's or response's header into a Buffer.
func writeHeader(b *bytes.Buffer, header map[string][]string) {
if len(header) == 0 {
b.WriteString(" (no headers)\n")
return
}
keys := make([]string, 0, len(header))
// Alphabetize the headers
for k := range header {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
// Redact the value of any Authorization header to prevent security information from persisting in logs
value := interface{}("REDACTED")
if !strings.EqualFold(k, "Authorization") {
value = header[k]
}
fmt.Fprintf(b, " %s: %+v\n", k, value)
}
}

View file

@ -1,9 +0,0 @@
package pipeline
// Package identity constants, surfaced in outgoing User-Agent strings.
const (
	// UserAgent is the string to be used in the user agent string when making requests.
	UserAgent = "azure-pipeline-go/" + Version

	// Version is the semantic version (see http://semver.org) of the pipeline package.
	Version = "0.2.1"
)

View file

@ -0,0 +1,514 @@
# Release History
## 1.3.0 (2023-01-06)
### Features Added
* Added `BearerTokenOptions.AuthorizationHandler` to enable extending `runtime.BearerTokenPolicy`
with custom authorization logic
* Added `Client` types and matching constructors to the `azcore` and `arm` packages. These represent a basic client for HTTP and ARM respectively.
### Other Changes
* Updated `internal` module to latest version.
* `policy/Request.SetBody()` allows replacing a request's body with an empty one
## 1.2.0 (2022-11-04)
### Features Added
* Added `ClientOptions.APIVersion` field, which overrides the default version a client
requests of the service, if the client supports this (all ARM clients do).
* Added package `tracing` that contains the building blocks for distributed tracing.
* Added field `TracingProvider` to type `policy.ClientOptions` that will be used to set the per-client tracing implementation.
### Bugs Fixed
* Fixed an issue in `runtime.SetMultipartFormData` to properly handle slices of `io.ReadSeekCloser`.
* Fixed the MaxRetryDelay default to be 60s.
* Failure to poll the state of an LRO will now return an `*azcore.ResponseError` for poller types that require this behavior.
* Fixed a bug in `runtime.NewPipeline` that would cause pipeline-specified allowed headers and query parameters to be lost.
### Other Changes
* Retain contents of read-only fields when sending requests.
## 1.1.4 (2022-10-06)
### Bugs Fixed
* Don't retry a request if the `Retry-After` delay is greater than the configured `RetryOptions.MaxRetryDelay`.
* `runtime.JoinPaths`: do not unconditionally add a forward slash before the query string
### Other Changes
* Removed logging URL from retry policy as it's redundant.
* Retry policy logs when it exits due to a non-retriable status code.
## 1.1.3 (2022-09-01)
### Bugs Fixed
* Adjusted the initial retry delay to 800ms per the Azure SDK guidelines.
## 1.1.2 (2022-08-09)
### Other Changes
* Fixed various doc bugs.
## 1.1.1 (2022-06-30)
### Bugs Fixed
* Avoid polling when a RELO LRO synchronously terminates.
## 1.1.0 (2022-06-03)
### Other Changes
* The one-second floor for `Frequency` when calling `PollUntilDone()` has been removed when running tests.
## 1.0.0 (2022-05-12)
### Features Added
* Added interface `runtime.PollingHandler` to support custom poller implementations.
* Added field `PollingHandler` of this type to `runtime.NewPollerOptions[T]` and `runtime.NewPollerFromResumeTokenOptions[T]`.
### Breaking Changes
* Renamed `cloud.Configuration.LoginEndpoint` to `.ActiveDirectoryAuthorityHost`
* Renamed `cloud.AzurePublicCloud` to `cloud.AzurePublic`
* Removed `AuxiliaryTenants` field from `arm/ClientOptions` and `arm/policy/BearerTokenOptions`
* Removed `TokenRequestOptions.TenantID`
* `Poller[T].PollUntilDone()` now takes an `options *PollUntilDoneOptions` param instead of `freq time.Duration`
* Removed `arm/runtime.Poller[T]`, `arm/runtime.NewPoller[T]()` and `arm/runtime.NewPollerFromResumeToken[T]()`
* Removed `arm/runtime.FinalStateVia` and related `const` values
* Renamed `runtime.PageProcessor` to `runtime.PagingHandler`
* The `arm/runtime.ProviderRepsonse` and `arm/runtime.Provider` types are no longer exported.
* Renamed `NewRequestIdPolicy()` to `NewRequestIDPolicy()`
* `TokenCredential.GetToken` now returns `AccessToken` by value.
### Bugs Fixed
* When per-try timeouts are enabled, only cancel the context after the body has been read and closed.
* The `Operation-Location` poller now properly handles `final-state-via` values.
* Improvements in `runtime.Poller[T]`
* `Poll()` shouldn't cache errors, allowing for additional retries when in a non-terminal state.
* `Result()` will cache the terminal result or error but not transient errors, allowing for additional retries.
### Other Changes
* Updated to latest `internal` module and absorbed breaking changes.
* Use `temporal.Resource` and deleted copy.
* The internal poller implementation has been refactored.
* The implementation in `internal/pollers/poller.go` has been merged into `runtime/poller.go` with some slight modification.
* The internal poller types had their methods updated to conform to the `runtime.PollingHandler` interface.
* The creation of resume tokens has been refactored so that implementers of `runtime.PollingHandler` don't need to know about it.
* `NewPipeline()` places policies from `ClientOptions` after policies from `PipelineOptions`
* Default User-Agent headers no longer include `azcore` version information
## 0.23.1 (2022-04-14)
### Bugs Fixed
* Include XML header when marshalling XML content.
* Handle XML namespaces when searching for error code.
* Handle `odata.error` when searching for error code.
## 0.23.0 (2022-04-04)
### Features Added
* Added `runtime.Pager[T any]` and `runtime.Poller[T any]` supporting types for central, generic, implementations.
* Added `cloud` package with a new API for cloud configuration
* Added `FinalStateVia` field to `runtime.NewPollerOptions[T any]` type.
### Breaking Changes
* Removed the `Poller` type-alias to the internal poller implementation.
* Added `Ptr[T any]` and `SliceOfPtrs[T any]` in the `to` package and removed all non-generic implementations.
* `NullValue` and `IsNullValue` now take a generic type parameter instead of an interface func parameter.
* Replaced `arm.Endpoint` with `cloud` API
* Removed the `endpoint` parameter from `NewRPRegistrationPolicy()`
* `arm/runtime.NewPipeline()` and `.NewRPRegistrationPolicy()` now return an `error`
* Refactored `NewPoller` and `NewPollerFromResumeToken` funcs in `arm/runtime` and `runtime` packages.
* Removed the `pollerID` parameter as it's no longer required.
* Created optional parameter structs and moved optional parameters into them.
* Changed `FinalStateVia` field to a `const` type.
### Other Changes
* Converted expiring resource and dependent types to use generics.
## 0.22.0 (2022-03-03)
### Features Added
* Added header `WWW-Authenticate` to the default allow-list of headers for logging.
* Added a pipeline policy that enables the retrieval of HTTP responses from API calls.
* Added `runtime.WithCaptureResponse` to enable the policy at the API level (off by default).
### Breaking Changes
* Moved `WithHTTPHeader` and `WithRetryOptions` from the `policy` package to the `runtime` package.
## 0.21.1 (2022-02-04)
### Bugs Fixed
* Restore response body after reading in `Poller.FinalResponse()`. (#16911)
* Fixed bug in `NullValue` that could lead to incorrect comparisons for empty maps/slices (#16969)
### Other Changes
* `BearerTokenPolicy` is more resilient to transient authentication failures. (#16789)
## 0.21.0 (2022-01-11)
### Features Added
* Added `AllowedHeaders` and `AllowedQueryParams` to `policy.LogOptions` to control which headers and query parameters are written to the logger.
* Added `azcore.ResponseError` type which is returned from APIs when a non-success HTTP status code is received.
### Breaking Changes
* Moved `[]policy.Policy` parameters of `arm/runtime.NewPipeline` and `runtime.NewPipeline` into a new struct, `runtime.PipelineOptions`
* Renamed `arm/ClientOptions.Host` to `.Endpoint`
* Moved `Request.SkipBodyDownload` method to function `runtime.SkipBodyDownload`
* Removed `azcore.HTTPResponse` interface type
* `arm.NewPoller()` and `runtime.NewPoller()` no longer require an `eu` parameter
* `runtime.NewResponseError()` no longer requires an `error` parameter
## 0.20.0 (2021-10-22)
### Breaking Changes
* Removed `arm.Connection`
* Removed `azcore.Credential` and `.NewAnonymousCredential()`
* `NewRPRegistrationPolicy` now requires an `azcore.TokenCredential`
* `runtime.NewPipeline` has a new signature that simplifies implementing custom authentication
* `arm/runtime.RegistrationOptions` embeds `policy.ClientOptions`
* Contents in the `log` package have been slightly renamed.
* Removed `AuthenticationOptions` in favor of `policy.BearerTokenOptions`
* Changed parameters for `NewBearerTokenPolicy()`
* Moved policy config options out of `arm/runtime` and into `arm/policy`
### Features Added
* Updating Documentation
* Added string typedef `arm.Endpoint` to provide a hint toward expected ARM client endpoints
* `azcore.ClientOptions` contains common pipeline configuration settings
* Added support for multi-tenant authorization in `arm/runtime`
* Require one second minimum when calling `PollUntilDone()`
### Bug Fixes
* Fixed a potential panic when creating the default Transporter.
* Close LRO initial response body when creating a poller.
* Fixed a panic when recursively cloning structs that contain time.Time.
## 0.19.0 (2021-08-25)
### Breaking Changes
* Split content out of `azcore` into various packages. The intent is to separate content based on its usage (common, uncommon, SDK authors).
* `azcore` has all core functionality.
* `log` contains facilities for configuring in-box logging.
* `policy` is used for configuring pipeline options and creating custom pipeline policies.
* `runtime` contains various helpers used by SDK authors and generated content.
* `streaming` has helpers for streaming IO operations.
* `NewTelemetryPolicy()` now requires module and version parameters and the `Value` option has been removed.
* As a result, the `Request.Telemetry()` method has been removed.
* The telemetry policy now includes the SDK prefix `azsdk-go-` so callers no longer need to provide it.
* The `*http.Request` in `runtime.Request` is no longer anonymously embedded. Use the `Raw()` method to access it.
* The `UserAgent` and `Version` constants have been made internal, `Module` and `Version` respectively.
### Bug Fixes
* Fixed an issue in the retry policy where the request body could be overwritten after a rewind.
### Other Changes
* Moved modules `armcore` and `to` content into `arm` and `to` packages respectively.
* The `Pipeline()` method on `armcore.Connection` has been replaced by `NewPipeline()` in `arm.Connection`. It takes module and version parameters used by the telemetry policy.
* Poller logic has been consolidated across ARM and core implementations.
* This required some changes to the internal interfaces for core pollers.
* The core poller types have been improved, including more logging and test coverage.
## 0.18.1 (2021-08-20)
### Features Added
* Adds an `ETag` type for comparing etags and handling etags on requests
* Simplifies the `requestBodyProgess` and `responseBodyProgress` into a single `progress` object
### Bugs Fixed
* `JoinPaths` will preserve query parameters encoded in the `root` url.
### Other Changes
* Bumps dependency on `internal` module to the latest version (v0.7.0)
## 0.18.0 (2021-07-29)
### Features Added
* Replaces methods from Logger type with two package methods for interacting with the logging functionality.
* `azcore.SetClassifications` replaces `azcore.Logger().SetClassifications`
* `azcore.SetListener` replaces `azcore.Logger().SetListener`
### Breaking Changes
* Removes `Logger` type from `azcore`
## 0.17.0 (2021-07-27)
### Features Added
* Adding TenantID to TokenRequestOptions (https://github.com/Azure/azure-sdk-for-go/pull/14879)
* Adding AuxiliaryTenants to AuthenticationOptions (https://github.com/Azure/azure-sdk-for-go/pull/15123)
### Breaking Changes
* Rename `AnonymousCredential` to `NewAnonymousCredential` (https://github.com/Azure/azure-sdk-for-go/pull/15104)
* rename `AuthenticationPolicyOptions` to `AuthenticationOptions` (https://github.com/Azure/azure-sdk-for-go/pull/15103)
* Make Header constants private (https://github.com/Azure/azure-sdk-for-go/pull/15038)
## 0.16.2 (2021-05-26)
### Features Added
* Improved support for byte arrays [#14715](https://github.com/Azure/azure-sdk-for-go/pull/14715)
## 0.16.1 (2021-05-19)
### Features Added
* Add license.txt to azcore module [#14682](https://github.com/Azure/azure-sdk-for-go/pull/14682)
## 0.16.0 (2021-05-07)
### Features Added
* Remove extra `*` in UnmarshalAsByteArray() [#14642](https://github.com/Azure/azure-sdk-for-go/pull/14642)
## 0.15.1 (2021-05-06)
### Features Added
* Cache the original request body on Request [#14634](https://github.com/Azure/azure-sdk-for-go/pull/14634)
## 0.15.0 (2021-05-05)
### Features Added
* Add support for null map and slice
* Export `Response.Payload` method
### Breaking Changes
* remove `Response.UnmarshalError` as it's no longer required
## 0.14.5 (2021-04-23)
### Features Added
* Add `UnmarshalError()` on `azcore.Response`
## 0.14.4 (2021-04-22)
### Features Added
* Support for basic LRO polling
* Added type `LROPoller` and supporting types for basic polling on long running operations.
* rename poller param and added doc comment
### Bugs Fixed
* Fixed content type detection bug in logging.
## 0.14.3 (2021-03-29)
### Features Added
* Add support for multi-part form data
* Added method `WriteMultipartFormData()` to Request.
## 0.14.2 (2021-03-17)
### Features Added
* Add support for encoding JSON null values
* Adds `NullValue()` and `IsNullValue()` functions for setting and detecting sentinel values used for encoding a JSON null.
* Documentation fixes
### Bugs Fixed
* Fixed improper error wrapping
## 0.14.1 (2021-02-08)
### Features Added
* Add `Pager` and `Poller` interfaces to azcore
## 0.14.0 (2021-01-12)
### Features Added
* Accept zero-value options for default values
* Specify zero-value options structs to accept default values.
* Remove `DefaultXxxOptions()` methods.
* Do not silently change TryTimeout on negative values
* make per-try timeout opt-in
## 0.13.4 (2020-11-20)
### Features Added
* Include telemetry string in User Agent
## 0.13.3 (2020-11-20)
### Features Added
* Updating response body handling on `azcore.Response`
## 0.13.2 (2020-11-13)
### Features Added
* Remove implementation of stateless policies as first-class functions.
## 0.13.1 (2020-11-05)
### Features Added
* Add `Telemetry()` method to `azcore.Request()`
## 0.13.0 (2020-10-14)
### Features Added
* Rename `log` to `logger` to avoid name collision with the log package.
* Documentation improvements
* Simplified `DefaultHTTPClientTransport()` implementation
## 0.12.1 (2020-10-13)
### Features Added
* Update `internal` module dependence to `v0.5.0`
## 0.12.0 (2020-10-08)
### Features Added
* Removed storage specific content
* Removed internal content to prevent API clutter
* Refactored various policy options to conform with our options pattern
## 0.11.0 (2020-09-22)
### Features Added
* Removed `LogError` and `LogSlowResponse`.
* Renamed `options` in `RequestLogOptions`.
* Updated `NewRequestLogPolicy()` to follow standard pattern for options.
* Refactored `requestLogPolicy.Do()` per above changes.
* Cleaned up/added logging in retry policy.
* Export `NewResponseError()`
* Fix `RequestLogOptions` comment
## 0.10.1 (2020-09-17)
### Features Added
* Add default console logger
* Default console logger writes to stderr. To enable it, set env var `AZURE_SDK_GO_LOGGING` to the value 'all'.
* Added `Logger.Writef()` to reduce the need for `ShouldLog()` checks.
* Add `LogLongRunningOperation`
## 0.10.0 (2020-09-10)
### Features Added
* The `request` and `transport` interfaces have been refactored to align with the patterns in the standard library.
* `NewRequest()` now uses `http.NewRequestWithContext()` and performs additional validation, it also requires a context parameter.
* The `Policy` and `Transport` interfaces have had their context parameter removed as the context is associated with the underlying `http.Request`.
* `Pipeline.Do()` will validate the HTTP request before sending it through the pipeline, avoiding retries on a malformed request.
* The `Retrier` interface has been replaced with the `NonRetriableError` interface, and the retry policy updated to test for this.
* `Request.SetBody()` now requires a content type parameter for setting the request's MIME type.
* moved path concatenation into `JoinPaths()` func
## 0.9.6 (2020-08-18)
### Features Added
* Improvements to body download policy
* Always download the response body for error responses, i.e. HTTP status codes >= 400.
* Simplify variable declarations
## 0.9.5 (2020-08-11)
### Features Added
* Set the Content-Length header in `Request.SetBody`
## 0.9.4 (2020-08-03)
### Features Added
* Fix cancellation of per try timeout
* Per try timeout is used to ensure that an HTTP operation doesn't take too long, e.g. that a GET on some URL doesn't take an inordinate amount of time.
* Once the HTTP request returns, the per try timeout should be cancelled, not when the response has been read to completion.
* Do not drain response body if there are no more retries
* Do not retry non-idempotent operations when body download fails
## 0.9.3 (2020-07-28)
### Features Added
* Add support for custom HTTP request headers
* Inserts an internal policy into the pipeline that can extract HTTP header values from the caller's context, adding them to the request.
* Use `azcore.WithHTTPHeader` to add HTTP headers to a context.
* Remove method specific to Go 1.14
## 0.9.2 (2020-07-28)
### Features Added
* Omit read-only content from request payloads
* If any field in a payload's object graph contains `azure:"ro"`, make a clone of the object graph, omitting all fields with this annotation.
* Verify no fields were dropped
* Handle embedded struct types
* Added test for cloning by value
* Add messages to failures
## 0.9.1 (2020-07-22)
### Features Added
* Updated dependency on internal module to fix race condition.
## 0.9.0 (2020-07-09)
### Features Added
* Add `HTTPResponse` interface to be used by callers to access the raw HTTP response from an error in the event of an API call failure.
* Updated `sdk/internal` dependency to latest version.
* Rename package alias
## 0.8.2 (2020-06-29)
### Features Added
* Added missing documentation comments
### Bugs Fixed
* Fixed a bug in body download policy.
## 0.8.1 (2020-06-26)
### Features Added
* Miscellaneous clean-up reported by linters
## 0.8.0 (2020-06-01)
### Features Added
* Differentiate between standard and URL encoding.
## 0.7.1 (2020-05-27)
### Features Added
* Add support for for base64 encoding and decoding of payloads.
## 0.7.0 (2020-05-12)
### Features Added
* Change `RetryAfter()` to a function.
## 0.6.0 (2020-04-29)
### Features Added
* Updating `RetryAfter` to only return the detail in the RetryAfter header
## 0.5.0 (2020-03-23)
### Features Added
* Export `TransportFunc`
### Breaking Changes
* Removed `IterationDone`
## 0.4.1 (2020-02-25)
### Features Added
* Ensure per-try timeout is properly cancelled
* Explicitly call cancel the per-try timeout when the response body has been read/closed by the body download policy.
* When the response body is returned to the caller for reading/closing, wrap it in a `responseBodyReader` that will cancel the timeout when the body is closed.
* `Logger.Should()` will return false if no listener is set.
## 0.4.0 (2020-02-18)
### Features Added
* Enable custom `RetryOptions` to be specified per API call
* Added `WithRetryOptions()` that adds a custom `RetryOptions` to the provided context, allowing custom settings per API call.
* Remove 429 from the list of default HTTP status codes for retry.
* Change StatusCodesForRetry to a slice so consumers can append to it.
* Added support for retry-after in HTTP-date format.
* Cleaned up some comments specific to storage.
* Remove `Request.SetQueryParam()`
* Renamed `MaxTries` to `MaxRetries`
## 0.3.0 (2020-01-16)
### Features Added
* Added `DefaultRetryOptions` to create initialized default options.
### Breaking Changes
* Removed `Response.CheckStatusCode()`
## 0.2.0 (2020-01-15)
### Features Added
* Add support for marshalling and unmarshalling JSON
* Removed `Response.Payload` field
* Exit early when unmarshalling if there is no payload
## 0.1.0 (2020-01-10)
### Features Added
* Initial release

View file

@ -1,8 +1,6 @@
MIT License
Copyright (c) 2014 mattn
Copyright (c) 2017 oliverpool
Copyright (c) 2019 Adele Reed
Copyright (c) Microsoft Corporation.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@ -20,4 +18,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
SOFTWARE

View file

@ -0,0 +1,39 @@
# Azure Core Client Module for Go
[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azcore)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore)
[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/go%20-%20azcore%20-%20ci?branchName=main)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=1843&branchName=main)
[![Code Coverage](https://img.shields.io/azure-devops/coverage/azure-sdk/public/1843/main)](https://img.shields.io/azure-devops/coverage/azure-sdk/public/1843/main)
The `azcore` module provides a set of common interfaces and types for Go SDK client modules.
These modules follow the [Azure SDK Design Guidelines for Go](https://azure.github.io/azure-sdk/golang_introduction.html).
## Getting started
This project uses [Go modules](https://github.com/golang/go/wiki/Modules) for versioning and dependency management.
Typically, you will not need to explicitly install `azcore` as it will be installed as a client module dependency.
To add the latest version to your `go.mod` file, execute the following command.
```bash
go get github.com/Azure/azure-sdk-for-go/sdk/azcore
```
General documentation and examples can be found on [pkg.go.dev](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore).
## Contributing
This project welcomes contributions and suggestions. Most contributions require
you to agree to a Contributor License Agreement (CLA) declaring that you have
the right to, and actually do, grant us the rights to use your contribution.
For details, visit [https://cla.microsoft.com](https://cla.microsoft.com).
When you submit a pull request, a CLA-bot will automatically determine whether
you need to provide a CLA and decorate the PR appropriately (e.g., label,
comment). Simply follow the instructions provided by the bot. You will only
need to do this once across all repos using our CLA.
This project has adopted the
[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information, see the
[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any
additional questions or comments.

View file

@ -0,0 +1,29 @@
# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file.

# Build pushes to the long-lived branches when azcore or the shared
# engineering-system files change.
trigger:
  branches:
    include:
      - main
      - feature/*
      - hotfix/*
      - release/*
  paths:
    include:
      - sdk/azcore/
      - eng/

# Run the same validation for pull requests targeting those branches.
pr:
  branches:
    include:
      - main
      - feature/*
      - hotfix/*
      - release/*
  paths:
    include:
      - sdk/azcore/
      - eng/

stages:
  - template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml
    parameters:
      ServiceDirectory: azcore

View file

@ -0,0 +1,44 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

package cloud

// Predefined configurations for the well-known Azure clouds.
var (
	// AzureChina contains configuration for Azure China.
	AzureChina = Configuration{
		ActiveDirectoryAuthorityHost: "https://login.chinacloudapi.cn/", Services: map[ServiceName]ServiceConfiguration{},
	}
	// AzureGovernment contains configuration for Azure Government.
	AzureGovernment = Configuration{
		ActiveDirectoryAuthorityHost: "https://login.microsoftonline.us/", Services: map[ServiceName]ServiceConfiguration{},
	}
	// AzurePublic contains configuration for Azure Public Cloud.
	AzurePublic = Configuration{
		ActiveDirectoryAuthorityHost: "https://login.microsoftonline.com/", Services: map[ServiceName]ServiceConfiguration{},
	}
)

// ServiceName identifies a cloud service.
type ServiceName string

// ResourceManager is a global constant identifying Azure Resource Manager.
const ResourceManager ServiceName = "resourceManager"

// ServiceConfiguration configures a specific cloud service such as Azure Resource Manager.
type ServiceConfiguration struct {
	// Audience is the audience the client will request for its access tokens.
	Audience string

	// Endpoint is the service's base URL.
	Endpoint string
}

// Configuration configures a cloud.
type Configuration struct {
	// ActiveDirectoryAuthorityHost is the base URL of the cloud's Azure Active Directory.
	ActiveDirectoryAuthorityHost string

	// Services contains configuration for the cloud's services.
	Services map[ServiceName]ServiceConfiguration
}

View file

@ -0,0 +1,53 @@
//go:build go1.16
// +build go1.16
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
/*
Package cloud implements a configuration API for applications deployed to sovereign or private Azure clouds.
Azure SDK client configuration defaults are appropriate for Azure Public Cloud (sometimes referred to as
"Azure Commercial" or simply "Microsoft Azure"). This package enables applications deployed to other
Azure Clouds to configure clients appropriately.
This package contains predefined configuration for well-known sovereign clouds such as Azure Government and
Azure China. Azure SDK clients accept this configuration via the Cloud field of azcore.ClientOptions. For
example, configuring a credential and ARM client for Azure Government:
opts := azcore.ClientOptions{Cloud: cloud.AzureGovernment}
cred, err := azidentity.NewDefaultAzureCredential(
&azidentity.DefaultAzureCredentialOptions{ClientOptions: opts},
)
handle(err)
client, err := armsubscription.NewClient(
cred, &arm.ClientOptions{ClientOptions: opts},
)
handle(err)
Applications deployed to a private cloud such as Azure Stack create a Configuration object with
appropriate values:
c := cloud.Configuration{
ActiveDirectoryAuthorityHost: "https://...",
Services: map[cloud.ServiceName]cloud.ServiceConfiguration{
cloud.ResourceManager: {
Audience: "...",
Endpoint: "https://...",
},
},
}
opts := azcore.ClientOptions{Cloud: c}
cred, err := azidentity.NewDefaultAzureCredential(
&azidentity.DefaultAzureCredentialOptions{ClientOptions: opts},
)
handle(err)
client, err := armsubscription.NewClient(
cred, &arm.ClientOptions{ClientOptions: opts},
)
handle(err)
*/
package cloud

View file

@ -0,0 +1,113 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azcore
import (
"reflect"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing"
)
// AccessToken represents an Azure service bearer access token with expiry information.
type AccessToken = exported.AccessToken

// TokenCredential represents a credential capable of providing an OAuth token.
type TokenCredential = exported.TokenCredential

// nullables holds the sentinel values used to send nulls, keyed by the
// value's reflect.Type. See NullValue and IsNullValue.
var nullables map[reflect.Type]interface{} = map[reflect.Type]interface{}{}
// NullValue is used to send an explicit 'null' within a request.
// This is typically used in JSON-MERGE-PATCH operations to delete a value.
func NullValue[T any]() T {
	t := shared.TypeOfT[T]()
	sentinel, cached := nullables[t]
	if !cached {
		var rv reflect.Value
		switch t.Kind() {
		case reflect.Map:
			rv = reflect.MakeMap(t)
		case reflect.Slice:
			// empty slices appear to all point to the same data block
			// which causes comparisons to become ambiguous. so we create
			// a slice with len/cap of one which ensures a unique address.
			rv = reflect.MakeSlice(t, 1, 1)
		default:
			rv = reflect.New(t.Elem())
		}
		sentinel = rv.Interface()
		nullables[t] = sentinel
	}
	// return the cached sentinel object for this type
	return sentinel.(T)
}
// IsNullValue returns true if the field contains a null sentinel value.
// This is used by custom marshallers to properly encode a null value.
func IsNullValue[T any](v T) bool {
	// look up the sentinel object registered for this value's type, if any
	sentinel, found := nullables[reflect.TypeOf(v)]
	if !found {
		// no sentinel object for this type
		return false
	}
	// v is null only if it points at the sentinel's storage.
	// NOTE: maps and slices can only be compared to nil, else you get
	// a runtime panic. so we compare addresses instead.
	return reflect.ValueOf(sentinel).Pointer() == reflect.ValueOf(v).Pointer()
}
// ClientOptions contains configuration settings for a client's pipeline.
// It is an alias for policy.ClientOptions.
type ClientOptions = policy.ClientOptions

// Client is a basic HTTP client. It consists of a pipeline and tracing provider.
type Client struct {
	pl runtime.Pipeline // pipeline that sends the client's requests
	tr tracing.Tracer   // tracer created from the options' TracingProvider
}
// NewClient creates a new Client instance with the provided values.
//   - clientName - the fully qualified name of the client ("package.Client"); this is used by the tracing provider when creating spans
//   - moduleVersion - the semantic version of the containing module; used by the telemetry policy
//   - plOpts - pipeline configuration options; can be the zero-value
//   - options - optional client configurations; pass nil to accept the default values
func NewClient(clientName, moduleVersion string, plOpts runtime.PipelineOptions, options *ClientOptions) (*Client, error) {
	pkg, err := shared.ExtractPackageName(clientName)
	if err != nil {
		return nil, err
	}
	if options == nil {
		options = &ClientOptions{}
	}
	// the module version only surfaces via telemetry, so it's only
	// validated when telemetry hasn't been disabled
	if !options.Telemetry.Disabled {
		if verr := shared.ValidateModVer(moduleVersion); verr != nil {
			return nil, verr
		}
	}
	return &Client{
		pl: runtime.NewPipeline(pkg, moduleVersion, plOpts, options),
		tr: options.TracingProvider.NewTracer(clientName, moduleVersion),
	}, nil
}
// Pipeline returns the runtime.Pipeline this client uses to send requests.
func (c *Client) Pipeline() runtime.Pipeline {
	return c.pl
}
// Tracer returns the tracing.Tracer for this client, created from the
// TracingProvider passed to NewClient.
func (c *Client) Tracer() tracing.Tracer {
	return c.tr
}

View file

@ -0,0 +1,257 @@
//go:build go1.18
// +build go1.18
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
// license that can be found in the LICENSE file.
/*
Package azcore implements an HTTP request/response middleware pipeline used by Azure SDK clients.
The middleware consists of three components.
- One or more Policy instances.
- A Transporter instance.
- A Pipeline instance that combines the Policy and Transporter instances.
# Implementing the Policy Interface
A Policy can be implemented in two ways; as a first-class function for a stateless Policy, or as
a method on a type for a stateful Policy. Note that HTTP requests made via the same pipeline share
the same Policy instances, so if a Policy mutates its state it MUST be properly synchronized to
avoid race conditions.
A Policy's Do method is called when an HTTP request wants to be sent over the network. The Do method can
perform any operation(s) it desires. For example, it can log the outgoing request, mutate the URL, headers,
and/or query parameters, inject a failure, etc. Once the Policy has successfully completed its request
work, it must call the Next() method on the *policy.Request instance in order to pass the request to the
next Policy in the chain.
When an HTTP response comes back, the Policy then gets a chance to process the response/error. The Policy instance
can log the response, retry the operation if it failed due to a transient error or timeout, unmarshal the response
body, etc. Once the Policy has successfully completed its response work, it must return the *http.Response
and error instances to its caller.
Template for implementing a stateless Policy:
type policyFunc func(*policy.Request) (*http.Response, error)
// Do implements the Policy interface on policyFunc.
func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) {
return pf(req)
}
func NewMyStatelessPolicy() policy.Policy {
return policyFunc(func(req *policy.Request) (*http.Response, error) {
// TODO: mutate/process Request here
// forward Request to next Policy & get Response/error
resp, err := req.Next()
// TODO: mutate/process Response/error here
// return Response/error to previous Policy
return resp, err
})
}
Template for implementing a stateful Policy:
type MyStatefulPolicy struct {
// TODO: add configuration/setting fields here
}
// TODO: add initialization args to NewMyStatefulPolicy()
func NewMyStatefulPolicy() policy.Policy {
return &MyStatefulPolicy{
// TODO: initialize configuration/setting fields here
}
}
func (p *MyStatefulPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
// TODO: mutate/process Request here
// forward Request to next Policy & get Response/error
resp, err := req.Next()
// TODO: mutate/process Response/error here
// return Response/error to previous Policy
return resp, err
}
# Implementing the Transporter Interface
The Transporter interface is responsible for sending the HTTP request and returning the corresponding
HTTP response or error. The Transporter is invoked by the last Policy in the chain. The default Transporter
implementation uses a shared http.Client from the standard library.
The same stateful/stateless rules for Policy implementations apply to Transporter implementations.
# Using Policy and Transporter Instances Via a Pipeline
To use the Policy and Transporter instances, an application passes them to the runtime.NewPipeline function.
func NewPipeline(transport Transporter, policies ...Policy) Pipeline
The specified Policy instances form a chain and are invoked in the order provided to NewPipeline
followed by the Transporter.
Once the Pipeline has been created, create a runtime.Request instance and pass it to Pipeline's Do method.
func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error)
	func (p Pipeline) Do(req *Request) (*http.Response, error)
The Pipeline.Do method sends the specified Request through the chain of Policy and Transporter
instances. The response/error is then sent through the same chain of Policy instances in reverse
order. For example, assuming there are Policy types PolicyA, PolicyB, and PolicyC along with
TransportA.
pipeline := NewPipeline(TransportA, PolicyA, PolicyB, PolicyC)
The flow of Request and Response looks like the following:
policy.Request -> PolicyA -> PolicyB -> PolicyC -> TransportA -----+
|
HTTP(S) endpoint
|
caller <--------- PolicyA <- PolicyB <- PolicyC <- http.Response-+
# Creating a Request Instance
The Request instance passed to Pipeline's Do method is a wrapper around an *http.Request. It also
contains some internal state and provides various convenience methods. You create a Request instance
by calling the runtime.NewRequest function:
func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error)
If the Request should contain a body, call the SetBody method.
func (req *Request) SetBody(body ReadSeekCloser, contentType string) error
A seekable stream is required so that upon retry, the retry Policy instance can seek the stream
back to the beginning before retrying the network request and re-uploading the body.
# Sending an Explicit Null
Operations like JSON-MERGE-PATCH send a JSON null to indicate a value should be deleted.
{
"delete-me": null
}
This requirement conflicts with the SDK's default marshalling that specifies "omitempty" as
a means to resolve the ambiguity between a field to be excluded and its zero-value.
type Widget struct {
Name *string `json:",omitempty"`
Count *int `json:",omitempty"`
}
In the above example, Name and Count are defined as pointer-to-type to disambiguate between
a missing value (nil) and a zero-value (0) which might have semantic differences.
In a PATCH operation, any fields left as nil are to have their values preserved. When updating
a Widget's count, one simply specifies the new value for Count, leaving Name nil.
To fulfill the requirement for sending a JSON null, the NullValue() function can be used.
w := Widget{
Count: azcore.NullValue[*int](),
}
This sends an explicit "null" for Count, indicating that any current value for Count should be deleted.
# Processing the Response
When the HTTP response is received, the *http.Response is returned directly. Each Policy instance
can inspect/mutate the *http.Response.
# Built-in Logging
To enable logging, set environment variable AZURE_SDK_GO_LOGGING to "all" before executing your program.
By default the logger writes to stderr. This can be customized by calling log.SetListener, providing
a callback that writes to the desired location. Any custom logging implementation MUST provide its
own synchronization to handle concurrent invocations.
See the docs for the log package for further details.
# Pageable Operations
Pageable operations return potentially large data sets spread over multiple GET requests. The result of
each GET is a "page" of data consisting of a slice of items.
Pageable operations can be identified by their New*Pager naming convention and return type of *runtime.Pager[T].
func (c *WidgetClient) NewListWidgetsPager(o *Options) *runtime.Pager[PageResponse]
The call to WidgetClient.NewListWidgetsPager() returns an instance of *runtime.Pager[T] for fetching pages
and determining if there are more pages to fetch. No IO calls are made until the NextPage() method is invoked.
pager := widgetClient.NewListWidgetsPager(nil)
for pager.More() {
page, err := pager.NextPage(context.TODO())
// handle err
for _, widget := range page.Values {
// process widget
}
}
# Long-Running Operations
Long-running operations (LROs) are operations consisting of an initial request to start the operation followed
by polling to determine when the operation has reached a terminal state. An LRO's terminal state is one
of the following values.
- Succeeded - the LRO completed successfully
- Failed - the LRO failed to complete
- Canceled - the LRO was canceled
LROs can be identified by their Begin* prefix and their return type of *runtime.Poller[T].
func (c *WidgetClient) BeginCreateOrUpdate(ctx context.Context, w Widget, o *Options) (*runtime.Poller[Response], error)
When a call to WidgetClient.BeginCreateOrUpdate() returns a nil error, it means that the LRO has started.
It does _not_ mean that the widget has been created or updated (or failed to be created/updated).
The *runtime.Poller[T] provides APIs for determining the state of the LRO. To wait for the LRO to complete,
call the PollUntilDone() method.
poller, err := widgetClient.BeginCreateOrUpdate(context.TODO(), Widget{}, nil)
// handle err
result, err := poller.PollUntilDone(context.TODO(), nil)
// handle err
// use result
The call to PollUntilDone() will block the current goroutine until the LRO has reached a terminal state or the
context is canceled/timed out.
Note that LROs can take anywhere from several seconds to several minutes. The duration is operation-dependent. Due to
this variant behavior, pollers do _not_ have a preconfigured time-out. Use a context with the appropriate cancellation
mechanism as required.
# Resume Tokens
Pollers provide the ability to serialize their state into a "resume token" which can be used by another process to
recreate the poller. This is achieved via the runtime.Poller[T].ResumeToken() method.
token, err := poller.ResumeToken()
// handle error
Note that a token can only be obtained for a poller that's in a non-terminal state. Also note that any subsequent calls
to poller.Poll() might change the poller's state. In this case, a new token should be created.
After the token has been obtained, it can be used to recreate an instance of the originating poller.
poller, err := widgetClient.BeginCreateOrUpdate(nil, Widget{}, &Options{
ResumeToken: token,
})
When resuming a poller, no IO is performed, and zero-value arguments can be used for everything but the Options.ResumeToken.
Resume tokens are unique per service client and operation. Attempting to resume a poller for LRO BeginB() with a token from LRO
BeginA() will result in an error.
*/
package azcore

View file

@ -0,0 +1,14 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azcore
import "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
// ResponseError is returned when a request is made to a service and
// the service returns a non-success HTTP status code.
// Use errors.As() to access this type in the error chain.
// It is a type alias (not a new type) so values produced by the internal
// exported package are directly usable as this public type.
type ResponseError = exported.ResponseError

View file

@ -0,0 +1,48 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azcore
import (
"strings"
)
// ETag is a property used for optimistic concurrency during updates
// ETag is a validator based on https://tools.ietf.org/html/rfc7232#section-2.3.2
// An ETag can be empty ("").
type ETag string

// ETagAny is an ETag that represents everything, the value is "*"
const ETagAny ETag = "*"

// Equals does a strong comparison of two ETags. Equals returns true when both
// ETags are not weak and the values of the underlying strings are equal.
func (e ETag) Equals(other ETag) bool {
	if e.IsWeak() || other.IsWeak() {
		return false
	}
	return e == other
}

// WeakEquals does a weak comparison of two ETags. Two ETags are equivalent if their opaque-tags match
// character-by-character, regardless of either or both being tagged as "weak".
func (e ETag) WeakEquals(other ETag) bool {
	// opaque strips the leading W/ marker from a weak ETag, leaving the quoted opaque-tag.
	opaque := func(t ETag) ETag {
		if t.IsWeak() {
			return t[2:]
		}
		return t
	}
	return opaque(e) == opaque(other)
}

// IsWeak specifies whether the ETag is strong or weak.
func (e ETag) IsWeak() bool {
	// a weak ETag has the form W/"opaque-tag"; the shortest possible weak value is W/""
	if len(e) < 4 {
		return false
	}
	return strings.HasPrefix(string(e), `W/"`) && strings.HasSuffix(string(e), `"`)
}

View file

@ -0,0 +1,83 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package exported
import (
"context"
"io"
"net/http"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
)
type nopCloser struct {
io.ReadSeeker
}
func (n nopCloser) Close() error {
return nil
}
// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker.
// Exported as streaming.NopCloser().
func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {
return nopCloser{rs}
}
// HasStatusCode returns true if the Response's status code is one of the specified values.
// Exported as runtime.HasStatusCode().
func HasStatusCode(resp *http.Response, statusCodes ...int) bool {
if resp == nil {
return false
}
for _, sc := range statusCodes {
if resp.StatusCode == sc {
return true
}
}
return false
}
// Payload reads and returns the response body or an error.
// On a successful read, the response body is cached.
// Subsequent reads will access the cached value.
// Exported as runtime.Payload().
func Payload(resp *http.Response) ([]byte, error) {
	// r.Body won't be a nopClosingBytesReader if downloading was skipped
	if buf, ok := resp.Body.(*shared.NopClosingBytesReader); ok {
		// a previous call already cached the body; return the buffered bytes
		return buf.Bytes(), nil
	}
	bytesBody, err := io.ReadAll(resp.Body)
	// the original body has been fully consumed; close it before it's replaced below
	resp.Body.Close()
	if err != nil {
		return nil, err
	}
	// cache the body so later callers (and later policies) can re-read it
	resp.Body = shared.NewNopClosingBytesReader(bytesBody)
	return bytesBody, nil
}
// AccessToken represents an Azure service bearer access token with expiry information.
// Exported as azcore.AccessToken.
type AccessToken struct {
	Token     string
	ExpiresOn time.Time
}

// TokenRequestOptions contains specific parameters that may be used by credential types when attempting to get a token.
// Exported as policy.TokenRequestOptions.
type TokenRequestOptions struct {
	// Scopes contains the list of permission scopes required for the token.
	Scopes []string
}

// TokenCredential represents a credential capable of providing an OAuth token.
// Exported as azcore.TokenCredential.
type TokenCredential interface {
	// GetToken requests an access token for the specified set of scopes.
	GetToken(ctx context.Context, options TokenRequestOptions) (AccessToken, error)
}

View file

@ -0,0 +1,97 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package exported
import (
"errors"
"fmt"
"net/http"
"golang.org/x/net/http/httpguts"
)
// Policy represents an extensibility point for the Pipeline that can mutate the specified
// Request and react to the received Response.
// Policy instances are shared across requests made via the same pipeline, so a
// stateful Policy must synchronize access to its own state.
// Exported as policy.Policy.
type Policy interface {
	// Do applies the policy to the specified Request. When implementing a Policy, mutate the
	// request before calling req.Next() to move on to the next policy, and respond to the result
	// before returning to the caller.
	Do(req *Request) (*http.Response, error)
}
// Pipeline represents a primitive for sending HTTP requests and receiving responses.
// Its behavior can be extended by specifying policies during construction.
// Exported as runtime.Pipeline.
type Pipeline struct {
	// the ordered policy chain; the final element is always the transport policy (see NewPipeline)
	policies []Policy
}
// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses.
// It is invoked by the last policy in the chain (the transport policy).
// Exported as policy.Transporter.
type Transporter interface {
	// Do sends the HTTP request and returns the HTTP response or error.
	Do(req *http.Request) (*http.Response, error)
}
// transportPolicy adapts a Transporter so that it can terminate a policy chain.
type transportPolicy struct {
	trans Transporter
}

// Do sends the request using the wrapped Transporter. A nil response with a
// nil error is converted into an error so the retry policy will retry.
func (tp transportPolicy) Do(req *Request) (*http.Response, error) {
	if tp.trans == nil {
		return nil, errors.New("missing transporter")
	}
	resp, err := tp.trans.Do(req.Raw())
	if err != nil {
		return nil, err
	}
	if resp == nil {
		// there was no response and no error (rare but can happen)
		// this ensures the retry policy will retry the request
		return nil, errors.New("received nil response")
	}
	return resp, nil
}
// NewPipeline creates a new Pipeline object from the specified Policies.
// Not directly exported, but used as part of runtime.NewPipeline().
func NewPipeline(transport Transporter, policies ...Policy) Pipeline {
	// copy the policies into a fresh slice before appending the transport
	// policy; appending directly to the variadic argument could clobber an
	// element of a backing array shared with the caller.
	// the transport policy must always be the last in the slice.
	chain := make([]Policy, 0, len(policies)+1)
	chain = append(chain, policies...)
	chain = append(chain, transportPolicy{trans: transport})
	return Pipeline{
		policies: chain,
	}
}
// Do is called for each and every HTTP request. It passes the request through all
// the Policy objects (which can transform the Request's URL/query parameters/headers)
// and ultimately sends the transformed HTTP request over the network.
func (p Pipeline) Do(req *Request) (*http.Response, error) {
	if req == nil {
		return nil, errors.New("request cannot be nil")
	}
	// validate all header names/values before any policy runs; net/http would
	// reject them later anyway, but failing here yields a clearer error.
	// check copied from Transport.roundTrip()
	for k, vv := range req.Raw().Header {
		if !httpguts.ValidHeaderFieldName(k) {
			// close the body on the error path so the stream isn't leaked
			if req.Raw().Body != nil {
				req.Raw().Body.Close()
			}
			return nil, fmt.Errorf("invalid header field name %q", k)
		}
		for _, v := range vv {
			if !httpguts.ValidHeaderFieldValue(v) {
				if req.Raw().Body != nil {
					req.Raw().Body.Close()
				}
				return nil, fmt.Errorf("invalid header field value %q for key %v", v, k)
			}
		}
	}
	// install this pipeline's policy chain on the request and invoke the first
	// policy; the last element is always the transport policy (see NewPipeline).
	req.policies = p.policies
	return req.Next()
}

View file

@ -0,0 +1,170 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package exported
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"reflect"
"strconv"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
)
// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline.
// Don't use this type directly, use NewRequest() instead.
// Exported as policy.Request.
// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline.
// Don't use this type directly, use NewRequest() instead.
// Exported as policy.Request.
type Request struct {
	req      *http.Request
	body     io.ReadSeekCloser // original seekable body, kept so retries can rewind it (see RewindBody)
	policies []Policy          // remaining policies; the head is invoked by Next()
	values   opValues          // per-operation values, keyed by dynamic type (see SetOperationValue)
}
// opValues stores at most one value per dynamic type.
type opValues map[reflect.Type]interface{}

// set adds/changes a value, keyed by its dynamic type.
func (ov opValues) set(value interface{}) {
	ov[reflect.TypeOf(value)] = value
}

// get looks for a value previously stored via set. The argument must be a
// pointer; on a hit the stored value is written through it and true is returned.
func (ov opValues) get(value interface{}) bool {
	target := reflect.ValueOf(value).Elem()
	stored, found := ov[target.Type()]
	if found {
		target.Set(reflect.ValueOf(stored))
	}
	return found
}
// NewRequest creates a new Request with the specified input.
// The endpoint must include a host and use the http or https scheme.
// Exported as runtime.NewRequest().
func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) {
	req, err := http.NewRequestWithContext(ctx, httpMethod, endpoint, nil)
	if err != nil {
		return nil, err
	}
	if req.URL.Host == "" {
		return nil, errors.New("no Host in request URL")
	}
	if scheme := req.URL.Scheme; scheme != "http" && scheme != "https" {
		return nil, fmt.Errorf("unsupported protocol scheme %s", scheme)
	}
	return &Request{req: req}, nil
}
// Body returns the original body specified when the Request was created.
// This may differ from Raw().Body if the latter was replaced, e.g. by
// httputil.DumpRequest and friends (see SetBody).
func (req *Request) Body() io.ReadSeekCloser {
	return req.body
}

// Raw returns the underlying HTTP request.
func (req *Request) Raw() *http.Request {
	return req.req
}
// Next calls the next policy in the pipeline.
// If there are no more policies, nil and an error are returned.
// This method is intended to be called from pipeline policies.
// To send a request through a pipeline call Pipeline.Do().
func (req *Request) Next() (*http.Response, error) {
	if len(req.policies) == 0 {
		return nil, errors.New("no more policies")
	}
	// hand the head policy a shallow copy with itself popped from the chain,
	// keeping each policy's view of the remaining chain independent.
	nextReq := *req
	nextReq.policies = req.policies[1:]
	return req.policies[0].Do(&nextReq)
}
// SetOperationValue adds/changes a mutable key/value associated with a single operation.
// The value is keyed by its dynamic type; storing a second value of the same type replaces the first.
func (req *Request) SetOperationValue(value interface{}) {
	if req.values == nil {
		// lazily allocate the map on first use
		req.values = opValues{}
	}
	req.values.set(value)
}

// OperationValue looks for a value set by SetOperationValue().
// The value argument must be a pointer; on success the stored value is copied into it.
func (req *Request) OperationValue(value interface{}) bool {
	if req.values == nil {
		return false
	}
	return req.values.get(value)
}
// SetBody sets the specified ReadSeekCloser as the HTTP request body, and sets Content-Type and Content-Length
// accordingly. If the ReadSeekCloser is nil or empty, Content-Length won't be set. If contentType is "",
// Content-Type won't be set.
// The body must be seekable so a retry policy can rewind it before re-sending (see RewindBody).
func (req *Request) SetBody(body io.ReadSeekCloser, contentType string) error {
	var err error
	var size int64
	if body != nil {
		size, err = body.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size
		if err != nil {
			return err
		}
	}
	if size == 0 {
		// treat an empty stream the same as a nil one: assign req a nil body
		body = nil
		// RFC 9110 specifies a client shouldn't set Content-Length on a request containing no content
		// (Del is a no-op when the header has no value)
		req.req.Header.Del(shared.HeaderContentLength)
	} else {
		_, err = body.Seek(0, io.SeekStart)
		if err != nil {
			return err
		}
		req.req.Header.Set(shared.HeaderContentLength, strconv.FormatInt(size, 10))
		// GetBody lets the HTTP transport re-obtain a fresh copy of the body when it needs one
		req.Raw().GetBody = func() (io.ReadCloser, error) {
			_, err := body.Seek(0, io.SeekStart) // Seek back to the beginning of the stream
			return body, err
		}
	}
	// keep a copy of the body argument. this is to handle cases
	// where req.Body is replaced, e.g. httputil.DumpRequest and friends.
	req.body = body
	req.req.Body = body
	req.req.ContentLength = size
	if contentType == "" {
		// Del is a no-op when the header has no value
		req.req.Header.Del(shared.HeaderContentType)
	} else {
		req.req.Header.Set(shared.HeaderContentType, contentType)
	}
	return nil
}
// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation.
// It also restores the original body in case the raw request's body was replaced.
func (req *Request) RewindBody() error {
	if req.body == nil {
		return nil
	}
	// restore the body, then reset the stream back to the beginning
	req.req.Body = req.body
	_, err := req.body.Seek(0, io.SeekStart)
	return err
}
// Close closes the request body. A nil body is a no-op.
func (req *Request) Close() error {
	if b := req.body; b != nil {
		return b.Close()
	}
	return nil
}
// Clone returns a deep copy of the request with its context changed to ctx.
// Note: only the underlying *http.Request is cloned; the body, policies, and
// operation values are shared with the original via a shallow struct copy.
func (req *Request) Clone(ctx context.Context) *Request {
	r2 := *req
	r2.req = req.req.Clone(ctx)
	return &r2
}

View file

@ -0,0 +1,142 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package exported
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"regexp"
)
// NewResponseError creates a new *ResponseError from the provided HTTP response.
// The error code is taken from the x-ms-error-code header when present,
// otherwise it's extracted from the (JSON or XML) response body.
// Exported as runtime.NewResponseError().
func NewResponseError(resp *http.Response) error {
	respErr := &ResponseError{
		StatusCode:  resp.StatusCode,
		RawResponse: resp,
	}
	// prefer the error code in the response header
	if ec := resp.Header.Get("x-ms-error-code"); ec != "" {
		respErr.ErrorCode = ec
		return respErr
	}
	// if we didn't get x-ms-error-code, check in the response body
	body, err := Payload(resp)
	if err != nil {
		return err
	}
	if len(body) == 0 {
		return respErr
	}
	if code := extractErrorCodeJSON(body); code != "" {
		respErr.ErrorCode = code
	} else if code := extractErrorCodeXML(body); code != "" {
		respErr.ErrorCode = code
	}
	return respErr
}
// extractErrorCodeJSON attempts to pull an error code out of a JSON response body.
// It understands a bare object with a "code" field as well as the wrapped forms
// { "error": { "code": ... } } and { "odata.error": { "code": ... } }.
// It returns "" when the body isn't a JSON object or no string code is present.
func extractErrorCodeJSON(body []byte) string {
	// note: uses map[string]any consistently (the original mixed it with
	// map[string]interface{}; they are identical types)
	var rawObj map[string]any
	if err := json.Unmarshal(body, &rawObj); err != nil {
		// not a JSON object
		return ""
	}
	// check if this is a wrapped error, i.e. { "error": { ... } } or
	// { "odata.error": { ... } }; if so then unwrap it.
	// "error" takes precedence over "odata.error".
	for _, key := range []string{"error", "odata.error"} {
		wrapped, ok := rawObj[key]
		if !ok {
			continue
		}
		unwrapped, ok := wrapped.(map[string]any)
		if !ok {
			// the wrapper key exists but isn't an object; no code to extract
			return ""
		}
		rawObj = unwrapped
		break
	}
	// now check for the error code
	codeStr, ok := rawObj["code"].(string)
	if !ok {
		return ""
	}
	return codeStr
}
// errCodeXML matches an XML error code element such as <Code>...</Code> or a
// namespace-prefixed <ns:code>...</ns:code>, capturing the code itself.
// Note: the original pattern used the character class [c|C], which wrongly
// included a literal '|'; [cC] is the intended class. Compiled once at package
// scope so the regex isn't rebuilt on every call.
var errCodeXML = regexp.MustCompile(`<(?:\w+:)?[cC]ode>\s*(\w+)\s*</(?:\w+:)?[cC]ode>`)

// extractErrorCodeXML attempts to pull an error code out of an XML response body.
// It returns "" when no code element is found.
func extractErrorCodeXML(body []byte) string {
	// regular expression is much easier than dealing with the XML parser
	res := errCodeXML.FindStringSubmatch(string(body))
	if len(res) != 2 {
		return ""
	}
	// first submatch is the entire thing, second one is the captured error code
	return res[1]
}
// ResponseError is returned when a request is made to a service and
// the service returns a non-success HTTP status code.
// Use errors.As() to access this type in the error chain.
// Exported as azcore.ResponseError.
type ResponseError struct {
	// ErrorCode is the error code returned by the resource provider if available.
	ErrorCode string

	// StatusCode is the HTTP status code as defined in https://pkg.go.dev/net/http#pkg-constants.
	StatusCode int

	// RawResponse is the underlying HTTP response.
	RawResponse *http.Response
}
// Error implements the error interface for type ResponseError.
// Note that the message contents are not contractual and can change over time.
func (e *ResponseError) Error() string {
	// write the request method and URL with response status code
	msg := &bytes.Buffer{}
	fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path)
	fmt.Fprintln(msg, "--------------------------------------------------------------------------------")
	fmt.Fprintf(msg, "RESPONSE %d: %s\n", e.RawResponse.StatusCode, e.RawResponse.Status)
	if e.ErrorCode != "" {
		fmt.Fprintf(msg, "ERROR CODE: %s\n", e.ErrorCode)
	} else {
		fmt.Fprintln(msg, "ERROR CODE UNAVAILABLE")
	}
	fmt.Fprintln(msg, "--------------------------------------------------------------------------------")
	// Payload re-reads the cached body; safe because NewResponseError already consumed it
	body, err := Payload(e.RawResponse)
	if err != nil {
		// this really shouldn't fail at this point as the response
		// body is already cached (it was read in NewResponseError)
		fmt.Fprintf(msg, "Error reading response body: %v", err)
	} else if len(body) > 0 {
		if err := json.Indent(msg, body, "", " "); err != nil {
			// failed to pretty-print so just dump it verbatim
			fmt.Fprint(msg, string(body))
		}
		// the standard library doesn't have a pretty-printer for XML
		fmt.Fprintln(msg)
	} else {
		fmt.Fprintln(msg, "Response contained no body")
	}
	fmt.Fprintln(msg, "--------------------------------------------------------------------------------")
	return msg.String()
}

View file

@ -0,0 +1,38 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// This is an internal helper package to combine the complete logging APIs.
package log
import (
azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
)
// Event is the log event type, aliased from the internal log package so both
// logging APIs share one event type.
type Event = log.Event

// azcore log events re-exported for internal packages.
const (
	EventRequest     = azlog.EventRequest
	EventResponse    = azlog.EventResponse
	EventRetryPolicy = azlog.EventRetryPolicy
	EventLRO         = azlog.EventLRO
)
// Write forwards the event and message to the internal log package.
func Write(cls log.Event, msg string) {
	log.Write(cls, msg)
}

// Writef forwards the event and Printf-style formatted message to the internal log package.
func Writef(cls log.Event, format string, a ...interface{}) {
	log.Writef(cls, format, a...)
}

// SetListener sets the callback that receives log events and messages.
func SetListener(lst func(Event, string)) {
	log.SetListener(lst)
}

// Should reports whether messages for the specified event should be written to the log.
func Should(cls log.Event) bool {
	return log.Should(cls)
}

View file

@ -0,0 +1,158 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package async
import (
"context"
"errors"
"fmt"
"net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
)
// see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/async-api-reference.md

// Applicable returns true if the LRO is using Azure-AsyncOperation,
// i.e. the response carries the Azure-AsyncOperation header.
func Applicable(resp *http.Response) bool {
	return resp.Header.Get(shared.HeaderAzureAsync) != ""
}
// CanResume returns true if the token can rehydrate this poller type.
// The token must contain the "asyncURL" key written by this poller.
func CanResume(token map[string]interface{}) bool {
	if _, found := token["asyncURL"]; found {
		return true
	}
	return false
}
// Poller is an LRO poller that uses the Azure-AsyncOperation pattern.
// The exported fields are serialized to/from JSON as part of a resume token.
type Poller[T any] struct {
	pl   exported.Pipeline
	resp *http.Response // most recent polling response; consumed by Result()

	// The URL from Azure-AsyncOperation header.
	AsyncURL string `json:"asyncURL"`

	// The URL from Location header.
	LocURL string `json:"locURL"`

	// The URL from the initial LRO request.
	OrigURL string `json:"origURL"`

	// The HTTP method from the initial LRO request.
	Method string `json:"method"`

	// The value of final-state-via from swagger, can be the empty string.
	FinalState pollers.FinalStateVia `json:"finalState"`

	// The LRO's current state.
	CurState string `json:"state"`
}
// New creates a new Poller from the provided initial response and final-state type.
// Pass nil for response to create an empty Poller for rehydration.
// The response must contain a valid Azure-AsyncOperation header.
func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) {
	if resp == nil {
		log.Write(log.EventLRO, "Resuming Azure-AsyncOperation poller.")
		// exported fields are repopulated from the resume token by the caller
		return &Poller[T]{pl: pl}, nil
	}
	log.Write(log.EventLRO, "Using Azure-AsyncOperation poller.")
	asyncURL := resp.Header.Get(shared.HeaderAzureAsync)
	if asyncURL == "" {
		return nil, errors.New("response is missing Azure-AsyncOperation header")
	}
	if !pollers.IsValidURL(asyncURL) {
		return nil, fmt.Errorf("invalid polling URL %s", asyncURL)
	}
	// check for provisioning state. if the operation is a RELO
	// and terminates synchronously this will prevent extra polling.
	// it's ok if there's no provisioning state.
	state, _ := pollers.GetProvisioningState(resp)
	if state == "" {
		state = pollers.StatusInProgress
	}
	p := &Poller[T]{
		pl:         pl,
		resp:       resp,
		AsyncURL:   asyncURL,
		LocURL:     resp.Header.Get(shared.HeaderLocation),
		OrigURL:    resp.Request.URL.String(),
		Method:     resp.Request.Method,
		FinalState: finalState,
		CurState:   state,
	}
	return p, nil
}
// Done returns true if the LRO is in a terminal state,
// as determined by pollers.IsTerminalState on the current state.
func (p *Poller[T]) Done() bool {
	return pollers.IsTerminalState(p.CurState)
}
// Poll retrieves the current state of the LRO by issuing a GET to the
// Azure-AsyncOperation URL and returns the latest polling response.
func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
	err := pollers.PollHelper(ctx, p.AsyncURL, p.pl, func(resp *http.Response) (string, error) {
		if !pollers.StatusCodeValid(resp) {
			// retain the response so the returned error can report it
			p.resp = resp
			return "", exported.NewResponseError(resp)
		}
		state, err := pollers.GetStatus(resp)
		if err != nil {
			return "", err
		} else if state == "" {
			return "", errors.New("the response did not contain a status")
		}
		// only cache the response/state once the poll result is known good
		p.resp = resp
		p.CurState = state
		return p.CurState, nil
	})
	if err != nil {
		return nil, err
	}
	return p.resp, nil
}
// Result populates out with the LRO's final result, issuing a final GET when
// the method/final-state combination requires one.
// NOTE(review): p.resp is dereferenced unconditionally, so this assumes the
// poller was created from a response or Poll has run at least once — confirm
// callers uphold that.
func (p *Poller[T]) Result(ctx context.Context, out *T) error {
	if p.resp.StatusCode == http.StatusNoContent {
		// nothing to unmarshal
		return nil
	} else if pollers.Failed(p.CurState) {
		return exported.NewResponseError(p.resp)
	}
	var req *exported.Request
	var err error
	if p.Method == http.MethodPatch || p.Method == http.MethodPut {
		// for PATCH and PUT, the final GET is on the original resource URL
		req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL)
	} else if p.Method == http.MethodPost {
		if p.FinalState == pollers.FinalStateViaAzureAsyncOp {
			// no final GET required
		} else if p.FinalState == pollers.FinalStateViaOriginalURI {
			req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL)
		} else if p.LocURL != "" {
			// ideally FinalState would be set to "location" but it isn't always.
			// must check last due to more permissive condition.
			req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL)
		}
	}
	if err != nil {
		return err
	}
	// if a final GET request has been created, execute it
	if req != nil {
		resp, err := p.pl.Do(req)
		if err != nil {
			return err
		}
		p.resp = resp
	}
	return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out)
}

View file

@ -0,0 +1,134 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package body
import (
"context"
"errors"
"net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
)
// kind is the identifier of this type in a resume token.
const kind = "body"

// Applicable returns true if the LRO is using no headers, just provisioning state.
// This is only applicable to PATCH and PUT methods and assumes no polling headers.
func Applicable(resp *http.Response) bool {
	// we can't check for absence of headers due to some misbehaving services
	// like redis that return a Location header but don't actually use that protocol
	return resp.Request.Method == http.MethodPatch || resp.Request.Method == http.MethodPut
}
// CanResume returns true if the token can rehydrate this poller type.
// The token's "type" field must be a string equal to this poller's kind.
func CanResume(token map[string]interface{}) bool {
	// a missing or non-string "type" entry makes the type assertion fail,
	// which correctly yields false
	tt, ok := token["type"].(string)
	return ok && tt == kind
}
// Poller is an LRO poller that uses the Body pattern.
// The exported fields are serialized to/from JSON as part of a resume token.
type Poller[T any] struct {
	pl   exported.Pipeline
	resp *http.Response // most recent polling response; consumed by Result()

	// The poller's type, used for resume token processing.
	Type string `json:"type"`

	// The URL for polling.
	PollURL string `json:"pollURL"`

	// The LRO's current state.
	CurState string `json:"state"`
}
// New creates a new Poller from the provided initial response.
// Pass nil for response to create an empty Poller for rehydration.
// The initial state is derived from the HTTP status code and any
// provisioning state found in the response body.
func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) {
	if resp == nil {
		log.Write(log.EventLRO, "Resuming Body poller.")
		return &Poller[T]{pl: pl}, nil
	}
	log.Write(log.EventLRO, "Using Body poller.")
	p := &Poller[T]{
		pl:      pl,
		resp:    resp,
		Type:    kind,
		PollURL: resp.Request.URL.String(),
	}
	// default initial state to InProgress. depending on the HTTP
	// status code and provisioning state, we might change the value.
	curState := pollers.StatusInProgress
	provState, err := pollers.GetProvisioningState(resp)
	if err != nil && !errors.Is(err, pollers.ErrNoBody) {
		return nil, err
	}
	switch {
	case resp.StatusCode == http.StatusCreated && provState != "":
		// a 201 with provisioning state starts in that state; absence of
		// provisioning state is ok for a 201, the operation is simply in progress
		curState = provState
	case resp.StatusCode == http.StatusOK && provState != "":
		curState = provState
	case resp.StatusCode == http.StatusOK:
		// for a 200, absence of provisioning state indicates success
		// (the original code re-tested provState == "" here, which is
		// always true at this point)
		curState = pollers.StatusSucceeded
	case resp.StatusCode == http.StatusNoContent:
		curState = pollers.StatusSucceeded
	}
	p.CurState = curState
	return p, nil
}
// Done returns true if the LRO has reached a terminal state.
func (p *Poller[T]) Done() bool {
	return pollers.IsTerminalState(p.CurState)
}
// Poll issues a GET to the polling URL and updates the poller's state
// from the response. It returns the latest HTTP response received.
//
// Fix: the original checked `state == ""` before `err != nil`. Since
// GetProvisioningState returns an empty state whenever it returns an
// error, any non-ErrNoBody failure (e.g. a malformed JSON body) was
// unreachable and silently treated as terminal success. Errors are now
// surfaced before the empty-state fallback.
func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
	err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) {
		if !pollers.StatusCodeValid(resp) {
			p.resp = resp
			return "", exported.NewResponseError(resp)
		}
		if resp.StatusCode == http.StatusNoContent {
			p.resp = resp
			p.CurState = pollers.StatusSucceeded
			return p.CurState, nil
		}
		state, err := pollers.GetProvisioningState(resp)
		if err != nil {
			// a missing response body (ErrNoBody) in the non-204 case is an
			// error, as is any failure to parse the response body.
			return "", err
		}
		if state == "" {
			// a response body without provisioning state is considered terminal success
			state = pollers.StatusSucceeded
		}
		p.resp = resp
		p.CurState = state
		return p.CurState, nil
	})
	if err != nil {
		return nil, err
	}
	return p.resp, nil
}
// Result unmarshals the final response body into out, or returns an
// error if the LRO terminated in a failed state.
func (p *Poller[T]) Result(ctx context.Context, out *T) error {
	return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out)
}

View file

@ -0,0 +1,118 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package loc
import (
"context"
"errors"
"fmt"
"net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
)
// kind is the identifier of this poller type in a resume token;
// it's stored in the poller's Type field and checked by CanResume.
const kind = "loc"
// Applicable returns true if the LRO is using Location.
func Applicable(resp *http.Response) bool {
	loc := resp.Header.Get(shared.HeaderLocation)
	return loc != ""
}
// CanResume returns true if the token can rehydrate this poller type.
func CanResume(token map[string]interface{}) bool {
	typeVal, exists := token["type"]
	if !exists {
		return false
	}
	name, isString := typeVal.(string)
	return isString && name == kind
}
// Poller is an LRO poller that uses the Location pattern.
type Poller[T any] struct {
	// pipeline used to issue polling requests.
	pl exported.Pipeline
	// the most recent HTTP response received while polling.
	resp *http.Response
	// The poller's type, used for resume token processing.
	Type string `json:"type"`
	// The URL for polling, taken from the Location header.
	PollURL string `json:"pollURL"`
	// The LRO's current state.
	CurState string `json:"state"`
}
// New creates a new Poller from the provided initial response.
// Pass nil for response to create an empty Poller for rehydration.
func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) {
	if resp == nil {
		log.Write(log.EventLRO, "Resuming Location poller.")
		return &Poller[T]{pl: pl}, nil
	}
	log.Write(log.EventLRO, "Using Location poller.")
	locURL := resp.Header.Get(shared.HeaderLocation)
	switch {
	case locURL == "":
		return nil, errors.New("response is missing Location header")
	case !pollers.IsValidURL(locURL):
		return nil, fmt.Errorf("invalid polling URL %s", locURL)
	}
	// check for provisioning state. if the operation is a RELO
	// and terminates synchronously this will prevent extra polling.
	// it's ok if there's no provisioning state.
	state, _ := pollers.GetProvisioningState(resp)
	if state == "" {
		state = pollers.StatusInProgress
	}
	p := &Poller[T]{
		pl:       pl,
		resp:     resp,
		Type:     kind,
		PollURL:  locURL,
		CurState: state,
	}
	return p, nil
}
// Done returns true if the LRO has reached a terminal state.
func (p *Poller[T]) Done() bool {
	return pollers.IsTerminalState(p.CurState)
}
// Poll issues a GET to the polling URL, updates the poller's state from
// the response, and returns the latest HTTP response received.
func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
	err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) {
		// location polling can return an updated polling URL
		if loc := resp.Header.Get(shared.HeaderLocation); loc != "" {
			p.PollURL = loc
		}
		// if provisioning state is available, use that. this is only
		// for some ARM LRO scenarios (e.g. DELETE with a Location header)
		// so if it's missing then use HTTP status code.
		provState, _ := pollers.GetProvisioningState(resp)
		p.resp = resp
		switch {
		case provState != "":
			p.CurState = provState
		case resp.StatusCode == http.StatusAccepted:
			p.CurState = pollers.StatusInProgress
		case resp.StatusCode >= 200 && resp.StatusCode < 300:
			// any 2xx other than a 202 indicates success
			p.CurState = pollers.StatusSucceeded
		default:
			p.CurState = pollers.StatusFailed
		}
		return p.CurState, nil
	})
	if err != nil {
		return nil, err
	}
	return p.resp, nil
}
// Result unmarshals the final response body into out, or returns an
// error if the LRO terminated in a failed state.
func (p *Poller[T]) Result(ctx context.Context, out *T) error {
	return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out)
}

View file

@ -0,0 +1,144 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package op
import (
"context"
"errors"
"fmt"
"net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
)
// Applicable returns true if the LRO is using Operation-Location.
func Applicable(resp *http.Response) bool {
	opLoc := resp.Header.Get(shared.HeaderOperationLocation)
	return opLoc != ""
}
// CanResume returns true if the token can rehydrate this poller type.
// The presence of the "oplocURL" key identifies an Operation-Location token.
func CanResume(token map[string]interface{}) bool {
	_, hasOpLoc := token["oplocURL"]
	return hasOpLoc
}
// Poller is an LRO poller that uses the Operation-Location pattern.
type Poller[T any] struct {
	// pipeline used to issue polling requests.
	pl exported.Pipeline
	// the most recent HTTP response received while polling.
	resp *http.Response
	// OpLocURL is the Operation-Location URL that's polled for status.
	OpLocURL string `json:"oplocURL"`
	// LocURL is the optional Location URL, used when fetching the final result.
	LocURL string `json:"locURL"`
	// OrigURL is the URL of the original request that started the LRO.
	OrigURL string `json:"origURL"`
	// Method is the HTTP verb of the original request.
	Method string `json:"method"`
	// FinalState indicates where the LRO's final payload should come from.
	FinalState pollers.FinalStateVia `json:"finalState"`
	// CurState is the LRO's most recently observed state.
	CurState string `json:"state"`
}
// New creates a new Poller from the provided initial response.
// Pass nil for response to create an empty Poller for rehydration.
func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) {
	if resp == nil {
		log.Write(log.EventLRO, "Resuming Operation-Location poller.")
		return &Poller[T]{pl: pl}, nil
	}
	log.Write(log.EventLRO, "Using Operation-Location poller.")
	opURL := resp.Header.Get(shared.HeaderOperationLocation)
	switch {
	case opURL == "":
		return nil, errors.New("response is missing Operation-Location header")
	case !pollers.IsValidURL(opURL):
		return nil, fmt.Errorf("invalid Operation-Location URL %s", opURL)
	}
	// Location header is optional
	locURL := resp.Header.Get(shared.HeaderLocation)
	if locURL != "" && !pollers.IsValidURL(locURL) {
		return nil, fmt.Errorf("invalid Location URL %s", locURL)
	}
	// default initial state to InProgress. if the
	// service sent us a status then use that instead.
	curState := pollers.StatusInProgress
	initialStatus, err := pollers.GetStatus(resp)
	if err != nil && !errors.Is(err, pollers.ErrNoBody) {
		return nil, err
	}
	if initialStatus != "" {
		curState = initialStatus
	}
	p := &Poller[T]{
		pl:         pl,
		resp:       resp,
		OpLocURL:   opURL,
		LocURL:     locURL,
		OrigURL:    resp.Request.URL.String(),
		Method:     resp.Request.Method,
		FinalState: finalState,
		CurState:   curState,
	}
	return p, nil
}
// Done returns true if the LRO has reached a terminal state.
func (p *Poller[T]) Done() bool {
	return pollers.IsTerminalState(p.CurState)
}
// Poll issues a GET to the Operation-Location URL and updates the
// poller's state from the status in the response body. The latest HTTP
// response received is returned.
func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
	err := pollers.PollHelper(ctx, p.OpLocURL, p.pl, func(resp *http.Response) (string, error) {
		if !pollers.StatusCodeValid(resp) {
			p.resp = resp
			return "", exported.NewResponseError(resp)
		}
		state, err := pollers.GetStatus(resp)
		switch {
		case err != nil:
			return "", err
		case state == "":
			return "", errors.New("the response did not contain a status")
		}
		p.resp = resp
		p.CurState = state
		return p.CurState, nil
	})
	if err != nil {
		return nil, err
	}
	return p.resp, nil
}
// Result obtains the LRO's final payload and unmarshals it into out.
// Depending on FinalState, the original HTTP method, and the response
// body's resourceLocation, a final GET may be issued first; the branch
// order below is significant and must not be rearranged.
func (p *Poller[T]) Result(ctx context.Context, out *T) error {
	var req *exported.Request
	var err error
	if p.FinalState == pollers.FinalStateViaLocation && p.LocURL != "" {
		// caller explicitly asked for the final payload via the Location URL
		req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL)
	} else if p.FinalState == pollers.FinalStateViaOpLocation && p.Method == http.MethodPost {
		// no final GET required, terminal response should have it
	} else if rl, rlErr := pollers.GetResourceLocation(p.resp); rlErr != nil && !errors.Is(rlErr, pollers.ErrNoBody) {
		// a missing body is tolerated here; any other parse error is fatal
		return rlErr
	} else if rl != "" {
		// the terminal response pointed at the resource's location
		req, err = exported.NewRequest(ctx, http.MethodGet, rl)
	} else if p.Method == http.MethodPatch || p.Method == http.MethodPut {
		// PATCH/PUT: the resource lives at the original request URL
		req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL)
	} else if p.Method == http.MethodPost && p.LocURL != "" {
		// POST with a Location header: fetch the result from there
		req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL)
	}
	if err != nil {
		return err
	}
	// if a final GET request has been created, execute it
	if req != nil {
		resp, err := p.pl.Do(req)
		if err != nil {
			return err
		}
		p.resp = resp
	}
	return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out)
}

View file

@ -0,0 +1,24 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package pollers
// FinalStateVia is the enumerated type for the possible final-state-via values,
// used by pollers to determine where an LRO's final payload comes from.
type FinalStateVia string

const (
	// FinalStateViaAzureAsyncOp indicates the final payload comes from the Azure-AsyncOperation URL.
	FinalStateViaAzureAsyncOp FinalStateVia = "azure-async-operation"

	// FinalStateViaLocation indicates the final payload comes from the Location URL.
	FinalStateViaLocation FinalStateVia = "location"

	// FinalStateViaOriginalURI indicates the final payload comes from the original URL.
	FinalStateViaOriginalURI FinalStateVia = "original-uri"

	// FinalStateViaOpLocation indicates the final payload comes from the Operation-Location URL.
	FinalStateViaOpLocation FinalStateVia = "operation-location"
)

View file

@ -0,0 +1,317 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package pollers
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"reflect"
"strings"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
)
// the well-known set of LRO status/provisioning state values.
const (
	StatusSucceeded  = "Succeeded"
	StatusCanceled   = "Canceled"
	StatusFailed     = "Failed"
	StatusInProgress = "InProgress"
)

// IsTerminalState returns true if the LRO's state is terminal
// (success, failure, or cancellation); comparisons are case-insensitive.
func IsTerminalState(s string) bool {
	return Succeeded(s) || Failed(s)
}

// Failed returns true if the LRO's state is terminal failure
// (failed or canceled); comparisons are case-insensitive.
func Failed(s string) bool {
	for _, terminal := range []string{StatusFailed, StatusCanceled} {
		if strings.EqualFold(s, terminal) {
			return true
		}
	}
	return false
}

// Succeeded returns true if the LRO's state is terminal success;
// the comparison is case-insensitive.
func Succeeded(s string) bool {
	return strings.EqualFold(s, StatusSucceeded)
}
// StatusCodeValid returns true if the LRO response contains a valid
// HTTP status code (200, 201, 202, or 204).
func StatusCodeValid(resp *http.Response) bool {
	return exported.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusCreated, http.StatusNoContent)
}
// IsValidURL verifies that the URL is valid and absolute.
func IsValidURL(s string) bool {
	if u, err := url.Parse(s); err == nil {
		return u.IsAbs()
	}
	return false
}
// getTokenTypeName creates a type name from the type parameter T.
// Pointer types are prefixed with "*". An error is returned only when
// the combined name is empty (a non-pointer nameless type).
func getTokenTypeName[T any]() (string, error) {
	tt := shared.TypeOfT[T]()
	prefix := ""
	if tt.Kind() == reflect.Pointer {
		prefix = "*"
		tt = tt.Elem()
	}
	full := prefix + tt.Name()
	if full == "" {
		return "", errors.New("nameless types are not allowed")
	}
	return full, nil
}
// resumeTokenWrapper is the serialized form of a resume token:
// the poller type name plus the poller-specific token payload.
type resumeTokenWrapper[T any] struct {
	Type  string `json:"type"`
	Token T      `json:"token"`
}
// NewResumeToken creates a resume token from the specified type.
// An error is returned if the generic type has no name (e.g. struct{}).
func NewResumeToken[TResult, TSource any](from TSource) (string, error) {
	typeName, err := getTokenTypeName[TResult]()
	if err != nil {
		return "", err
	}
	wrapper := resumeTokenWrapper[TSource]{
		Type:  typeName,
		Token: from,
	}
	serialized, err := json.Marshal(wrapper)
	if err != nil {
		return "", err
	}
	return string(serialized), nil
}
// ExtractToken returns the poller-specific token information from the provided token value.
func ExtractToken(token string) ([]byte, error) {
	var raw map[string]json.RawMessage
	if err := json.Unmarshal([]byte(token), &raw); err != nil {
		return nil, err
	}
	// this is dependent on the type resumeTokenWrapper[T]
	tk, found := raw["token"]
	if !found {
		return nil, errors.New("missing token value")
	}
	return tk, nil
}
// IsTokenValid returns an error if the specified token isn't applicable for generic type T.
func IsTokenValid[T any](token string) error {
	payload := map[string]interface{}{}
	if err := json.Unmarshal([]byte(token), &payload); err != nil {
		return err
	}
	rawType, exists := payload["type"]
	if !exists {
		return errors.New("missing type value")
	}
	tokenType, isString := rawType.(string)
	if !isString {
		return fmt.Errorf("invalid type format %T", rawType)
	}
	wantType, err := getTokenTypeName[T]()
	if err != nil {
		return err
	}
	if tokenType != wantType {
		return fmt.Errorf("cannot resume from this poller token. token is for type %s, not %s", tokenType, wantType)
	}
	return nil
}
// ErrNoBody is returned if the response didn't contain a body.
var ErrNoBody = errors.New("the response did not contain a body")

// GetJSON reads the response body into a raw JSON object.
// It returns ErrNoBody if there was no content.
func GetJSON(resp *http.Response) (map[string]interface{}, error) {
	body, err := exported.Payload(resp)
	if err != nil {
		return nil, err
	}
	if len(body) == 0 {
		return nil, ErrNoBody
	}
	// unmarshal the body to get the value
	var parsed map[string]interface{}
	if err := json.Unmarshal(body, &parsed); err != nil {
		return nil, err
	}
	return parsed, nil
}
// provisioningState returns the provisioning state from the response or the empty string.
// It looks for the value at properties.provisioningState; any missing key or
// unexpected type along the way yields the empty string.
func provisioningState(jsonBody map[string]interface{}) string {
	props, ok := jsonBody["properties"].(map[string]interface{})
	if !ok {
		return ""
	}
	ps, ok := props["provisioningState"].(string)
	if !ok {
		return ""
	}
	return ps
}
// status returns the status from the response or the empty string.
// A missing "status" key or a non-string value yields the empty string.
func status(jsonBody map[string]interface{}) string {
	if s, ok := jsonBody["status"].(string); ok {
		return s
	}
	return ""
}
// GetStatus returns the LRO's status from the response body.
// Typically used for Azure-AsyncOperation flows.
// If there is no status in the response body the empty string is returned.
func GetStatus(resp *http.Response) (string, error) {
	body, err := GetJSON(resp)
	if err != nil {
		return "", err
	}
	return status(body), nil
}
// GetProvisioningState returns the LRO's state from the response body.
// If there is no state in the response body the empty string is returned.
func GetProvisioningState(resp *http.Response) (string, error) {
	body, err := GetJSON(resp)
	if err != nil {
		return "", err
	}
	return provisioningState(body), nil
}
// GetResourceLocation returns the LRO's resourceLocation value from the response body.
// Typically used for Operation-Location flows.
// If there is no resourceLocation in the response body the empty string is returned.
func GetResourceLocation(resp *http.Response) (string, error) {
	body, err := GetJSON(resp)
	if err != nil {
		return "", err
	}
	raw, exists := body["resourceLocation"]
	if !exists {
		// it might be ok if the field doesn't exist, the caller must make that determination
		return "", nil
	}
	loc, isString := raw.(string)
	if !isString {
		return "", fmt.Errorf("the resourceLocation value %v was not in string format", raw)
	}
	return loc, nil
}
// NopPoller is used if the operation synchronously completed; it
// satisfies the poller contract without performing any actual polling.
type NopPoller[T any] struct {
	// the terminal HTTP response.
	resp *http.Response
	// the result unmarshaled from resp's body, if any.
	result T
}
// NewNopPoller creates a NopPoller from the provided response.
// It unmarshals the response body into an instance of T.
func NewNopPoller[T any](resp *http.Response) (*NopPoller[T], error) {
	poller := &NopPoller[T]{resp: resp}
	// a 204 carries no body to unmarshal
	if resp.StatusCode == http.StatusNoContent {
		return poller, nil
	}
	body, err := exported.Payload(resp)
	switch {
	case err != nil:
		return nil, err
	case len(body) == 0:
		return poller, nil
	}
	if err := json.Unmarshal(body, &poller.result); err != nil {
		return nil, err
	}
	return poller, nil
}
// Done always returns true as the operation has already completed.
func (*NopPoller[T]) Done() bool {
	return true
}

// Poll is a no-op; it returns the original terminal response.
func (p *NopPoller[T]) Poll(context.Context) (*http.Response, error) {
	return p.resp, nil
}

// Result copies the previously unmarshaled result into out. It never fails.
func (p *NopPoller[T]) Result(ctx context.Context, out *T) error {
	*out = p.result
	return nil
}
// PollHelper creates and executes the request, calling update() with the response.
// If the request fails, the update func is not called.
// The update func returns the state of the operation for logging purposes or an error
// if it fails to extract the required state from the response.
func PollHelper(ctx context.Context, endpoint string, pl exported.Pipeline, update func(resp *http.Response) (string, error)) error {
	pollReq, err := exported.NewRequest(ctx, http.MethodGet, endpoint)
	if err != nil {
		return err
	}
	pollResp, err := pl.Do(pollReq)
	if err != nil {
		return err
	}
	curState, err := update(pollResp)
	if err != nil {
		return err
	}
	log.Writef(log.EventLRO, "State %s", curState)
	return nil
}
// ResultHelper processes the response as success or failure.
// In the success case, it unmarshals the payload into either a new instance of T or out.
// In the failure case, it creates an *azcore.Response error from the response.
func ResultHelper[T any](resp *http.Response, failed bool, out *T) error {
	// short-circuit the simple success case with no response body to unmarshal
	if resp.StatusCode == http.StatusNoContent {
		return nil
	}
	defer resp.Body.Close()
	if failed || !StatusCodeValid(resp) {
		// the LRO failed. unmarshal the error and update state
		return exported.NewResponseError(resp)
	}
	// success case
	payload, err := exported.Payload(resp)
	if err != nil {
		return err
	}
	if len(payload) > 0 {
		return json.Unmarshal(payload, out)
	}
	return nil
}

View file

@ -0,0 +1,34 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package shared
// Content-Type values used when sending request payloads.
const (
	ContentTypeAppJSON = "application/json"
	ContentTypeAppXML  = "application/xml"
)

// Well-known HTTP header names used throughout the SDK.
const (
	HeaderAuthorization          = "Authorization"
	HeaderAuxiliaryAuthorization = "x-ms-authorization-auxiliary"
	HeaderAzureAsync             = "Azure-AsyncOperation"
	HeaderContentLength          = "Content-Length"
	HeaderContentType            = "Content-Type"
	HeaderLocation               = "Location"
	HeaderOperationLocation      = "Operation-Location"
	HeaderRetryAfter             = "Retry-After"
	HeaderUserAgent              = "User-Agent"
)

// BearerTokenPrefix is prepended to access tokens in Authorization header values.
const BearerTokenPrefix = "Bearer "

const (
	// Module is the name of the calling module used in telemetry data.
	Module = "azcore"

	// Version is the semantic version (see http://semver.org) of this module.
	Version = "v1.3.0"
)

View file

@ -0,0 +1,159 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package shared
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"reflect"
"regexp"
"strconv"
"strings"
"time"
)
// Empty struct types are used as context keys: they cost nothing to
// allocate and can't collide with keys from other packages.

// CtxWithHTTPHeaderKey is used as a context key for adding/retrieving http.Header.
type CtxWithHTTPHeaderKey struct{}

// CtxWithRetryOptionsKey is used as a context key for adding/retrieving RetryOptions.
type CtxWithRetryOptionsKey struct{}

// CtxIncludeResponseKey is used as a context key for retrieving the raw response.
type CtxIncludeResponseKey struct{}
// Delay waits for the duration to elapse or the context to be cancelled.
func Delay(ctx context.Context, delay time.Duration) error {
select {
case <-time.After(delay):
return nil
case <-ctx.Done():
return ctx.Err()
}
}
// RetryAfter returns non-zero if the response contains a Retry-After header value.
func RetryAfter(resp *http.Response) time.Duration {
	if resp == nil {
		return 0
	}
	ra := resp.Header.Get(HeaderRetryAfter)
	if ra == "" {
		return 0
	}
	// retry-after values are expressed in either number of
	// seconds or an HTTP-date indicating when to try again
	if secs, _ := strconv.Atoi(ra); secs > 0 {
		return time.Duration(secs) * time.Second
	}
	if at, err := time.Parse(time.RFC1123, ra); err == nil {
		return time.Until(at)
	}
	return 0
}
// TypeOfT returns the type of the generic type param.
func TypeOfT[T any]() reflect.Type {
// you can't, at present, obtain the type of
// a type parameter, so this is the trick
return reflect.TypeOf((*T)(nil)).Elem()
}
// BytesSetter abstracts replacing a byte slice on some type.
type BytesSetter interface {
	Set(b []byte)
}

// NewNopClosingBytesReader creates a new *NopClosingBytesReader for the specified slice.
func NewNopClosingBytesReader(data []byte) *NopClosingBytesReader {
	return &NopClosingBytesReader{s: data}
}

// NopClosingBytesReader is an io.ReadSeekCloser around a byte slice.
// It also provides direct access to the byte slice to avoid rereading.
type NopClosingBytesReader struct {
	// the wrapped byte slice.
	s []byte
	// current read position within s.
	i int64
}
// Bytes returns the underlying byte slice.
func (r *NopClosingBytesReader) Bytes() []byte {
	return r.s
}

// Close implements the io.Closer interface.
// It's a no-op: the reader wraps an in-memory slice, so there's nothing to release.
func (*NopClosingBytesReader) Close() error {
	return nil
}
// Read implements the io.Reader interface.
// It returns io.EOF once the read position reaches the end of the slice.
func (r *NopClosingBytesReader) Read(b []byte) (int, error) {
	if r.i >= int64(len(r.s)) {
		return 0, io.EOF
	}
	count := copy(b, r.s[r.i:])
	r.i += int64(count)
	return count, nil
}
// Set replaces the existing byte slice with the specified byte slice and resets the reader,
// so subsequent Reads start from the beginning of b.
func (r *NopClosingBytesReader) Set(b []byte) {
	r.s = b
	r.i = 0
}
// Seek implements the io.Seeker interface.
// Seeking past the end of the slice is permitted; seeking to a negative
// position is an error.
func (r *NopClosingBytesReader) Seek(offset int64, whence int) (int64, error) {
	var pos int64
	switch whence {
	case io.SeekStart:
		pos = offset
	case io.SeekCurrent:
		pos = r.i + offset
	case io.SeekEnd:
		pos = int64(len(r.s)) + offset
	default:
		return 0, errors.New("nopClosingBytesReader: invalid whence")
	}
	if pos < 0 {
		return 0, errors.New("nopClosingBytesReader: negative position")
	}
	r.i = pos
	return pos, nil
}

// compile-time check that NopClosingBytesReader satisfies BytesSetter.
var _ BytesSetter = (*NopClosingBytesReader)(nil)
// TransportFunc is a helper to use a first-class func to satisfy the Transporter interface.
type TransportFunc func(*http.Request) (*http.Response, error)

// Do implements the Transporter interface for the TransportFunc type
// by invoking the function itself.
func (pf TransportFunc) Do(req *http.Request) (*http.Response, error) {
	return pf(req)
}
// modVerRegx matches semver 2.0 strings of the form vMAJOR.MINOR.PATCH
// with an optional pre-release suffix. It's compiled once at package
// scope instead of on every ValidateModVer call.
var modVerRegx = regexp.MustCompile(`^v\d+\.\d+\.\d+(?:-[a-zA-Z0-9_.-]+)?$`)

// ValidateModVer verifies that moduleVersion is a valid semver 2.0 string.
// A descriptive error is returned for malformed values.
func ValidateModVer(moduleVersion string) error {
	if !modVerRegx.MatchString(moduleVersion) {
		return fmt.Errorf("malformed moduleVersion param value %s", moduleVersion)
	}
	return nil
}
// ExtractPackageName returns "package" from "package.Client".
// If clientName is malformed, an error is returned.
func ExtractPackageName(clientName string) (string, error) {
	pkg, client, found := strings.Cut(clientName, ".")
	switch {
	case !found:
		return "", fmt.Errorf("missing . in clientName %s", clientName)
	case pkg == "" || client == "":
		return "", fmt.Errorf("malformed clientName %s", clientName)
	}
	return pkg, nil
}

View file

@ -0,0 +1,10 @@
//go:build go1.18
// +build go1.18
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
// license that can be found in the LICENSE file.
// Package log contains functionality for configuring logging behavior.
// Default logging to stderr can be enabled by setting environment variable AZURE_SDK_GO_LOGGING to "all".
package log

View file

@ -0,0 +1,50 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Package log provides functionality for configuring logging facilities.
package log
import (
"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
)
// Event is used to group entries. Each group can be toggled on or off.
// It aliases the internal log package's Event type.
type Event = log.Event

const (
	// EventRequest entries contain information about HTTP requests.
	// This includes information like the URL, query parameters, and headers.
	EventRequest Event = "Request"

	// EventResponse entries contain information about HTTP responses.
	// This includes information like the HTTP status code, headers, and request URL.
	EventResponse Event = "Response"

	// EventRetryPolicy entries contain information specific to the retry policy in use.
	EventRetryPolicy Event = "Retry"

	// EventLRO entries contain information specific to long-running operations.
	// This includes information like polling location, operation state, and sleep intervals.
	EventLRO Event = "LongRunningOperation"
)
// SetEvents is used to control which events are written to
// the log. By default all log events are written.
// NOTE: this is not goroutine safe and should be called before using SDK clients.
func SetEvents(cls ...Event) {
	log.SetEvents(cls...)
}

// SetListener will set the Logger to write to the specified Listener.
// NOTE: this is not goroutine safe and should be called before using SDK clients.
func SetListener(lst func(Event, string)) {
	log.SetListener(lst)
}

// resetEvents restores the default event filtering; for testing purposes only.
func resetEvents() {
	log.TestResetEvents()
}

View file

@ -0,0 +1,10 @@
//go:build go1.18
// +build go1.18
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
// license that can be found in the LICENSE file.
// Package policy contains the definitions needed for configuring in-box pipeline policies
// and creating custom policies.
package policy

View file

@ -0,0 +1,155 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package policy
import (
"net/http"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing"
)
// Policy represents an extensibility point for the Pipeline that can mutate the specified
// Request and react to the received Response.
// It aliases the internal exported.Policy type for public consumption.
type Policy = exported.Policy

// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses.
// It aliases the internal exported.Transporter type for public consumption.
type Transporter = exported.Transporter

// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline.
// Don't use this type directly, use runtime.NewRequest() instead.
type Request = exported.Request

// ClientOptions contains optional settings for a client's pipeline.
// All zero-value fields will be initialized with default values.
type ClientOptions struct {
	// APIVersion overrides the default version requested of the service.
	// Set with caution as this package version has not been tested with arbitrary service versions.
	APIVersion string

	// Cloud specifies a cloud for the client. The default is Azure Public Cloud.
	Cloud cloud.Configuration

	// Logging configures the built-in logging policy.
	Logging LogOptions

	// Retry configures the built-in retry policy.
	Retry RetryOptions

	// Telemetry configures the built-in telemetry policy.
	Telemetry TelemetryOptions

	// TracingProvider configures the tracing provider.
	// It defaults to a no-op tracer.
	TracingProvider tracing.Provider

	// Transport sets the transport for HTTP requests.
	Transport Transporter

	// PerCallPolicies contains custom policies to inject into the pipeline.
	// Each policy is executed once per request.
	PerCallPolicies []Policy

	// PerRetryPolicies contains custom policies to inject into the pipeline.
	// Each policy is executed once per request, and for each retry of that request.
	PerRetryPolicies []Policy
}

// LogOptions configures the logging policy's behavior.
type LogOptions struct {
	// IncludeBody indicates if request and response bodies should be included in logging.
	// The default value is false.
	// NOTE: enabling this can lead to disclosure of sensitive information, use with care.
	IncludeBody bool

	// AllowedHeaders is the slice of headers to log with their values intact.
	// All headers not in the slice will have their values REDACTED.
	// Applies to request and response headers.
	AllowedHeaders []string

	// AllowedQueryParams is the slice of query parameters to log with their values intact.
	// All query parameters not in the slice will have their values REDACTED.
	AllowedQueryParams []string
}

// RetryOptions configures the retry policy's behavior.
// Zero-value fields will have their specified default values applied during use.
// This allows for modification of a subset of fields.
type RetryOptions struct {
	// MaxRetries specifies the maximum number of attempts a failed operation will be retried
	// before producing an error.
	// The default value is three. A value less than zero means one try and no retries.
	MaxRetries int32

	// TryTimeout indicates the maximum time allowed for any single try of an HTTP request.
	// This is disabled by default. Specify a value greater than zero to enable.
	// NOTE: Setting this to a small value might cause premature HTTP request time-outs.
	TryTimeout time.Duration

	// RetryDelay specifies the initial amount of delay to use before retrying an operation.
	// The value is used only if the HTTP response does not contain a Retry-After header.
	// The delay increases exponentially with each retry up to the maximum specified by MaxRetryDelay.
	// The default value is four seconds. A value less than zero means no delay between retries.
	RetryDelay time.Duration

	// MaxRetryDelay specifies the maximum delay allowed before retrying an operation.
	// Typically the value is greater than or equal to the value specified in RetryDelay.
	// The default Value is 120 seconds. A value less than zero means there is no cap.
	MaxRetryDelay time.Duration

	// StatusCodes specifies the HTTP status codes that indicate the operation should be retried.
	// A nil slice will use the following values:
	//   http.StatusRequestTimeout      408
	//   http.StatusTooManyRequests     429
	//   http.StatusInternalServerError 500
	//   http.StatusBadGateway          502
	//   http.StatusServiceUnavailable  503
	//   http.StatusGatewayTimeout      504
	// Specifying values will replace the default values.
	// Specifying an empty slice will disable retries for HTTP status codes.
	StatusCodes []int
}

// TelemetryOptions configures the telemetry policy's behavior.
type TelemetryOptions struct {
	// ApplicationID is an application-specific identification string to add to the User-Agent.
	// It has a maximum length of 24 characters and must not contain any spaces.
	ApplicationID string

	// Disabled will prevent the addition of any telemetry data to the User-Agent.
	Disabled bool
}

// TokenRequestOptions contain specific parameter that may be used by credentials types when attempting to get a token.
// It aliases the internal exported.TokenRequestOptions type for public consumption.
type TokenRequestOptions = exported.TokenRequestOptions

// BearerTokenOptions configures the bearer token policy's behavior.
type BearerTokenOptions struct {
	// AuthorizationHandler allows SDK developers to run client-specific logic when BearerTokenPolicy must authorize a request.
	// When this field isn't set, the policy follows its default behavior of authorizing every request with a bearer token from
	// its given credential.
	AuthorizationHandler AuthorizationHandler
}

// AuthorizationHandler allows SDK developers to insert custom logic that runs when BearerTokenPolicy must authorize a request.
type AuthorizationHandler struct {
	// OnRequest is called each time the policy receives a request. Its func parameter authorizes the request with a token
	// from the policy's given credential. Implementations that need to perform I/O should use the Request's context,
	// available from Request.Raw().Context(). When OnRequest returns an error, the policy propagates that error and doesn't
	// send the request. When OnRequest is nil, the policy follows its default behavior, authorizing the request with a
	// token from its credential according to its configuration.
	OnRequest func(*Request, func(TokenRequestOptions) error) error

	// OnChallenge is called when the policy receives a 401 response, allowing the AuthorizationHandler to re-authorize the
	// request according to an authentication challenge (the Response's WWW-Authenticate header). OnChallenge is responsible
	// for parsing parameters from the challenge. Its func parameter will authorize the request with a token from the policy's
	// given credential. Implementations that need to perform I/O should use the Request's context, available from
	// Request.Raw().Context(). When OnChallenge returns nil, the policy will send the request again. When OnChallenge is nil,
	// the policy will return any 401 response to the client.
	OnChallenge func(*Request, *http.Response, func(TokenRequestOptions) error) error
}

View file

@ -0,0 +1,10 @@
//go:build go1.18
// +build go1.18
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
// license that can be found in the LICENSE file.
// Package runtime contains various facilities for creating requests and handling responses.
// The content is intended for SDK authors.
package runtime

View file

@ -0,0 +1,19 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
)
// NewResponseError creates an *azcore.ResponseError from the provided HTTP response.
// Call this when a service request returns a non-successful status code.
// It is a thin public wrapper over the internal exported implementation.
func NewResponseError(resp *http.Response) error {
	return exported.NewResponseError(resp)
}

View file

@ -0,0 +1,77 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"context"
"encoding/json"
"errors"
)
// PagingHandler contains the required data for constructing a Pager.
type PagingHandler[T any] struct {
	// More returns a boolean indicating if there are more pages to fetch.
	// It uses the provided page to make the determination.
	More func(T) bool
	// Fetcher fetches the first and subsequent pages.
	// It receives nil when fetching the first page and the current page thereafter.
	Fetcher func(context.Context, *T) (T, error)
}
// Pager provides operations for iterating over paged responses.
type Pager[T any] struct {
	current   *T                // most recently fetched page; nil until the first fetch (or pre-seeded via UnmarshalJSON)
	handler   PagingHandler[T]  // callbacks that drive iteration
	firstPage bool              // true until the first page has been returned from NextPage
}
// NewPager creates an instance of Pager using the specified PagingHandler.
// Pass a non-nil T for firstPage if the first page has already been retrieved.
func NewPager[T any](handler PagingHandler[T]) *Pager[T] {
	pager := &Pager[T]{firstPage: true}
	pager.handler = handler
	return pager
}
// More returns true if there are more pages to retrieve.
func (p *Pager[T]) More() bool {
	if p.current == nil {
		// nothing has been fetched yet, so there is at least one page
		return true
	}
	return p.handler.More(*p.current)
}
// NextPage advances the pager to the next page and returns it.
// Calling NextPage when More() reports false returns an error.
func (p *Pager[T]) NextPage(ctx context.Context) (T, error) {
	var resp T
	var err error
	if p.current != nil {
		if p.firstPage {
			// we get here if it's an LRO-pager, we already have the first page
			p.firstPage = false
			return *p.current, nil
		} else if !p.handler.More(*p.current) {
			return *new(T), errors.New("no more pages")
		}
		resp, err = p.handler.Fetcher(ctx, p.current)
	} else {
		// non-LRO case, first page
		p.firstPage = false
		resp, err = p.handler.Fetcher(ctx, nil)
	}
	if err != nil {
		return *new(T), err
	}
	p.current = &resp
	return *p.current, nil
}
// UnmarshalJSON implements the json.Unmarshaler interface for Pager[T].
// The payload is decoded into the pager's current page.
func (p *Pager[T]) UnmarshalJSON(data []byte) error {
	return json.Unmarshal(data, &p.current)
}

View file

@ -0,0 +1,77 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)
// PipelineOptions contains Pipeline options for SDK developers
type PipelineOptions struct {
	// AllowedHeaders and AllowedQueryParameters are merged into the log policy's
	// allow-lists by NewPipeline so their values aren't redacted when logged.
	AllowedHeaders, AllowedQueryParameters []string
	// APIVersion configures how the apiVersionPolicy applies an API version override.
	APIVersion APIVersionOptions
	// PerCall and PerRetry policies are placed before the corresponding caller-supplied policies.
	PerCall, PerRetry []policy.Policy
}
// Pipeline represents a primitive for sending HTTP requests and receiving responses.
// Its behavior can be extended by specifying policies during construction.
// It aliases the internal exported type so both packages share one definition.
type Pipeline = exported.Pipeline
// NewPipeline creates a pipeline from connection options, with any additional policies as specified.
// Policies from ClientOptions are placed after policies from PipelineOptions.
// The module and version parameters are used by the telemetry policy, when enabled.
func NewPipeline(module, version string, plOpts PipelineOptions, options *policy.ClientOptions) Pipeline {
	cp := policy.ClientOptions{}
	if options != nil {
		cp = *options
	}
	if len(plOpts.AllowedHeaders) > 0 {
		// merge the client's allow-list with the caller's. allocate with zero length and
		// append both slices; a non-zero length make followed by copy+append would leave
		// empty-string placeholders between the two groups.
		headers := make([]string, 0, len(plOpts.AllowedHeaders)+len(cp.Logging.AllowedHeaders))
		headers = append(headers, plOpts.AllowedHeaders...)
		headers = append(headers, cp.Logging.AllowedHeaders...)
		cp.Logging.AllowedHeaders = headers
	}
	if len(plOpts.AllowedQueryParameters) > 0 {
		// same merge strategy as for headers
		qp := make([]string, 0, len(plOpts.AllowedQueryParameters)+len(cp.Logging.AllowedQueryParams))
		qp = append(qp, plOpts.AllowedQueryParameters...)
		qp = append(qp, cp.Logging.AllowedQueryParams...)
		cp.Logging.AllowedQueryParams = qp
	}
	// we put the includeResponsePolicy at the very beginning so that the raw response
	// is populated with the final response (some policies might mutate the response)
	policies := []policy.Policy{policyFunc(includeResponsePolicy)}
	if cp.APIVersion != "" {
		policies = append(policies, newAPIVersionPolicy(cp.APIVersion, &plOpts.APIVersion))
	}
	if !cp.Telemetry.Disabled {
		policies = append(policies, NewTelemetryPolicy(module, version, &cp.Telemetry))
	}
	policies = append(policies, plOpts.PerCall...)
	policies = append(policies, cp.PerCallPolicies...)
	policies = append(policies, NewRetryPolicy(&cp.Retry))
	policies = append(policies, plOpts.PerRetry...)
	policies = append(policies, cp.PerRetryPolicies...)
	policies = append(policies, NewLogPolicy(&cp.Logging))
	policies = append(policies, policyFunc(httpHeaderPolicy), policyFunc(bodyDownloadPolicy))
	transport := cp.Transport
	if transport == nil {
		transport = defaultHTTPClient
	}
	return exported.NewPipeline(transport, policies...)
}
// policyFunc is a type that implements the Policy interface.
// Use this type when implementing a stateless policy as a first-class function
// (the same adapter pattern as http.HandlerFunc).
type policyFunc func(*policy.Request) (*http.Response, error)
// Do implements the Policy interface on policyFunc by invoking the function itself.
func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) {
	return pf(req)
}

View file

@ -0,0 +1,75 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"errors"
"fmt"
"net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)
// APIVersionOptions contains options for API versions
type APIVersionOptions struct {
	// Location indicates where to set the version on a request, for example in a header or query param
	Location APIVersionLocation
	// Name is the name of the header or query parameter, for example "api-version".
	// Leaving Name empty makes an API version override an error (see apiVersionPolicy.Do).
	Name string
}
// APIVersionLocation indicates which part of a request identifies the service version
type APIVersionLocation int
// Valid API version locations. The zero value selects the query parameter.
const (
	// APIVersionLocationQueryParam indicates a query parameter
	APIVersionLocationQueryParam = 0
	// APIVersionLocationHeader indicates a header
	APIVersionLocationHeader = 1
)
// newAPIVersionPolicy constructs an apiVersionPolicy. If version is "", Do will be a no-op. If version
// isn't empty and opts.Name is empty, Do will return an error.
func newAPIVersionPolicy(version string, opts *APIVersionOptions) *apiVersionPolicy {
	var loc APIVersionLocation
	var name string
	if opts != nil {
		loc, name = opts.Location, opts.Name
	}
	return &apiVersionPolicy{location: loc, name: name, version: version}
}
// apiVersionPolicy enables users to set the API version of every request a client sends.
type apiVersionPolicy struct {
	// location indicates whether "name" refers to a query parameter or header.
	location APIVersionLocation
	// name of the query param or header whose value should be overridden; provided by the client.
	name string
	// version is the value (provided by the user) that replaces the default version value.
	version string
}
// Do sets the request's API version, if the policy is configured to do so, replacing any prior value.
func (a *apiVersionPolicy) Do(req *policy.Request) (*http.Response, error) {
	if a.version == "" {
		// no override was requested; pass the request through untouched
		return req.Next()
	}
	if a.name == "" {
		// user set ClientOptions.APIVersion but the client ctor didn't set PipelineOptions.APIVersionOptions
		return nil, errors.New("this client doesn't support overriding its API version")
	}
	switch a.location {
	case APIVersionLocationHeader:
		req.Raw().Header.Set(a.name, a.version)
	case APIVersionLocationQueryParam:
		params := req.Raw().URL.Query()
		params.Set(a.name, a.version)
		req.Raw().URL.RawQuery = params.Encode()
	default:
		return nil, fmt.Errorf("unknown APIVersionLocation %d", a.location)
	}
	return req.Next()
}

View file

@ -0,0 +1,116 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"errors"
"net/http"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo"
"github.com/Azure/azure-sdk-for-go/sdk/internal/temporal"
)
// BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential.
type BearerTokenPolicy struct {
	// mainResource is the resource to be retrieved using the tenant specified in the credential
	mainResource *temporal.Resource[exported.AccessToken, acquiringResourceState]
	// the following fields are read-only
	authzHandler policy.AuthorizationHandler
	cred         exported.TokenCredential
	scopes       []string
}
// acquiringResourceState bundles the inputs acquire needs to request a token.
type acquiringResourceState struct {
	req *policy.Request          // the request being authorized; supplies the context for GetToken
	p   *BearerTokenPolicy       // owning policy; supplies the credential
	tro policy.TokenRequestOptions
}
// acquire acquires or updates the resource; only one
// thread/goroutine at a time ever calls this function
func acquire(state acquiringResourceState) (newResource exported.AccessToken, newExpiration time.Time, err error) {
	ctx := state.req.Raw().Context()
	token, err := state.p.cred.GetToken(ctx, state.tro)
	if err != nil {
		return exported.AccessToken{}, time.Time{}, err
	}
	return token, token.ExpiresOn, nil
}
// NewBearerTokenPolicy creates a policy object that authorizes requests with bearer tokens.
// cred: an azcore.TokenCredential implementation such as a credential object from azidentity
// scopes: the list of permission scopes required for the token.
// opts: optional settings. Pass nil to accept default values; this is the same as passing a zero-value options.
func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts *policy.BearerTokenOptions) *BearerTokenPolicy {
	var handler policy.AuthorizationHandler
	if opts != nil {
		handler = opts.AuthorizationHandler
	}
	return &BearerTokenPolicy{
		authzHandler: handler,
		cred:         cred,
		scopes:       scopes,
		mainResource: temporal.NewResource(acquire),
	}
}
// authenticateAndAuthorize returns a function which authorizes req with a token from the policy's credential
func (b *BearerTokenPolicy) authenticateAndAuthorize(req *policy.Request) func(policy.TokenRequestOptions) error {
	return func(tro policy.TokenRequestOptions) error {
		as := acquiringResourceState{p: b, req: req, tro: tro}
		// Get satisfies the request from the shared token resource, invoking acquire when needed
		tk, err := b.mainResource.Get(as)
		if err != nil {
			return err
		}
		req.Raw().Header.Set(shared.HeaderAuthorization, shared.BearerTokenPrefix+tk.Token)
		return nil
	}
}
// Do authorizes a request with a bearer token. When the service answers 401 with a
// WWW-Authenticate challenge and an OnChallenge handler is configured, the request
// is re-authorized and sent once more; otherwise the 401 is returned to the caller.
func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) {
	var err error
	if b.authzHandler.OnRequest != nil {
		err = b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req))
	} else {
		// default behavior: authorize with a token scoped to the policy's configured scopes
		err = b.authenticateAndAuthorize(req)(policy.TokenRequestOptions{Scopes: b.scopes})
	}
	if err != nil {
		return nil, ensureNonRetriable(err)
	}
	res, err := req.Next()
	if err != nil {
		return nil, err
	}
	if res.StatusCode == http.StatusUnauthorized {
		// drop the cached token; it was rejected by the service
		b.mainResource.Expire()
		if res.Header.Get("WWW-Authenticate") != "" && b.authzHandler.OnChallenge != nil {
			if err = b.authzHandler.OnChallenge(req, res, b.authenticateAndAuthorize(req)); err == nil {
				res, err = req.Next()
			}
		}
	}
	return res, ensureNonRetriable(err)
}
// ensureNonRetriable wraps err so the retry policy won't retry it, unless err
// already implements errorinfo.NonRetriable. A nil err is returned unchanged.
func ensureNonRetriable(err error) error {
	if err == nil {
		return nil
	}
	var nre errorinfo.NonRetriable
	if errors.As(err, &nre) {
		return err
	}
	return btpError{err}
}
// btpError is a wrapper that ensures RetryPolicy doesn't retry requests BearerTokenPolicy couldn't authorize
type btpError struct {
	error
}
// NonRetriable is a marker method satisfying errorinfo.NonRetriable.
func (btpError) NonRetriable() {}
// compile-time interface satisfaction check
var _ errorinfo.NonRetriable = (*btpError)(nil)

View file

@ -0,0 +1,73 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"fmt"
"net/http"
"strings"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo"
)
// bodyDownloadPolicy downloads the response's body to a []byte after the rest of the
// pipeline has run, unless the operation opted out via bodyDownloadPolicyOpValues.
func bodyDownloadPolicy(req *policy.Request) (*http.Response, error) {
	resp, err := req.Next()
	if err != nil {
		return resp, err
	}
	var opValues bodyDownloadPolicyOpValues
	// don't skip downloading error response bodies
	if req.OperationValue(&opValues); opValues.Skip && resp.StatusCode < 400 {
		return resp, err
	}
	// Either bodyDownloadPolicyOpValues was not specified (so skip is false)
	// or it was specified and skip is false: don't skip downloading the body
	_, err = exported.Payload(resp)
	if err != nil {
		return resp, newBodyDownloadError(err, req)
	}
	return resp, err
}
// bodyDownloadPolicyOpValues is the struct containing the per-operation values
type bodyDownloadPolicyOpValues struct {
	// Skip, when true, suppresses eager body download for successful (<400) responses.
	Skip bool
}
// bodyDownloadError wraps a body-read failure so the retry policy won't retry it.
type bodyDownloadError struct {
	err error
}
// newBodyDownloadError returns err unchanged when the request is idempotent (safe
// to retry); otherwise it wraps err in a non-retriable bodyDownloadError.
func newBodyDownloadError(err error, req *policy.Request) error {
	// on failure, only retry the request for idempotent operations.
	// we currently identify them as DELETE, GET, and PUT requests.
	if m := strings.ToUpper(req.Raw().Method); m == http.MethodDelete || m == http.MethodGet || m == http.MethodPut {
		// error is safe for retry
		return err
	}
	// wrap error to avoid retries
	return &bodyDownloadError{
		err: err,
	}
}
// Error implements the error interface.
func (b *bodyDownloadError) Error() string {
	return fmt.Sprintf("body download policy: %s", b.err.Error())
}
// NonRetriable satisfies errorinfo.NonRetriable.
func (b *bodyDownloadError) NonRetriable() {
	// marker method
}
// Unwrap exposes the underlying error for errors.Is/errors.As.
func (b *bodyDownloadError) Unwrap() error {
	return b.err
}
// compile-time interface satisfaction check
var _ errorinfo.NonRetriable = (*bodyDownloadError)(nil)

View file

@ -0,0 +1,39 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"context"
"net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)
// httpHeaderPolicy adds custom HTTP headers to a request. The headers are read
// from the request's context, where WithHTTPHeader places them.
func httpHeaderPolicy(req *policy.Request) (*http.Response, error) {
	// check if any custom HTTP headers have been specified
	if header := req.Raw().Context().Value(shared.CtxWithHTTPHeaderKey{}); header != nil {
		for k, v := range header.(http.Header) {
			// use Set to replace any existing value
			// it also canonicalizes the header key
			req.Raw().Header.Set(k, v[0])
			// add any remaining values
			for i := 1; i < len(v); i++ {
				req.Raw().Header.Add(k, v[i])
			}
		}
	}
	return req.Next()
}
// WithHTTPHeader adds the specified http.Header to the parent context.
// Use this to specify custom HTTP headers at the API-call level.
// Any overlapping headers will have their values replaced with the values specified here.
// The headers are applied by httpHeaderPolicy when the request is sent.
func WithHTTPHeader(parent context.Context, header http.Header) context.Context {
	return context.WithValue(parent, shared.CtxWithHTTPHeaderKey{}, header)
}

View file

@ -0,0 +1,34 @@
//go:build go1.16
// +build go1.16
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"context"
"net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)
// includeResponsePolicy creates a policy that retrieves the raw HTTP response upon request
func includeResponsePolicy(req *policy.Request) (*http.Response, error) {
	resp, err := req.Next()
	if resp == nil {
		return resp, err
	}
	// if the caller asked to capture the raw response (via WithCaptureResponse), hand it over
	if raw := req.Raw().Context().Value(shared.CtxIncludeResponseKey{}); raw != nil {
		target := raw.(**http.Response)
		*target = resp
	}
	return resp, err
}
// WithCaptureResponse applies the HTTP response retrieval annotation to the parent context.
// The resp parameter will contain the HTTP response after the request has completed.
// includeResponsePolicy reads this annotation and fills in *resp.
func WithCaptureResponse(parent context.Context, resp **http.Response) context.Context {
	return context.WithValue(parent, shared.CtxIncludeResponseKey{}, resp)
}

View file

@ -0,0 +1,251 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"bytes"
"fmt"
"io"
"net/http"
"sort"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/internal/diag"
)
// logPolicy writes request/response details to the SDK log, redacting any header
// or query parameter not present in its allow-lists.
type logPolicy struct {
	includeBody    bool                // log request/response bodies when the content type is text-like
	allowedHeaders map[string]struct{} // lowercased header names exempt from redaction
	allowedQP      map[string]struct{} // lowercased query parameter names exempt from redaction
}
// NewLogPolicy creates a request/response logging policy object configured using the specified options.
// Pass nil to accept the default values; this is the same as passing a zero-value options.
func NewLogPolicy(o *policy.LogOptions) policy.Policy {
	if o == nil {
		o = &policy.LogOptions{}
	}
	// construct default hash set of allowed headers (keys are stored lowercased;
	// lookups must lowercase as well)
	allowedHeaders := map[string]struct{}{
		"accept":                        {},
		"cache-control":                 {},
		"connection":                    {},
		"content-length":                {},
		"content-type":                  {},
		"date":                          {},
		"etag":                          {},
		"expires":                       {},
		"if-match":                      {},
		"if-modified-since":             {},
		"if-none-match":                 {},
		"if-unmodified-since":           {},
		"last-modified":                 {},
		"ms-cv":                         {},
		"pragma":                        {},
		"request-id":                    {},
		"retry-after":                   {},
		"server":                        {},
		"traceparent":                   {},
		"transfer-encoding":             {},
		"user-agent":                    {},
		"www-authenticate":              {},
		"x-ms-request-id":               {},
		"x-ms-client-request-id":        {},
		"x-ms-return-client-request-id": {},
	}
	// add any caller-specified allowed headers to the set
	for _, ah := range o.AllowedHeaders {
		allowedHeaders[strings.ToLower(ah)] = struct{}{}
	}
	// now do the same thing for query params
	allowedQP := map[string]struct{}{
		"api-version": {},
	}
	for _, qp := range o.AllowedQueryParams {
		allowedQP[strings.ToLower(qp)] = struct{}{}
	}
	return &logPolicy{
		includeBody:    o.IncludeBody,
		allowedHeaders: allowedHeaders,
		allowedQP:      allowedQP,
	}
}
// logPolicyOpValues is the struct containing the per-operation values
type logPolicyOpValues struct {
	try   int32     // 1-based attempt counter, incremented on every pass through Do
	start time.Time // wall-clock time the overall operation began (first try)
}
// Do logs the outgoing request, forwards it, then logs the response (or error)
// along with per-try and per-operation timings.
func (p *logPolicy) Do(req *policy.Request) (*http.Response, error) {
	// Get the per-operation values. These are saved in the Message's map so that they persist across each retry calling into this policy object.
	var opValues logPolicyOpValues
	if req.OperationValue(&opValues); opValues.start.IsZero() {
		opValues.start = time.Now() // If this is the 1st try, record this operation's start time
	}
	opValues.try++ // The first try is #1 (not #0)
	req.SetOperationValue(opValues)
	// Log the outgoing request as informational
	if log.Should(log.EventRequest) {
		b := &bytes.Buffer{}
		fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", opValues.try)
		p.writeRequestWithResponse(b, req, nil, nil)
		var err error
		if p.includeBody {
			err = writeReqBody(req, b)
		}
		log.Write(log.EventRequest, b.String())
		// a body-read failure aborts the request; it's reported after the log line is emitted
		if err != nil {
			return nil, err
		}
	}
	// Set the time for this particular retry operation and then Do the operation.
	tryStart := time.Now()
	response, err := req.Next() // Make the request
	tryEnd := time.Now()
	tryDuration := tryEnd.Sub(tryStart)
	opDuration := tryEnd.Sub(opValues.start)
	if log.Should(log.EventResponse) {
		// We're going to log this; build the string to log
		b := &bytes.Buffer{}
		fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v, OpTime=%v) -- ", opValues.try, tryDuration, opDuration)
		if err != nil { // This HTTP request did not get a response from the service
			fmt.Fprint(b, "REQUEST ERROR\n")
		} else {
			fmt.Fprint(b, "RESPONSE RECEIVED\n")
		}
		p.writeRequestWithResponse(b, req, response, err)
		if err != nil {
			// skip frames runtime.Callers() and runtime.StackTrace()
			b.WriteString(diag.StackTrace(2, 32))
		} else if p.includeBody {
			err = writeRespBody(response, b)
		}
		log.Write(log.EventResponse, b.String())
	}
	return response, err
}
// redactedValue replaces any header/query-parameter value not in the allow-lists.
const redactedValue = "REDACTED"
// writeRequestWithResponse appends a formatted HTTP request into a Buffer. If resp and/or err are
// not nil, then these are also written into the Buffer.
func (p *logPolicy) writeRequestWithResponse(b *bytes.Buffer, req *policy.Request, resp *http.Response, err error) {
	// redact applicable query params (work on a copy of the URL so the request is untouched)
	cpURL := *req.Raw().URL
	qp := cpURL.Query()
	for k := range qp {
		if _, ok := p.allowedQP[strings.ToLower(k)]; !ok {
			qp.Set(k, redactedValue)
		}
	}
	cpURL.RawQuery = qp.Encode()
	// Write the request into the buffer.
	fmt.Fprint(b, "   "+req.Raw().Method+" "+cpURL.String()+"\n")
	p.writeHeader(b, req.Raw().Header)
	if resp != nil {
		fmt.Fprintln(b, "   --------------------------------------------------------------------------------")
		fmt.Fprint(b, "   RESPONSE Status: "+resp.Status+"\n")
		p.writeHeader(b, resp.Header)
	}
	if err != nil {
		fmt.Fprintln(b, "   --------------------------------------------------------------------------------")
		fmt.Fprint(b, "   ERROR:\n"+err.Error()+"\n")
	}
}
// writeHeader appends an HTTP request's or response's headers to a Buffer,
// alphabetized for deterministic output and redacted per the allow-list.
func (p *logPolicy) writeHeader(b *bytes.Buffer, header http.Header) {
	if len(header) == 0 {
		b.WriteString("   (no headers)\n")
		return
	}
	// alphabetize the header names
	keys := make([]string, 0, len(header))
	for name := range header {
		keys = append(keys, name)
	}
	sort.Strings(keys)
	for _, name := range keys {
		v := header.Get(name)
		// redact all header values not in the allow-list
		if _, allowed := p.allowedHeaders[strings.ToLower(name)]; !allowed {
			v = redactedValue
		}
		fmt.Fprintf(b, "   %s: %+v\n", name, v)
	}
}
// shouldLogBody reports whether a request/response body with the given content
// type is text-like and therefore safe to log. When it isn't, a note explaining
// the skip is written to b.
func shouldLogBody(b *bytes.Buffer, contentType string) bool {
	ct := strings.ToLower(contentType)
	switch {
	case strings.HasPrefix(ct, "text"),
		strings.Contains(ct, "json"),
		strings.Contains(ct, "xml"):
		return true
	}
	fmt.Fprintf(b, "   Skip logging body for %s\n", ct)
	return false
}
// writeReqBody appends the request body to b for logging, rewinding the body
// afterwards so the request can still be sent. Non-text bodies are skipped.
func writeReqBody(req *policy.Request, b *bytes.Buffer) error {
	if req.Raw().Body == nil {
		fmt.Fprint(b, "   Request contained no body\n")
		return nil
	}
	if ct := req.Raw().Header.Get(shared.HeaderContentType); !shouldLogBody(b, ct) {
		return nil
	}
	body, err := io.ReadAll(req.Raw().Body)
	if err != nil {
		fmt.Fprintf(b, "   Failed to read request body: %s\n", err.Error())
		return err
	}
	// reading consumed the body; rewind so the transport can send it
	if err := req.RewindBody(); err != nil {
		return err
	}
	logBody(b, body)
	return nil
}
// writeRespBody appends the response body payload to b for logging.
// Non-text bodies (per Content-Type) are skipped.
func writeRespBody(resp *http.Response, b *bytes.Buffer) error {
	ct := resp.Header.Get(shared.HeaderContentType)
	if ct == "" {
		fmt.Fprint(b, "   Response contained no body\n")
		return nil
	} else if !shouldLogBody(b, ct) {
		return nil
	}
	body, err := Payload(resp)
	if err != nil {
		fmt.Fprintf(b, "   Failed to read response body: %s\n", err.Error())
		return err
	}
	if len(body) > 0 {
		logBody(b, body)
	} else {
		fmt.Fprint(b, "   Response contained no body\n")
	}
	return nil
}
func logBody(b *bytes.Buffer, body []byte) {
fmt.Fprintln(b, " --------------------------------------------------------------------------------")
fmt.Fprintln(b, string(body))
fmt.Fprintln(b, " --------------------------------------------------------------------------------")
}

View file

@ -0,0 +1,34 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
)
// requestIDPolicy ensures every request carries an x-ms-client-request-id header.
type requestIDPolicy struct{}
// NewRequestIDPolicy returns a policy that adds the x-ms-client-request-id header
func NewRequestIDPolicy() policy.Policy {
	return &requestIDPolicy{}
}
// Do sets a generated UUID as the client request ID, unless the caller already
// provided one.
func (r *requestIDPolicy) Do(req *policy.Request) (*http.Response, error) {
	const requestIDHeader = "x-ms-client-request-id"
	if req.Raw().Header.Get(requestIDHeader) != "" {
		// the caller supplied an ID; leave it alone
		return req.Next()
	}
	id, err := uuid.New()
	if err != nil {
		return nil, err
	}
	req.Raw().Header.Set(requestIDHeader, id.String())
	return req.Next()
}

View file

@ -0,0 +1,251 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"context"
"errors"
"io"
"math"
"math/rand"
"net/http"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo"
)
const (
	defaultMaxRetries = 3
)

// setDefaults replaces zero/negative fields of o with their documented defaults.
func setDefaults(o *policy.RetryOptions) {
	switch {
	case o.MaxRetries == 0:
		o.MaxRetries = defaultMaxRetries
	case o.MaxRetries < 0:
		o.MaxRetries = 0
	}
	// SDK guidelines specify the default MaxRetryDelay is 60 seconds
	switch {
	case o.MaxRetryDelay == 0:
		o.MaxRetryDelay = 60 * time.Second
	case o.MaxRetryDelay < 0:
		// not really an unlimited cap, but sufficiently large enough to be considered as such
		o.MaxRetryDelay = math.MaxInt64
	}
	switch {
	case o.RetryDelay == 0:
		o.RetryDelay = 800 * time.Millisecond
	case o.RetryDelay < 0:
		o.RetryDelay = 0
	}
	if o.StatusCodes == nil {
		// NOTE: if you change this list, you MUST update the docs in policy/policy.go
		o.StatusCodes = []int{
			http.StatusRequestTimeout,      // 408
			http.StatusTooManyRequests,     // 429
			http.StatusInternalServerError, // 500
			http.StatusBadGateway,          // 502
			http.StatusServiceUnavailable,  // 503
			http.StatusGatewayTimeout,      // 504
		}
	}
}
// calcDelay computes the exponential backoff for the given try: ((2^try)-1) * RetryDelay,
// jittered by a random factor in [0.8, 1.3) and capped at MaxRetryDelay.
func calcDelay(o policy.RetryOptions, try int32) time.Duration { // try is >=1; never 0
	// compute 2^try - 1 by repeated doubling
	var exp int64 = 1
	for n := int32(0); n < try; n++ {
		exp *= 2
	}
	delay := time.Duration(exp-1) * o.RetryDelay
	// Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3)
	delay = time.Duration(delay.Seconds() * (rand.Float64()/2 + 0.8) * float64(time.Second)) // NOTE: We want math/rand; not crypto/rand
	if delay > o.MaxRetryDelay {
		delay = o.MaxRetryDelay
	}
	return delay
}
// NewRetryPolicy creates a policy object configured using the specified options.
// Pass nil to accept the default values; this is the same as passing a zero-value options.
func NewRetryPolicy(o *policy.RetryOptions) policy.Policy {
	opts := policy.RetryOptions{}
	if o != nil {
		opts = *o
	}
	return &retryPolicy{options: opts}
}
// retryPolicy resends failed requests per its configured RetryOptions.
type retryPolicy struct {
	options policy.RetryOptions
}
// Do sends the request, retrying on retriable status codes and retriable errors
// with exponential backoff (honoring Retry-After when present), until the request
// succeeds, a non-retriable condition occurs, MaxRetries is exhausted, or the
// parent context is cancelled.
func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
	options := p.options
	// check if the retry options have been overridden for this call
	if override := req.Raw().Context().Value(shared.CtxWithRetryOptionsKey{}); override != nil {
		options = override.(policy.RetryOptions)
	}
	setDefaults(&options)
	// Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2)
	// When to retry: connection failure or temporary/timeout.
	var rwbody *retryableRequestBody
	if req.Body() != nil {
		// wrap the body so we control when it's actually closed.
		// do this outside the for loop so defers don't accumulate.
		rwbody = &retryableRequestBody{body: req.Body()}
		defer rwbody.realClose()
	}
	try := int32(1)
	for {
		resp = nil // reset
		log.Writef(log.EventRetryPolicy, "=====> Try=%d", try)
		// For each try, seek to the beginning of the Body stream. We do this even for the 1st try because
		// the stream may not be at offset 0 when we first get it and we want the same behavior for the
		// 1st try as for additional tries.
		err = req.RewindBody()
		if err != nil {
			return
		}
		// RewindBody() restores Raw().Body to its original state, so set our rewindable after
		if rwbody != nil {
			req.Raw().Body = rwbody
		}
		if options.TryTimeout == 0 {
			resp, err = req.Next()
		} else {
			// Set the per-try time for this particular retry operation and then Do the operation.
			tryCtx, tryCancel := context.WithTimeout(req.Raw().Context(), options.TryTimeout)
			clone := req.Clone(tryCtx)
			resp, err = clone.Next() // Make the request
			// if the body was already downloaded or there was an error it's safe to cancel the context now
			if err != nil {
				tryCancel()
			} else if _, ok := resp.Body.(*shared.NopClosingBytesReader); ok {
				tryCancel()
			} else {
				// must cancel the context after the body has been read and closed
				resp.Body = &contextCancelReadCloser{cf: tryCancel, body: resp.Body}
			}
		}
		if err == nil {
			log.Writef(log.EventRetryPolicy, "response %d", resp.StatusCode)
		} else {
			log.Writef(log.EventRetryPolicy, "error %v", err)
		}
		if err == nil && !HasStatusCode(resp, options.StatusCodes...) {
			// if there is no error and the response code isn't in the list of retry codes then we're done.
			log.Write(log.EventRetryPolicy, "exit due to non-retriable status code")
			return
		} else if ctxErr := req.Raw().Context().Err(); ctxErr != nil {
			// don't retry if the parent context has been cancelled or its deadline exceeded
			err = ctxErr
			log.Writef(log.EventRetryPolicy, "abort due to %v", err)
			return
		}
		// check if the error is not retriable
		var nre errorinfo.NonRetriable
		if errors.As(err, &nre) {
			// the error says it's not retriable so don't retry
			log.Writef(log.EventRetryPolicy, "non-retriable error %T", nre)
			return
		}
		if try == options.MaxRetries+1 {
			// max number of tries has been reached, don't sleep again
			log.Writef(log.EventRetryPolicy, "MaxRetries %d exceeded", options.MaxRetries)
			return
		}
		// use the delay from retry-after if available
		delay := shared.RetryAfter(resp)
		if delay <= 0 {
			delay = calcDelay(options, try)
		} else if delay > options.MaxRetryDelay {
			// the retry-after delay exceeds the cap so don't retry
			log.Writef(log.EventRetryPolicy, "Retry-After delay %s exceeds MaxRetryDelay of %s", delay, options.MaxRetryDelay)
			return
		}
		// drain before retrying so nothing is leaked
		Drain(resp)
		log.Writef(log.EventRetryPolicy, "End Try #%d, Delay=%v", try, delay)
		select {
		case <-time.After(delay):
			try++
		case <-req.Raw().Context().Done():
			err = req.Raw().Context().Err()
			log.Writef(log.EventRetryPolicy, "abort due to %v", err)
			return
		}
	}
}
// WithRetryOptions adds the specified RetryOptions to the parent context.
// Use this to specify custom RetryOptions at the API-call level.
// The retry policy reads this value and uses it in place of its configured options.
func WithRetryOptions(parent context.Context, options policy.RetryOptions) context.Context {
	return context.WithValue(parent, shared.CtxWithRetryOptionsKey{}, options)
}
// ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser)
// This struct is used when sending a body to the network
type retryableRequestBody struct {
	body io.ReadSeeker // Seeking is required to support retries
}
// Read reads a block of data from an inner stream and reports progress
func (b *retryableRequestBody) Read(p []byte) (n int, err error) {
	return b.body.Read(p)
}
// Seek delegates to the wrapped ReadSeeker so the retry policy can rewind the body.
func (b *retryableRequestBody) Seek(offset int64, whence int) (offsetFromStart int64, err error) {
	return b.body.Seek(offset, whence)
}
func (b *retryableRequestBody) Close() error {
	// We don't want the underlying transport to close the request body on transient failures so this is a nop.
	// The retry policy closes the request body upon success.
	return nil
}
// realClose closes the wrapped body for real, if it implements io.Closer.
func (b *retryableRequestBody) realClose() error {
	if c, ok := b.body.(io.Closer); ok {
		return c.Close()
	}
	return nil
}
// ********** The following type/methods implement the contextCancelReadCloser
// contextCancelReadCloser combines an io.ReadCloser with a cancel func.
// it ensures the cancel func is invoked once the body has been read and closed.
type contextCancelReadCloser struct {
	cf   context.CancelFunc
	body io.ReadCloser
}
// Read delegates to the wrapped body.
func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) {
	return rc.body.Read(p)
}
// Close closes the wrapped body, then invokes the cancel func.
func (rc *contextCancelReadCloser) Close() error {
	err := rc.body.Close()
	rc.cf()
	return err
}

View file

@ -0,0 +1,79 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"bytes"
"fmt"
"net/http"
"os"
"runtime"
"strings"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)
// telemetryPolicy is a pipeline policy that sets the User-Agent header on each request.
type telemetryPolicy struct {
	telemetryValue string // precomputed User-Agent value; empty when telemetry is disabled
}
// NewTelemetryPolicy creates a telemetry policy object that adds telemetry information to outgoing HTTP requests.
// The format is [<application_id> ]azsdk-go-<mod>/<ver> <platform_info>.
// Pass nil to accept the default values; this is the same as passing a zero-value options.
func NewTelemetryPolicy(mod, ver string, o *policy.TelemetryOptions) policy.Policy {
	if o == nil {
		o = &policy.TelemetryOptions{}
	}
	tp := telemetryPolicy{}
	if o.Disabled {
		// an empty telemetryValue makes Do a pass-through
		return &tp
	}
	b := &bytes.Buffer{}
	// normalize ApplicationID: spaces become '/', truncated to 24 characters
	if o.ApplicationID != "" {
		o.ApplicationID = strings.ReplaceAll(o.ApplicationID, " ", "/")
		if len(o.ApplicationID) > 24 {
			o.ApplicationID = o.ApplicationID[:24]
		}
		b.WriteString(o.ApplicationID)
		b.WriteRune(' ')
	}
	b.WriteString(formatTelemetry(mod, ver))
	b.WriteRune(' ')
	b.WriteString(platformInfo)
	tp.telemetryValue = b.String()
	return &tp
}
// formatTelemetry builds the "azsdk-go-<component>/<version>" fragment used in
// the telemetry User-Agent value.
func formatTelemetry(comp, ver string) string {
	const prefix = "azsdk-go-"
	return fmt.Sprintf("%s%s/%s", prefix, comp, ver)
}
// Do implements the policy interface: it sets the User-Agent header to the
// precomputed telemetry value, appending any User-Agent already on the request.
func (p telemetryPolicy) Do(req *policy.Request) (*http.Response, error) {
	if p.telemetryValue == "" {
		return req.Next()
	}
	// preserve the existing User-Agent string
	if ua := req.Raw().Header.Get(shared.HeaderUserAgent); ua != "" {
		// value receiver: this mutates a per-call copy, so the appended UA
		// does not leak into subsequent requests
		p.telemetryValue = fmt.Sprintf("%s %s", p.telemetryValue, ua)
	}
	req.Raw().Header.Set(shared.HeaderUserAgent, p.telemetryValue)
	return req.Next()
}
// platformInfo is the static "(<go version>; <OS>)" suffix appended to telemetry values,
// computed once at package init.
// NOTE: the ONLY function that should write to this variable is this func
var platformInfo = func() string {
	operatingSystem := runtime.GOOS // Default OS string
	switch operatingSystem {
	case "windows":
		operatingSystem = os.Getenv("OS") // Get more specific OS information
	case "linux": // accept default OS info
	case "freebsd": // accept default OS info
	}
	return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem)
}()

View file

@ -0,0 +1,326 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"context"
"encoding/json"
"errors"
"flag"
"fmt"
"net/http"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
)
// FinalStateVia is the enumerated type for the possible final-state-via values.
// It's a type alias for the value re-exported from the internal pollers package.
type FinalStateVia = pollers.FinalStateVia
const (
	// FinalStateViaAzureAsyncOp indicates the final payload comes from the Azure-AsyncOperation URL.
	FinalStateViaAzureAsyncOp = pollers.FinalStateViaAzureAsyncOp
	// FinalStateViaLocation indicates the final payload comes from the Location URL.
	FinalStateViaLocation = pollers.FinalStateViaLocation
	// FinalStateViaOriginalURI indicates the final payload comes from the original URL.
	FinalStateViaOriginalURI = pollers.FinalStateViaOriginalURI
	// FinalStateViaOpLocation indicates the final payload comes from the Operation-Location URL.
	FinalStateViaOpLocation = pollers.FinalStateViaOpLocation
)
// NewPollerOptions contains the optional parameters for NewPoller.
type NewPollerOptions[T any] struct {
	// FinalStateVia contains the final-state-via value for the LRO.
	FinalStateVia FinalStateVia
	// Response contains a preconstructed response type.
	// The final payload will be unmarshaled into it and returned.
	Response *T
	// Handler[T] contains a custom polling implementation.
	Handler PollingHandler[T]
}
// NewPoller creates a Poller based on the provided initial response.
//   - resp - the LRO's initial response; its body is closed here unless a custom Handler is supplied
//   - pl - the pipeline used for subsequent polling requests
//   - options - optional configuration; pass nil to accept the defaults
func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPollerOptions[T]) (*Poller[T], error) {
	if options == nil {
		options = &NewPollerOptions[T]{}
	}
	result := options.Response
	if result == nil {
		result = new(T)
	}
	if options.Handler != nil {
		// a custom polling handler takes precedence over the sniffing logic below
		return &Poller[T]{
			op:     options.Handler,
			resp:   resp,
			result: result,
		}, nil
	}
	defer resp.Body.Close()
	// this is a back-stop in case the swagger is incorrect (i.e. missing one or more status codes for success).
	// ideally the codegen should return an error if the initial response failed and not even create a poller.
	if !pollers.StatusCodeValid(resp) {
		return nil, errors.New("the operation failed or was cancelled")
	}
	// determine the polling method by inspecting the response's headers/status
	var opr PollingHandler[T]
	var err error
	if async.Applicable(resp) {
		// async poller must be checked first as it can also have a location header
		opr, err = async.New[T](pl, resp, options.FinalStateVia)
	} else if op.Applicable(resp) {
		// op poller must be checked before loc as it can also have a location header
		opr, err = op.New[T](pl, resp, options.FinalStateVia)
	} else if loc.Applicable(resp) {
		opr, err = loc.New[T](pl, resp)
	} else if body.Applicable(resp) {
		// must test body poller last as it's a subset of the other pollers.
		// TODO: this is ambiguous for PATCH/PUT if it returns a 200 with no polling headers (sync completion)
		opr, err = body.New[T](pl, resp)
	} else if m := resp.Request.Method; resp.StatusCode == http.StatusAccepted && (m == http.MethodDelete || m == http.MethodPost) {
		// if we get here it means we have a 202 with no polling headers.
		// for DELETE and POST this is a hard error per ARM RPC spec.
		return nil, errors.New("response is missing polling URL")
	} else {
		opr, err = pollers.NewNopPoller[T](resp)
	}
	if err != nil {
		return nil, err
	}
	return &Poller[T]{
		op:     opr,
		resp:   resp,
		result: result,
	}, nil
}
// NewPollerFromResumeTokenOptions contains the optional parameters for NewPollerFromResumeToken.
type NewPollerFromResumeTokenOptions[T any] struct {
	// Response contains a preconstructed response type.
	// The final payload will be unmarshaled into it and returned.
	Response *T
	// Handler[T] contains a custom polling implementation.
	Handler PollingHandler[T]
}
// NewPollerFromResumeToken creates a Poller from a resume token string.
//   - token - a token previously obtained from Poller[T].ResumeToken
//   - pl - the pipeline used for polling requests
//   - options - optional configuration; pass nil to accept the defaults
func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options *NewPollerFromResumeTokenOptions[T]) (*Poller[T], error) {
	if options == nil {
		options = &NewPollerFromResumeTokenOptions[T]{}
	}
	result := options.Response
	if result == nil {
		result = new(T)
	}
	// ensure the token was minted for this flavor of Poller[T] before decoding it
	if err := pollers.IsTokenValid[T](token); err != nil {
		return nil, err
	}
	raw, err := pollers.ExtractToken(token)
	if err != nil {
		return nil, err
	}
	var asJSON map[string]interface{}
	if err := json.Unmarshal(raw, &asJSON); err != nil {
		return nil, err
	}
	opr := options.Handler
	// now rehydrate the poller based on the encoded poller type
	if async.CanResume(asJSON) {
		opr, _ = async.New[T](pl, nil, "")
	} else if body.CanResume(asJSON) {
		opr, _ = body.New[T](pl, nil)
	} else if loc.CanResume(asJSON) {
		opr, _ = loc.New[T](pl, nil)
	} else if op.CanResume(asJSON) {
		opr, _ = op.New[T](pl, nil, "")
	} else if opr != nil {
		log.Writef(log.EventLRO, "Resuming custom poller %T.", opr)
	} else {
		return nil, fmt.Errorf("unhandled poller token %s", string(raw))
	}
	// restore the poller's state from the token payload
	if err := json.Unmarshal(raw, &opr); err != nil {
		return nil, err
	}
	return &Poller[T]{
		op:     opr,
		result: result,
	}, nil
}
// PollingHandler[T] abstracts the differences among poller implementations.
type PollingHandler[T any] interface {
	// Done returns true if the LRO has reached a terminal state.
	Done() bool
	// Poll fetches the latest state of the LRO.
	Poll(context.Context) (*http.Response, error)
	// Result is called once the LRO has reached a terminal state. It populates the out parameter
	// with the result of the operation.
	Result(ctx context.Context, out *T) error
}
// Poller encapsulates a long-running operation, providing polling facilities until the operation reaches a terminal state.
type Poller[T any] struct {
	op     PollingHandler[T] // the underlying poller implementation
	resp   *http.Response    // the most recent polling response
	err    error             // cached terminal error, set by Result
	result *T                // cached terminal result, populated by Result
	done   bool              // true once Result has computed the outcome
}
// PollUntilDoneOptions contains the optional values for the Poller[T].PollUntilDone() method.
type PollUntilDoneOptions struct {
	// Frequency is the time to wait between polling intervals in absence of a Retry-After header. Allowed minimum is one second.
	// Pass zero to accept the default value (30s).
	Frequency time.Duration
}
// PollUntilDone will poll the service endpoint until a terminal state is reached, an error is received, or the context expires.
// It internally uses Poll(), Done(), and Result() in its polling loop, sleeping for the specified duration between intervals.
// options: pass nil to accept the default values.
// NOTE: the default polling frequency is 30 seconds which works well for most operations. However, some operations might
// benefit from a shorter or longer duration.
func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOptions) (T, error) {
	if options == nil {
		options = &PollUntilDoneOptions{}
	}
	cp := *options
	if cp.Frequency == 0 {
		cp.Frequency = 30 * time.Second
	}
	// skip the floor check when executing tests so they don't take so long
	if isTest := flag.Lookup("test.v"); isTest == nil && cp.Frequency < time.Second {
		return *new(T), errors.New("polling frequency minimum is one second")
	}
	start := time.Now()
	logPollUntilDoneExit := func(v interface{}) {
		log.Writef(log.EventLRO, "END PollUntilDone() for %T: %v, total time: %s", p.op, v, time.Since(start))
	}
	log.Writef(log.EventLRO, "BEGIN PollUntilDone() for %T", p.op)
	if p.resp != nil {
		// initial check for a retry-after header existing on the initial response
		if retryAfter := shared.RetryAfter(p.resp); retryAfter > 0 {
			log.Writef(log.EventLRO, "initial Retry-After delay for %s", retryAfter.String())
			if err := shared.Delay(ctx, retryAfter); err != nil {
				logPollUntilDoneExit(err)
				return *new(T), err
			}
		}
	}
	// begin polling the endpoint until a terminal state is reached
	for {
		resp, err := p.Poll(ctx)
		if err != nil {
			logPollUntilDoneExit(err)
			return *new(T), err
		}
		if p.Done() {
			logPollUntilDoneExit("succeeded")
			return p.Result(ctx)
		}
		// a Retry-After header on the polling response overrides the configured frequency
		d := cp.Frequency
		if retryAfter := shared.RetryAfter(resp); retryAfter > 0 {
			log.Writef(log.EventLRO, "Retry-After delay for %s", retryAfter.String())
			d = retryAfter
		} else {
			log.Writef(log.EventLRO, "delay for %s", d.String())
		}
		// shared.Delay honors ctx, so cancellation exits the loop with ctx's error
		if err = shared.Delay(ctx, d); err != nil {
			logPollUntilDoneExit(err)
			return *new(T), err
		}
	}
}
// Poll fetches the latest state of the LRO. It returns an HTTP response or error.
// If Poll succeeds, the poller's state is updated and the HTTP response is returned.
// If Poll fails, the poller's state is unmodified and the error is returned.
// Calling Poll on an LRO that has reached a terminal state will return the last HTTP response.
func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
	if p.Done() {
		// the LRO has reached a terminal state, don't poll again
		return p.resp, nil
	}
	resp, err := p.op.Poll(ctx)
	if err != nil {
		return nil, err
	}
	p.resp = resp
	return p.resp, nil
}
// Done returns true if the LRO has reached a terminal state.
// Once a terminal state is reached, call Result().
func (p *Poller[T]) Done() bool {
	return p.op.Done()
}
// Result returns the result of the LRO and is meant to be used in conjunction with Poll and Done.
// If the LRO completed successfully, a populated instance of T is returned.
// If the LRO failed or was canceled, an *azcore.ResponseError error is returned.
// Calling this on an LRO in a non-terminal state will return an error.
func (p *Poller[T]) Result(ctx context.Context) (T, error) {
	if !p.Done() {
		return *new(T), errors.New("poller is in a non-terminal state")
	}
	if p.done {
		// the result has already been retrieved, return the cached value
		if p.err != nil {
			return *new(T), p.err
		}
		return *p.result, nil
	}
	err := p.op.Result(ctx, p.result)
	var respErr *exported.ResponseError
	if errors.As(err, &respErr) {
		// the LRO failed. record the error
		// (a service error is terminal, so it's cached just like a success)
		p.err = err
	} else if err != nil {
		// the call to Result failed, don't cache anything in this case
		return *new(T), err
	}
	p.done = true
	if p.err != nil {
		return *new(T), p.err
	}
	return *p.result, nil
}
// ResumeToken returns a value representing the poller that can be used to resume
// the LRO at a later time. ResumeTokens are unique per service operation.
// The token's format should be considered opaque and is subject to change.
// Calling this on an LRO in a terminal state will return an error.
func (p *Poller[T]) ResumeToken() (string, error) {
	if p.Done() {
		return "", errors.New("poller is in a terminal state")
	}
	tk, err := pollers.NewResumeToken[T](p.op)
	if err != nil {
		return "", err
	}
	return tk, err
}

View file

@ -0,0 +1,248 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"encoding/xml"
"fmt"
"io"
"mime/multipart"
"os"
"path"
"reflect"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)
// Base64Encoding is used to specify which base-64 encoder/decoder to use when
// encoding/decoding a slice of bytes to/from a string.
type Base64Encoding int
const (
	// Base64StdFormat uses base64.StdEncoding for encoding and decoding payloads.
	Base64StdFormat Base64Encoding = 0
	// Base64URLFormat uses base64.RawURLEncoding for encoding and decoding payloads.
	Base64URLFormat Base64Encoding = 1
)
// NewRequest creates a new policy.Request with the specified input.
// The endpoint MUST be properly encoded before calling this function.
func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*policy.Request, error) {
	return exported.NewRequest(ctx, httpMethod, endpoint)
}
// JoinPaths concatenates multiple URL path segments into one path,
// inserting path separation characters as required. JoinPaths will preserve
// query parameters in the root path.
//   - root - the base URL; any query string it carries is reattached after joining
//   - paths - the path segments to append to root
func JoinPaths(root string, paths ...string) string {
	if len(paths) == 0 {
		return root
	}
	// detach the query string so path.Join can't mangle it.
	// strings.Cut splits on the FIRST '?' only, so a query string that itself
	// contains '?' characters is preserved intact (the previous
	// strings.Split-based code truncated it at the second '?').
	root, qps, _ := strings.Cut(root, "?")
	p := path.Join(paths...)
	// path.Join will remove any trailing slashes.
	// if one was provided, preserve it.
	if strings.HasSuffix(paths[len(paths)-1], "/") && !strings.HasSuffix(p, "/") {
		p += "/"
	}
	if qps != "" {
		p = p + "?" + qps
	}
	// avoid a double slash, or insert a separator, between root and the joined segments
	if strings.HasSuffix(root, "/") && strings.HasPrefix(p, "/") {
		root = root[:len(root)-1]
	} else if !strings.HasSuffix(root, "/") && !strings.HasPrefix(p, "/") {
		p = "/" + p
	}
	return root + p
}
// EncodeByteArray will base-64 encode the byte slice v.
// Base64URLFormat selects the unpadded URL-safe alphabet; any other value
// (i.e. Base64StdFormat) selects the padded standard alphabet.
func EncodeByteArray(v []byte, format Base64Encoding) string {
	switch format {
	case Base64URLFormat:
		return base64.RawURLEncoding.EncodeToString(v)
	default:
		return base64.StdEncoding.EncodeToString(v)
	}
}
// MarshalAsByteArray will base-64 encode the byte slice v, then calls SetBody.
// The encoded value is treated as a JSON string.
func MarshalAsByteArray(req *policy.Request, v []byte, format Base64Encoding) error {
	// send as a JSON string
	encode := fmt.Sprintf("\"%s\"", EncodeByteArray(v, format))
	return req.SetBody(exported.NopCloser(strings.NewReader(encode)), shared.ContentTypeAppJSON)
}
// MarshalAsJSON calls json.Marshal() to get the JSON encoding of v then calls SetBody.
func MarshalAsJSON(req *policy.Request, v interface{}) error {
	// optionally strip fields tagged azure:"ro" from the payload
	if omit := os.Getenv("AZURE_SDK_GO_OMIT_READONLY"); omit == "true" {
		v = cloneWithoutReadOnlyFields(v)
	}
	b, err := json.Marshal(v)
	if err != nil {
		return fmt.Errorf("error marshalling type %T: %s", v, err)
	}
	return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppJSON)
}
// MarshalAsXML calls xml.Marshal() to get the XML encoding of v then calls SetBody.
func MarshalAsXML(req *policy.Request, v interface{}) error {
	b, err := xml.Marshal(v)
	if err != nil {
		return fmt.Errorf("error marshalling type %T: %s", v, err)
	}
	// include the XML header as some services require it
	b = []byte(xml.Header + string(b))
	return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppXML)
}
// SetMultipartFormData writes the specified keys/values as multi-part form
// fields with the specified value. File content must be specified as a ReadSeekCloser.
// All other values are treated as string values.
func SetMultipartFormData(req *policy.Request, formData map[string]interface{}) error {
	body := bytes.Buffer{}
	writer := multipart.NewWriter(&body)
	// writeContent copies src into a new form-file part named fieldname/filename
	writeContent := func(fieldname, filename string, src io.Reader) error {
		fd, err := writer.CreateFormFile(fieldname, filename)
		if err != nil {
			return err
		}
		// copy the data to the form file
		if _, err = io.Copy(fd, src); err != nil {
			return err
		}
		return nil
	}
	for k, v := range formData {
		// file content: a single ReadSeekCloser, or a slice of them for multiple files under one key
		if rsc, ok := v.(io.ReadSeekCloser); ok {
			if err := writeContent(k, k, rsc); err != nil {
				return err
			}
			continue
		} else if rscs, ok := v.([]io.ReadSeekCloser); ok {
			for _, rsc := range rscs {
				if err := writeContent(k, k, rsc); err != nil {
					return err
				}
			}
			continue
		}
		// ensure the value is in string format
		s, ok := v.(string)
		if !ok {
			s = fmt.Sprintf("%v", v)
		}
		if err := writer.WriteField(k, s); err != nil {
			return err
		}
	}
	// Close writes the trailing boundary, finalizing the body
	if err := writer.Close(); err != nil {
		return err
	}
	return req.SetBody(exported.NopCloser(bytes.NewReader(body.Bytes())), writer.FormDataContentType())
}
// SkipBodyDownload will disable automatic downloading of the response body.
func SkipBodyDownload(req *policy.Request) {
	req.SetOperationValue(bodyDownloadPolicyOpValues{Skip: true})
}
// cloneWithoutReadOnlyFields returns a clone of the object graph pointed to by v, omitting values of all read-only
// fields. if there are no read-only fields in the object graph, no clone is created.
// Non-struct values are returned unchanged.
func cloneWithoutReadOnlyFields(v interface{}) interface{} {
	val := reflect.Indirect(reflect.ValueOf(v))
	if val.Kind() != reflect.Struct {
		// not a struct, skip
		return v
	}
	// first walk the graph to find any R/O fields.
	// if there aren't any, skip cloning the graph.
	if !recursiveFindReadOnlyField(val) {
		return v
	}
	return recursiveCloneWithoutReadOnlyFields(val)
}
// recursiveFindReadOnlyField returns true if any field in the object graph of val contains the `azure:"ro"` tag value.
func recursiveFindReadOnlyField(val reflect.Value) bool {
	t := val.Type()
	// iterate over the fields, looking for the "azure" tag.
	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		aztag := field.Tag.Get("azure")
		if azureTagIsReadOnly(aztag) {
			return true
		} else if reflect.Indirect(val.Field(i)).Kind() == reflect.Struct && recursiveFindReadOnlyField(reflect.Indirect(val.Field(i))) {
			// recurse into nested struct (or pointer-to-struct) fields
			return true
		}
	}
	return false
}
// recursiveCloneWithoutReadOnlyFields clones the object graph of val. all non-R/O properties are copied to the clone.
// The return value is a pointer to the cloned struct (reflect.New's interface).
func recursiveCloneWithoutReadOnlyFields(val reflect.Value) interface{} {
	t := val.Type()
	clone := reflect.New(t)
	// iterate over the fields, looking for the "azure" tag.
	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		aztag := field.Tag.Get("azure")
		if azureTagIsReadOnly(aztag) {
			// omit from payload
			continue
		}
		// clone field will receive the same value as the source field...
		value := val.Field(i)
		v := reflect.Indirect(value)
		if v.IsValid() && v.Type() != reflect.TypeOf(time.Time{}) && v.Kind() == reflect.Struct {
			// ...unless the source value is a struct, in which case we recurse to clone that struct.
			// (We can't recursively clone time.Time because it contains unexported fields.)
			c := recursiveCloneWithoutReadOnlyFields(v)
			if field.Anonymous {
				// NOTE: this does not handle the case of embedded fields of unexported struct types.
				// this should be ok as we don't generate any code like this at present
				value = reflect.Indirect(reflect.ValueOf(c))
			} else {
				value = reflect.ValueOf(c)
			}
		}
		reflect.Indirect(clone).Field(i).Set(value)
	}
	return clone.Interface()
}
// azureTagIsReadOnly returns true if the "azure" tag contains the option "ro".
// Options are comma-separated; an empty tag has no options.
func azureTagIsReadOnly(tag string) bool {
	for tag != "" {
		var opt string
		opt, tag, _ = strings.Cut(tag, ",")
		if opt == "ro" {
			return true
		}
	}
	return false
}

View file

@ -0,0 +1,136 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"bytes"
"encoding/base64"
"encoding/json"
"encoding/xml"
"fmt"
"io"
"net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
)
// Payload reads and returns the response body or an error.
// On a successful read, the response body is cached.
// Subsequent reads will access the cached value.
func Payload(resp *http.Response) ([]byte, error) {
	return exported.Payload(resp)
}
// HasStatusCode returns true if the Response's status code is one of the specified values.
func HasStatusCode(resp *http.Response, statusCodes ...int) bool {
	return exported.HasStatusCode(resp, statusCodes...)
}
// UnmarshalAsByteArray will base-64 decode the received payload and place the result into the value pointed to by v.
// The format parameter selects between standard and URL-safe base-64 decoding.
func UnmarshalAsByteArray(resp *http.Response, v *[]byte, format Base64Encoding) error {
	p, err := Payload(resp)
	if err != nil {
		return err
	}
	return DecodeByteArray(string(p), v, format)
}
// UnmarshalAsJSON calls json.Unmarshal() to unmarshal the received payload into the value pointed to by v.
// An empty payload is a no-op and returns nil.
func UnmarshalAsJSON(resp *http.Response, v interface{}) error {
	payload, err := Payload(resp)
	if err != nil {
		return err
	}
	// TODO: verify early exit is correct
	if len(payload) == 0 {
		return nil
	}
	err = removeBOM(resp)
	if err != nil {
		return err
	}
	// NOTE(review): removeBOM trims the cached response body, but the local
	// payload captured above may still carry the BOM prefix that is then passed
	// to json.Unmarshal — confirm whether payload should be re-read here.
	err = json.Unmarshal(payload, v)
	if err != nil {
		err = fmt.Errorf("unmarshalling type %T: %s", v, err)
	}
	return err
}
// UnmarshalAsXML calls xml.Unmarshal() to unmarshal the received payload into the value pointed to by v.
// An empty payload is a no-op and returns nil.
func UnmarshalAsXML(resp *http.Response, v interface{}) error {
	payload, err := Payload(resp)
	if err != nil {
		return err
	}
	// TODO: verify early exit is correct
	if len(payload) == 0 {
		return nil
	}
	err = removeBOM(resp)
	if err != nil {
		return err
	}
	// NOTE(review): as in UnmarshalAsJSON, the local payload captured above may
	// still carry the BOM that removeBOM stripped from the cached body — confirm
	// whether payload should be re-read here.
	err = xml.Unmarshal(payload, v)
	if err != nil {
		err = fmt.Errorf("unmarshalling type %T: %s", v, err)
	}
	return err
}
// Drain reads the response body to completion then closes it. The bytes read are discarded.
func Drain(resp *http.Response) {
if resp != nil && resp.Body != nil {
_, _ = io.Copy(io.Discard, resp.Body)
resp.Body.Close()
}
}
// removeBOM removes any byte-order mark prefix from the payload if present.
// It mutates the cached response body in place (via shared.BytesSetter); slices
// previously returned by Payload are left unchanged.
func removeBOM(resp *http.Response) error {
	payload, err := Payload(resp)
	if err != nil {
		return err
	}
	// UTF8
	trimmed := bytes.TrimPrefix(payload, []byte("\xef\xbb\xbf"))
	if len(trimmed) < len(payload) {
		// a BOM was present; store the trimmed payload back on the body
		resp.Body.(shared.BytesSetter).Set(trimmed)
	}
	return nil
}
// DecodeByteArray will base-64 decode the provided string into v.
//   - s - the encoded string; it may be wrapped in double quotes (i.e. a JSON string)
//   - v - receives the decoded bytes
//   - format - selects the decoder (standard padded or unpadded URL-safe)
// An empty input leaves v untouched and returns nil.
func DecodeByteArray(s string, v *[]byte, format Base64Encoding) error {
	if len(s) == 0 {
		return nil
	}
	payload := s
	// remove surrounding quotes from a JSON string payload.
	// both quotes must be present (and len >= 2) so a malformed input such as a
	// lone `"` can't trigger an out-of-range slice panic, and a trailing
	// non-quote byte isn't silently dropped.
	if len(payload) >= 2 && payload[0] == '"' && payload[len(payload)-1] == '"' {
		payload = payload[1 : len(payload)-1]
	}
	switch format {
	case Base64StdFormat:
		decoded, err := base64.StdEncoding.DecodeString(payload)
		if err == nil {
			*v = decoded
			return nil
		}
		return err
	case Base64URLFormat:
		// use raw encoding as URL format should not contain any '=' characters
		decoded, err := base64.RawURLEncoding.DecodeString(payload)
		if err == nil {
			*v = decoded
			return nil
		}
		return err
	default:
		return fmt.Errorf("unrecognized byte array format: %d", format)
	}
}

View file

@ -0,0 +1,37 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package runtime
import (
"crypto/tls"
"net"
"net/http"
"time"
)
var defaultHTTPClient *http.Client
func init() {
defaultTransport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext,
ForceAttemptHTTP2: true,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
TLSClientConfig: &tls.Config{
MinVersion: tls.VersionTLS12,
},
}
defaultHTTPClient = &http.Client{
Transport: defaultTransport,
}
}

View file

@ -0,0 +1,9 @@
//go:build go1.18
// +build go1.18
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
// license that can be found in the LICENSE file.
// Package streaming contains helpers for streaming IO operations and progress reporting.
package streaming

View file

@ -0,0 +1,72 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package streaming
import (
"io"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
)
// progress wraps a request or response body stream, invoking a user callback
// with the cumulative byte count after each read.
type progress struct {
	rc     io.ReadCloser                // the wrapped stream
	rsc    io.ReadSeekCloser            // seekable view of the same stream; nil for response bodies
	pr     func(bytesTransferred int64) // user callback, invoked with the running total
	offset int64                        // total bytes transferred so far
}
// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker.
func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {
	return exported.NopCloser(rs)
}
// NewRequestProgress adds progress reporting to an HTTP request's body stream.
//   - body - the request body; must be seekable so it can be rewound on retries
//   - pr - callback invoked with the cumulative number of bytes read
func NewRequestProgress(body io.ReadSeekCloser, pr func(bytesTransferred int64)) io.ReadSeekCloser {
	return &progress{
		rc:     body,
		rsc:    body,
		pr:     pr,
		offset: 0,
	}
}
// NewResponseProgress adds progress reporting to an HTTP response's body stream.
//   - body - the response body
//   - pr - callback invoked with the cumulative number of bytes read
func NewResponseProgress(body io.ReadCloser, pr func(bytesTransferred int64)) io.ReadCloser {
	return &progress{
		rc:     body,
		rsc:    nil,
		pr:     pr,
		offset: 0,
	}
}
// Read reads a block of data from an inner stream and reports progress
func (p *progress) Read(b []byte) (n int, err error) {
	n, err = p.rc.Read(b)
	if err != nil && err != io.EOF {
		// a failed read isn't counted or reported
		return
	}
	p.offset += int64(n)
	// Invokes the user's callback method to report progress
	p.pr(p.offset)
	return
}
// Seek only expects a zero or from beginning.
// On success the tracked offset is reset to the new position so progress
// reporting restarts from there (e.g. when the body is rewound for a retry).
func (p *progress) Seek(offset int64, whence int) (int64, error) {
	// This should only ever be called with offset = 0 and whence = io.SeekStart
	n, err := p.rsc.Seek(offset, whence)
	if err == nil {
		p.offset = n // n is already int64; no conversion needed
	}
	return n, err
}
// Close closes the wrapped stream.
// (rc is an io.ReadCloser, so Close always delegates to it; for request bodies
// created via NopCloser the underlying close is a no-op.)
func (p *progress) Close() error {
	return p.rc.Close()
}

View file

@ -0,0 +1,9 @@
//go:build go1.18
// +build go1.18
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
// license that can be found in the LICENSE file.
// Package to contains various type-conversion helper functions.
package to

View file

@ -0,0 +1,21 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package to
// Ptr returns a pointer to the provided value.
// Useful for populating optional struct fields of type *T from literals.
func Ptr[T any](v T) *T {
	return &v
}
// SliceOfPtrs returns a slice of *T from the specified values.
// Each element of the result points at its own distinct copy of the
// corresponding input value.
func SliceOfPtrs[T any](vv ...T) []*T {
	out := make([]*T, len(vv))
	for i, v := range vv {
		v := v // per-iteration copy so every pointer is distinct
		out[i] = &v
	}
	return out
}

View file

@ -0,0 +1,41 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package tracing
// SpanKind represents the role of a Span inside a Trace. Often, this defines how a Span will be processed and visualized by various backends.
// NOTE(review): values are explicit rather than iota — presumably to keep them
// stable for external consumers; do not renumber.
type SpanKind int
const (
	// SpanKindInternal indicates the span represents an internal operation within an application.
	SpanKindInternal SpanKind = 1
	// SpanKindServer indicates the span covers server-side handling of a request.
	SpanKindServer SpanKind = 2
	// SpanKindClient indicates the span describes a request to a remote service.
	SpanKindClient SpanKind = 3
	// SpanKindProducer indicates the span was created by a messaging producer.
	SpanKindProducer SpanKind = 4
	// SpanKindConsumer indicates the span was created by a messaging consumer.
	SpanKindConsumer SpanKind = 5
)
// SpanStatus represents the status of a span.
type SpanStatus int
const (
	// SpanStatusUnset is the default status code.
	SpanStatusUnset SpanStatus = 0
	// SpanStatusError indicates the operation contains an error.
	SpanStatusError SpanStatus = 1
	// SpanStatusOK indicates the operation completed successfully.
	SpanStatusOK SpanStatus = 2
)

View file

@ -0,0 +1,168 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Package tracing contains the definitions needed to support distributed tracing.
package tracing
import (
"context"
)
// ProviderOptions contains the optional values when creating a Provider.
type ProviderOptions struct {
	// for future expansion
}
// NewProvider creates a new Provider with the specified values.
//   - newTracerFn is the underlying implementation for creating Tracer instances
//   - options contains optional values; pass nil to accept the default value
//
// NOTE: options is currently unused; it exists for future expansion.
func NewProvider(newTracerFn func(name, version string) Tracer, options *ProviderOptions) Provider {
	return Provider{
		newTracerFn: newTracerFn,
	}
}
// Provider is the factory that creates Tracer instances.
// It defaults to a no-op provider.
type Provider struct {
	newTracerFn func(name, version string) Tracer
}
// NewTracer creates a new Tracer for the specified name and version.
//   - name - the name of the tracer object, typically the fully qualified name of the service client
//   - version - the version of the module in which the service client resides
func (p Provider) NewTracer(name, version string) (tracer Tracer) {
	// the zero-value Provider has no factory func and yields a no-op Tracer
	if p.newTracerFn != nil {
		tracer = p.newTracerFn(name, version)
	}
	return
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
// TracerOptions contains the optional values when creating a Tracer.
type TracerOptions struct {
	// for future expansion
}
// NewTracer creates a Tracer with the specified values.
//   - newSpanFn is the underlying implementation for creating Span instances
//   - options contains optional values; pass nil to accept the default value
//
// NOTE: options is currently unused; it exists for future expansion.
func NewTracer(newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span), options *TracerOptions) Tracer {
	return Tracer{
		newSpanFn: newSpanFn,
	}
}
// Tracer is the factory that creates Span instances.
type Tracer struct {
	newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span)
}
// Start creates a new span and a context.Context that contains it.
//   - ctx is the parent context for this span. If it contains a Span, the newly created span will be a child of that span, else it will be a root span
//   - spanName identifies the span within a trace, it's typically the fully qualified API name
//   - options contains optional values for the span, pass nil to accept any defaults
func (t Tracer) Start(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) {
	// the zero-value Tracer has no factory func and yields a no-op Span
	if t.newSpanFn != nil {
		return t.newSpanFn(ctx, spanName, options)
	}
	return ctx, Span{}
}
// SpanOptions contains optional settings for creating a span.
type SpanOptions struct {
	// Kind indicates the kind of Span.
	Kind SpanKind
	// Attributes contains key-value pairs of attributes for the span.
	Attributes []Attribute
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
// SpanImpl abstracts the underlying implementation for Span,
// allowing it to work with various tracing implementations.
// Any zero-values will have their default, no-op behavior
// (Span's methods skip the call when the corresponding field is nil).
type SpanImpl struct {
	// End contains the implementation for the Span.End method.
	End func()
	// SetAttributes contains the implementation for the Span.SetAttributes method.
	SetAttributes func(...Attribute)
	// AddEvent contains the implementation for the Span.AddEvent method.
	AddEvent func(string, ...Attribute)
	// AddError contains the implementation for the Span.AddError method.
	AddError func(err error)
	// SetStatus contains the implementation for the Span.SetStatus method.
	SetStatus func(SpanStatus, string)
}
// NewSpan wraps the provided implementation in a Span.
// Zero-value fields of impl yield no-op behavior for the corresponding method.
func NewSpan(impl SpanImpl) Span {
	s := Span{impl: impl}
	return s
}
// Span is a single unit of a trace. A trace can contain multiple spans.
// A zero-value Span provides a no-op implementation.
type Span struct {
	// impl supplies the per-method behavior; nil fields make the matching method a no-op.
	impl SpanImpl
}
// End terminates the span and MUST be called before the span leaves scope.
// Any further updates to the span will be ignored after End is called.
func (s Span) End() {
	if fn := s.impl.End; fn != nil {
		fn()
	}
}
// SetAttributes sets the specified attributes on the Span.
// Any existing attributes with the same keys will have their values overwritten.
func (s Span) SetAttributes(attrs ...Attribute) {
	if fn := s.impl.SetAttributes; fn != nil {
		fn(attrs...)
	}
}
// AddEvent adds a named event with an optional set of attributes to the span.
func (s Span) AddEvent(name string, attrs ...Attribute) {
	if fn := s.impl.AddEvent; fn != nil {
		fn(name, attrs...)
	}
}
// AddError adds the specified error event to the span.
func (s Span) AddError(err error) {
	if fn := s.impl.AddError; fn != nil {
		fn(err)
	}
}
// SetStatus sets the status on the span along with a description.
func (s Span) SetStatus(code SpanStatus, desc string) {
	if fn := s.impl.SetStatus; fn != nil {
		fn(code, desc)
	}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Attribute is a key-value pair attached to spans and span events.
type Attribute struct {
	// Key is the name of the attribute.
	Key string
	// Value is the attribute's value.
	// Types that are natively supported include int64, float64, int, bool, string.
	// Any other type will be formatted per rules of fmt.Sprintf("%v").
	Value any
}

View file

@ -0,0 +1,21 @@
MIT License
Copyright (c) Microsoft Corporation.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE

View file

@ -0,0 +1,51 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package diag
import (
"fmt"
"runtime"
"strings"
)
// Caller returns the file and line number of a frame on the caller's stack.
// If the function fails an empty string is returned.
// skipFrames - the number of frames to skip when determining the caller.
// Passing a value of 0 will return the immediate caller of this function.
func Caller(skipFrames int) string {
	// skipFrames + 1 additionally skips Caller itself
	pc, file, line, ok := runtime.Caller(skipFrames + 1)
	if !ok {
		return ""
	}
	return fmt.Sprintf("%s()\n\t%s:%d", runtime.FuncForPC(pc).Name(), file, line)
}
// StackTrace returns a formatted stack trace string.
// If the function fails an empty string is returned.
// skipFrames - the number of stack frames to skip before composing the trace string.
// totalFrames - the maximum number of stack frames to include in the trace string.
func StackTrace(skipFrames, totalFrames int) string {
	callers := make([]uintptr, totalFrames)
	if runtime.Callers(skipFrames, callers) == 0 {
		return ""
	}
	var sb strings.Builder
	frames := runtime.CallersFrames(callers)
	// each frame is rendered as "func()\n\tfile:line\n"
	for more := true; more; {
		var frame runtime.Frame
		frame, more = frames.Next()
		sb.WriteString(frame.Function)
		sb.WriteString("()\n\t")
		sb.WriteString(frame.File)
		sb.WriteRune(':')
		sb.WriteString(fmt.Sprintf("%d\n", frame.Line))
	}
	return sb.String()
}

View file

@ -0,0 +1,7 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package diag

View file

@ -0,0 +1,7 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package errorinfo

View file

@ -0,0 +1,16 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package errorinfo
// NonRetriable represents a non-transient error. This works in
// conjunction with the retry policy, indicating that the error condition
// is idempotent, so no retries will be attempted.
// Use errors.As() to access this interface in the error chain.
// Implementations embed error and add the marker method NonRetriable().
type NonRetriable interface {
	error
	NonRetriable()
}

View file

@ -0,0 +1,7 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package log

View file

@ -0,0 +1,104 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package log
import (
"fmt"
"os"
"time"
)
///////////////////////////////////////////////////////////////////////////////////////////////////
// NOTE: The following are exported as public surface area from azcore. DO NOT MODIFY
///////////////////////////////////////////////////////////////////////////////////////////////////
// Event is used to group entries. Each group can be toggled on or off via SetEvents.
type Event string
// SetEvents is used to control which events are written to
// the log. By default all log events are written.
func SetEvents(cls ...Event) {
	log.cls = cls
}
// SetListener will set the Logger to write to the specified listener.
// Passing nil disables logging (Should will return false).
func SetListener(lst func(Event, string)) {
	log.lst = lst
}
///////////////////////////////////////////////////////////////////////////////////////////////////
// END PUBLIC SURFACE AREA
///////////////////////////////////////////////////////////////////////////////////////////////////
// Should returns true if the specified log event should be written to the log.
// By default all log events will be logged. Call SetEvents() to limit
// the log events for logging.
// If no listener has been set this will return false.
// Calling this method is useful when the message to log is computationally expensive
// and you want to avoid the overhead if its log event is not enabled.
func Should(cls Event) bool {
	if log.lst == nil {
		return false
	}
	// An empty filter list means "log everything". len() of a nil slice is 0,
	// so a separate nil check is redundant (staticcheck S1009).
	if len(log.cls) == 0 {
		return true
	}
	for _, c := range log.cls {
		if c == cls {
			return true
		}
	}
	return false
}
// Write invokes the underlying listener with the specified event and message.
// If the event shouldn't be logged or there is no listener then Write does nothing.
func Write(cls Event, message string) {
	if Should(cls) {
		log.lst(cls, message)
	}
}
// Writef invokes the underlying listener with the specified event and formatted message.
// If the event shouldn't be logged or there is no listener then Writef does nothing.
func Writef(cls Event, format string, a ...interface{}) {
	if Should(cls) {
		log.lst(cls, fmt.Sprintf(format, a...))
	}
}
// TestResetEvents is used for TESTING PURPOSES ONLY.
// It clears the event filter, restoring the default "log everything" behavior.
func TestResetEvents() {
	log.cls = nil
}
// logger controls which events to log and writing to the underlying log.
type logger struct {
	// cls is the set of enabled events; empty/nil means log all events.
	cls []Event
	// lst is the listener callback; logging is disabled while it is nil.
	lst func(Event, string)
}
// the process-wide logger
var log logger
// init configures logging at package load time from the environment.
func init() {
	initLogging()
}
// initLogging installs a default stderr listener when the
// AZURE_SDK_GO_LOGGING environment variable is set to "all".
// Split out from init for testing purposes.
func initLogging() {
	// cls could be enhanced to support a comma-delimited list of log events
	if cls := os.Getenv("AZURE_SDK_GO_LOGGING"); cls != "all" {
		return
	}
	log.lst = func(cls Event, msg string) {
		// simple console logger, it writes to stderr in the following format:
		// [time-stamp] Event: message
		fmt.Fprintf(os.Stderr, "[%s] %s: %s\n", time.Now().Format(time.StampMicro), cls, msg)
	}
}

View file

@ -0,0 +1,123 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package temporal
import (
"sync"
"time"
)
// AcquireResource abstracts a method for refreshing a temporal resource.
// It returns the new resource value and the instant at which it expires, or an error.
type AcquireResource[TResource, TState any] func(state TState) (newResource TResource, newExpiration time.Time, err error)
// Resource is a temporal resource (usually a credential) that requires periodic refreshing.
// All fields below cond are guarded by cond.L.
type Resource[TResource, TState any] struct {
	// cond is used to synchronize access to the shared resource embodied by the remaining fields
	cond *sync.Cond
	// acquiring indicates that some thread/goroutine is in the process of acquiring/updating the resource
	acquiring bool
	// resource contains the value of the shared resource
	resource TResource
	// expiration indicates when the shared resource expires; it is 0 if the resource was never acquired
	expiration time.Time
	// lastAttempt indicates when a thread/goroutine last attempted to acquire/update the resource
	lastAttempt time.Time
	// acquireResource is the callback function that actually acquires the resource
	acquireResource AcquireResource[TResource, TState]
}
// NewResource creates a new Resource that uses the specified AcquireResource for refreshing.
// The returned Resource starts with a zero expiration, so the first Get always refreshes.
func NewResource[TResource, TState any](ar AcquireResource[TResource, TState]) *Resource[TResource, TState] {
	return &Resource[TResource, TState]{cond: sync.NewCond(&sync.Mutex{}), acquireResource: ar}
}
// Get returns the underlying resource.
// If the resource is fresh, no refresh is performed.
// If the resource is missing or expired, the calling goroutine either refreshes it
// itself or blocks on cond.Wait until the refreshing goroutine broadcasts.
func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) {
	// If the resource is expiring within this time window, update it eagerly.
	// This allows other threads/goroutines to keep running by using the not-yet-expired
	// resource value while one thread/goroutine updates the resource.
	const window = 5 * time.Minute // This example updates the resource 5 minutes prior to expiration
	const backoff = 30 * time.Second // Minimum wait time between eager update attempts
	now, acquire, expired := time.Now(), false, false
	// acquire exclusive lock
	er.cond.L.Lock()
	resource := er.resource
	for {
		expired = er.expiration.IsZero() || er.expiration.Before(now)
		if expired {
			// The resource was never acquired or has expired
			if !er.acquiring {
				// If another thread/goroutine is not acquiring/updating the resource, this thread/goroutine will do it
				er.acquiring, acquire = true, true
				break
			}
			// Getting here means that this thread/goroutine will wait for the updated resource
		} else if er.expiration.Add(-window).Before(now) {
			// The resource is valid but is expiring within the time window
			if !er.acquiring && er.lastAttempt.Add(backoff).Before(now) {
				// If another thread/goroutine is not acquiring/renewing the resource, and none has attempted
				// to do so within the last 30 seconds, this thread/goroutine will do it
				er.acquiring, acquire = true, true
				break
			}
			// This thread/goroutine will use the existing resource value while another updates it
			resource = er.resource
			break
		} else {
			// The resource is not close to expiring, this thread/goroutine should use its current value
			resource = er.resource
			break
		}
		// If we get here, wait for the new resource value to be acquired/updated
		er.cond.Wait()
	}
	er.cond.L.Unlock() // Release the lock so no threads/goroutines are blocked
	var err error
	if acquire {
		// This thread/goroutine has been selected to acquire/update the resource
		var expiration time.Time
		var newValue TResource
		// NOTE(review): this write happens outside cond.L. Only the single goroutine that
		// set er.acquiring reaches here, but the read of lastAttempt above is done under
		// the lock — confirm this is intentional (the race detector may flag it).
		er.lastAttempt = now
		newValue, expiration, err = er.acquireResource(state)
		// Atomically, update the shared resource's new value & expiration.
		er.cond.L.Lock()
		if err == nil {
			// Update resource & expiration, return the new value
			resource = newValue
			er.resource, er.expiration = resource, expiration
		} else if !expired {
			// An eager update failed. Discard the error and return the current--still valid--resource value
			err = nil
		}
		er.acquiring = false // Indicate that no thread/goroutine is currently acquiring the resource
		// Wake up any waiting threads/goroutines since there is a resource they can ALL use
		er.cond.L.Unlock()
		er.cond.Broadcast()
	}
	return resource, err // Return the resource this thread/goroutine can use
}
// Expire marks the resource as expired, ensuring it's refreshed on the next call to Get().
func (er *Resource[TResource, TState]) Expire() {
	er.cond.L.Lock()
	// Zero the expiration as if the resource had never been acquired.
	er.expiration = time.Time{}
	er.cond.L.Unlock()
}

View file

@ -0,0 +1,7 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package uuid

View file

@ -0,0 +1,76 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package uuid
import (
"crypto/rand"
"errors"
"fmt"
"strconv"
)
// The UUID reserved variants.
const (
	reservedRFC4122 byte = 0x40
)
// A UUID representation compliant with specification in RFC4122 document.
type UUID [16]byte
// New returns a new UUID using the RFC4122 algorithm.
func New() (UUID, error) {
	var u UUID
	// Fill with cryptographically random bytes.
	// NOTE: this takes a process-wide lock
	if _, err := rand.Read(u[:]); err != nil {
		return u, err
	}
	// set the variant bits (OR in reservedRFC4122, clear the top bit) and the version nibble to 4
	u[8] = (u[8] | reservedRFC4122) & 0x7F
	const version byte = 4
	u[6] = (u[6] & 0xF) | (version << 4)
	return u, nil
}
// String returns the UUID in "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" format.
func (u UUID) String() string {
	return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
}
// Parse parses a string formatted as "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
// or "{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}" into a UUID.
func Parse(s string) (UUID, error) {
	var u UUID
	// validate the overall shape first
	switch len(s) {
	case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
	case 38: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
		s = s[1:37]
	default:
		return u, errors.New("invalid UUID format")
	}
	if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
		return u, errors.New("invalid UUID format")
	}
	// offsets of each two-hex-digit byte within the canonical form
	offsets := [16]int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34}
	for i, off := range offsets {
		b, err := strconv.ParseUint(s[off:off+2], 16, 8)
		if err != nil {
			return u, fmt.Errorf("invalid UUID format: %s", err)
		}
		u[i] = byte(b)
	}
	return u, nil
}

View file

@ -0,0 +1,167 @@
# Release History
## 1.0.0 (2023-02-07)
### Features Added
* Add support to log calculated block size and count during uploads
* Added MissingSharedKeyCredential error type for cleaner UX. Related to [#19864](https://github.com/Azure/azure-sdk-for-go/issues/19864).
### Breaking Changes
* Changed API signatures to map correctly to Azure Storage REST APIs. These changes impact:
* `blob.GetSASURL()`
* `blockblob.StageBlockFromURL()`
* `container.SetAccessPolicy()`
* `container.GetSASURL()`
* `service.GetSASURL()`
* `service.FilterBlobs()`
* `lease.AcquireLease()` (blobs and containers)
* `lease.ChangeLease()` (blobs and containers)
* Type name changes:
* `CpkInfo` -> `CPKInfo`
* `CpkScopeInfo` -> `CPKScopeInfo`
* `RuleId` -> `RuleID`
* `PolicyId` -> `PolicyID`
* `CorsRule` -> `CORSRule`
* Remove `AccountServices` it is now hardcoded to blobs
### Bugs Fixed
* Fixed encoding issues seen in FilterBlobs. Fixes [#17421](https://github.com/Azure/azure-sdk-for-go/issues/17421).
* Fixing inconsistency seen with Metadata and ORS response. Fixes [#19688](https://github.com/Azure/azure-sdk-for-go/issues/19688).
* Fixed endless loop during pagination issue [#19773](https://github.com/Azure/azure-sdk-for-go/pull/19773).
### Other Changes
* Exported some missing types in the `blob`, `container` and `service` packages. Fixes [#19775](https://github.com/Azure/azure-sdk-for-go/issues/19775).
* SAS changes [#19781](https://github.com/Azure/azure-sdk-for-go/pull/19781):
* AccountSASPermissions: SetImmutabilityPolicy support
* ContainerSASPermissions: Move support
* Validations to ensure correct sas perm ordering
## 0.6.1 (2022-12-09)
### Bugs Fixed
* Fix compilation error on Darwin.
## 0.6.0 (2022-12-08)
### Features Added
* Added BlobDeleteType to DeleteOptions to allow access to ['Permanent'](https://learn.microsoft.com/rest/api/storageservices/delete-blob#permanent-delete) DeleteType.
* Added [Set Blob Expiry API](https://learn.microsoft.com/rest/api/storageservices/set-blob-expiry).
* Added method `ServiceClient()` to the `azblob.Client` type, allowing access to the underlying service client.
* Added support for object level immutability policy with versioning (Version Level WORM).
* Added the custom CRC64 polynomial used by storage for transactional hashes, and implemented automatic hashing for transactions.
### Breaking Changes
* Corrected the name for `saoid` and `suoid` SAS parameters in `BlobSignatureValues` struct as per [this](https://learn.microsoft.com/rest/api/storageservices/create-user-delegation-sas#construct-a-user-delegation-sas)
* Updated type of `BlockSize` from int to int64 in `UploadStreamOptions`
* CRC64 transactional hashes are now supplied with a `uint64` rather than a `[]byte` to conform with Golang's `hash/crc64` package
* Field `XMSContentCRC64` has been renamed to `ContentCRC64`
* The `Lease*` constant types and values in the `blob` and `container` packages have been moved to the `lease` package and their names fixed up to avoid stuttering.
* Fields `TransactionalContentCRC64` and `TransactionalContentMD5` have been replaced by `TransactionalValidation`.
* Fields `SourceContentCRC64` and `SourceContentMD5` have been replaced by `SourceContentValidation`.
* Field `TransactionalContentMD5` has been removed from type `AppendBlockFromURLOptions`.
### Bugs Fixed
* Corrected signing of User Delegation SAS. Fixes [#19372](https://github.com/Azure/azure-sdk-for-go/issues/19372) and [#19454](https://github.com/Azure/azure-sdk-for-go/issues/19454)
* Added formatting of start and expiry time in [SetAccessPolicy](https://learn.microsoft.com/rest/api/storageservices/set-container-acl#request-body). Fixes [#18712](https://github.com/Azure/azure-sdk-for-go/issues/18712)
* Uploading block blobs larger than 256MB can fail in some cases with error `net/http: HTTP/1.x transport connection broken`.
* Blob name parameters are URL-encoded before constructing the complete blob URL.
### Other Changes
* Added some missing public surface area in the `container` and `service` packages.
* The `UploadStream()` methods now use anonymous memory mapped files for buffers in order to reduce heap allocations/fragmentation.
* The anonymous memory mapped files are typically backed by the page/swap file, multiple files are not actually created.
## 0.5.1 (2022-10-11)
### Bugs Fixed
* `GetSASURL()`: for container and blob clients, don't add a forward slash before the query string
* Fixed issue [#19249](https://github.com/Azure/azure-sdk-for-go/issues/19249) by increasing service version to '2020-02-10'.
### Other Changes
* Improved docs for client constructors.
* Updating azcore version to 1.1.4
## 0.5.0 (2022-09-29)
### Breaking Changes
* Complete architectural change for better user experience. Please view the [README](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob#readme)
### Features Added
* Added [UserDelegationCredential](https://learn.microsoft.com/rest/api/storageservices/create-user-delegation-sas) which resolves [#18976](https://github.com/Azure/azure-sdk-for-go/issues/18976), [#16916](https://github.com/Azure/azure-sdk-for-go/issues/16916), [#18977](https://github.com/Azure/azure-sdk-for-go/issues/18977)
* Added [Restore Container API](https://learn.microsoft.com/rest/api/storageservices/restore-container).
### Bugs Fixed
* Fixed issue [#18767](https://github.com/Azure/azure-sdk-for-go/issues/18767)
* Fix deadlock when error writes are slow [#16937](https://github.com/Azure/azure-sdk-for-go/pull/16937)
## 0.4.1 (2022-05-12)
### Other Changes
* Updated to latest `azcore` and `internal` modules
## 0.4.0 (2022-04-19)
### Breaking Changes
* Fixed Issue #17150 : Renaming/refactoring high level methods.
* Fixed Issue #16972 : Constructors should return clients by reference.
* Renaming the options bags to match the naming convention same as that of response. The behaviour of options bags
remains the same.
### Bugs Fixed
* Fixed Issue #17515 : SetTags options bag missing leaseID.
* Fixed Issue #17423 : Drop "Type" suffix from `GeoReplicationStatusType`.
* Fixed Issue #17335 : Nil pointer exception when passing nil options bag in `ListBlobsFlat` API call.
* Fixed Issue #17188 : `BlobURLParts` not supporting VersionID
* Fixed Issue #17152 , Issue #17131 , Issue #17061 : `UploadStreamToBlockBlob` / `UploadStreamToBlockBlob` methods
ignoring the options bag.
* Fixed Issue #16920 : Fixing error handling example.
* Fixed Issue #16786 : Refactoring of autorest code generation definition and adding necessary transformations.
* Fixed Issue #16679 : Response parsing issue in List blobs API.
## 0.3.0 (2022-02-09)
### Breaking Changes
* Updated to latest `azcore`. Public surface area is unchanged.
* [#16978](https://github.com/Azure/azure-sdk-for-go/pull/16978): The `DownloadResponse.Body` parameter is
now `*RetryReaderOptions`.
### Bugs Fixed
* Fixed Issue #16193 : `azblob.GetSASToken` wrong signed resource.
* Fixed Issue #16223 : `HttpRange` does not expose its fields.
* Fixed Issue #16254 : Issue passing reader to upload `BlockBlobClient`
* Fixed Issue #16295 : Problem with listing blobs by using of `ListBlobsHierarchy()`
* Fixed Issue #16542 : Empty `StorageError` in the Azurite environment
* Fixed Issue #16679 : Unable to access Metadata when listing blobs
* Fixed Issue #16816 : `ContainerClient.GetSASToken` doesn't allow list permission.
* Fixed Issue #16988 : Too many arguments in call to `runtime.NewResponseError`
## 0.2.0 (2021-11-03)
### Breaking Changes
* Clients now have one constructor per authentication method
## 0.1.0 (2021-09-13)
### Features Added
* This is the initial preview release of the `azblob` library

View file

@ -0,0 +1,274 @@
# Azure Blob Storage SDK for Go
> Server Version: 2020-10-02
Azure Blob storage is Microsoft's object storage solution for the cloud. Blob
storage is optimized for storing massive amounts of unstructured data.
Unstructured data is data that does not adhere to a particular data model or
definition, such as text or binary data.
[Source code][source] | [API reference documentation][docs] | [REST API documentation][rest_docs] | [Product documentation][product_docs]
## Getting started
### Install the package
Install the Azure Blob Storage SDK for Go with [go get][goget]:
```Powershell
go get github.com/Azure/azure-sdk-for-go/sdk/storage/azblob
```
If you're going to authenticate with Azure Active Directory (recommended), install the [azidentity][azidentity] module.
```Powershell
go get github.com/Azure/azure-sdk-for-go/sdk/azidentity
```
### Prerequisites
A supported [Go][godevdl] version (the Azure SDK supports the two most recent Go releases).
You need an [Azure subscription][azure_sub] and a
[Storage Account][storage_account_docs] to use this package.
To create a new Storage Account, you can use the [Azure Portal][storage_account_create_portal],
[Azure PowerShell][storage_account_create_ps], or the [Azure CLI][storage_account_create_cli].
Here's an example using the Azure CLI:
```Powershell
az storage account create --name MyStorageAccount --resource-group MyResourceGroup --location westus --sku Standard_LRS
```
### Authenticate the client
In order to interact with the Azure Blob Storage service, you'll need to create an instance of the `azblob.Client` type. The [azidentity][azidentity] module makes it easy to add Azure Active Directory support for authenticating Azure SDK clients with their corresponding Azure services.
```go
// create a credential for authenticating with Azure Active Directory
cred, err := azidentity.NewDefaultAzureCredential(nil)
// TODO: handle err
// create an azblob.Client for the specified storage account that uses the above credential
client, err := azblob.NewClient("https://MYSTORAGEACCOUNT.blob.core.windows.net/", cred, nil)
// TODO: handle err
```
Learn more about enabling Azure Active Directory for authentication with Azure Storage in [our documentation][storage_ad] and [our samples](#next-steps).
## Key concepts
Blob storage is designed for:
- Serving images or documents directly to a browser.
- Storing files for distributed access.
- Streaming video and audio.
- Writing to log files.
- Storing data for backup and restore, disaster recovery, and archiving.
- Storing data for analysis by an on-premises or Azure-hosted service.
Blob storage offers three types of resources:
- The _storage account_
- One or more _containers_ in a storage account
- One or more _blobs_ in a container
Instances of the `azblob.Client` type provide methods for manipulating containers and blobs within a storage account.
The storage account is specified when the `azblob.Client` is constructed.
Use the appropriate client constructor function for the authentication mechanism you wish to use.
Learn more about options for authentication _(including Connection Strings, Shared Key, Shared Access Signatures (SAS), Azure Active Directory (AAD), and anonymous public access)_ [in our examples.](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/storage/azblob/examples_test.go)
### Goroutine safety
We guarantee that all client instance methods are goroutine-safe and independent of each other ([guideline](https://azure.github.io/azure-sdk/golang_introduction.html#thread-safety)). This ensures that the recommendation of reusing client instances is always safe, even across goroutines.
### About blob metadata
Blob metadata name/value pairs are valid HTTP headers and should adhere to all restrictions governing HTTP headers. Metadata names must be valid HTTP header names, may contain only ASCII characters, and should be treated as case-insensitive. Base64-encode or URL-encode metadata values containing non-ASCII characters.
### Additional concepts
<!-- CLIENT COMMON BAR -->
[Client options](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy#ClientOptions) |
[Accessing the response](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime#WithCaptureResponse) |
[Handling failures](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore#ResponseError) |
[Logging](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/log)
<!-- CLIENT COMMON BAR -->
## Examples
### Uploading a blob
```go
const (
account = "https://MYSTORAGEACCOUNT.blob.core.windows.net/"
containerName = "sample-container"
blobName = "sample-blob"
sampleFile = "path/to/sample/file"
)
// authenticate with Azure Active Directory
cred, err := azidentity.NewDefaultAzureCredential(nil)
// TODO: handle error
// create a client for the specified storage account
client, err := azblob.NewClient(account, cred, nil)
// TODO: handle error
// open the file for reading
file, err := os.OpenFile(sampleFile, os.O_RDONLY, 0)
// TODO: handle error
defer file.Close()
// upload the file to the specified container with the specified blob name
_, err = client.UploadFile(context.TODO(), containerName, blobName, file, nil)
// TODO: handle error
```
### Downloading a blob
```go
// this example accesses a public blob via anonymous access, so no credentials are required
client, err := azblob.NewClientWithNoCredential("https://azurestoragesamples.blob.core.windows.net/", nil)
// TODO: handle error
// create or open a local file where we can download the blob
file, err := os.Create("cloud.jpg")
// TODO: handle error
defer file.Close()
// download the blob
_, err = client.DownloadFile(context.TODO(), "samples", "cloud.jpg", file, nil)
// TODO: handle error
```
### Enumerating blobs
```go
const (
account = "https://MYSTORAGEACCOUNT.blob.core.windows.net/"
containerName = "sample-container"
)
// authenticate with Azure Active Directory
cred, err := azidentity.NewDefaultAzureCredential(nil)
// TODO: handle error
// create a client for the specified storage account
client, err := azblob.NewClient(account, cred, nil)
// TODO: handle error
// blob listings are returned across multiple pages
pager := client.NewListBlobsFlatPager(containerName, nil)
// continue fetching pages until no more remain
for pager.More() {
// advance to the next page
page, err := pager.NextPage(context.TODO())
// TODO: handle error
// print the blob names for this page
for _, blob := range page.Segment.BlobItems {
fmt.Println(*blob.Name)
}
}
```
## Troubleshooting
All Blob service operations will return an
[*azcore.ResponseError][azcore_response_error] on failure with a
populated `ErrorCode` field. Many of these errors are recoverable.
The [bloberror][blob_error] package provides the possible Storage error codes
along with various helper facilities for error handling.
```go
const (
connectionString = "<connection_string>"
containerName = "sample-container"
)
// create a client with the provided connection string
client, err := azblob.NewClientFromConnectionString(connectionString, nil)
// TODO: handle error
// try to delete the container, avoiding any potential race conditions with an in-progress or completed deletion
_, err = client.DeleteContainer(context.TODO(), containerName, nil)
if bloberror.HasCode(err, bloberror.ContainerBeingDeleted, bloberror.ContainerNotFound) {
// ignore any errors if the container is being deleted or already has been deleted
} else if err != nil {
// TODO: some other error
}
```
## Next steps
Get started with our [Blob samples][samples]. They contain complete examples of the above snippets and more.
### Specialized clients
The Azure Blob Storage SDK for Go also provides specialized clients in various subpackages.
Use these clients when you need to interact with a specific kind of blob.
Learn more about the various types of blobs from the following links.
- [appendblob][append_blob] - [REST docs](https://docs.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs#about-append-blobs)
- [blockblob][block_blob] - [REST docs](https://docs.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs#about-block-blobs)
- [pageblob][page_blob] - [REST docs](https://docs.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs#about-page-blobs)
The [blob][blob] package contains APIs common to all blob types. This includes APIs for deleting and undeleting a blob, setting metadata, and more.
The [lease][lease] package contains clients for managing leases on blobs and containers. Please see the [reference docs](https://docs.microsoft.com/rest/api/storageservices/lease-blob#remarks) for general information on leases.
The [container][container] package contains APIs specific to containers. This includes APIs for setting access policies or properties, and more.
The [service][service] package contains APIs specific to blob service. This includes APIs for manipulating containers, retrieving account information, and more.
The [sas][sas] package contains utilities to aid in the creation and manipulation of Shared Access Signature tokens.
See the package's documentation for more information.
## Contributing
See the [Storage CONTRIBUTING.md][storage_contrib] for details on building,
testing, and contributing to this library.
This project welcomes contributions and suggestions. Most contributions require
you to agree to a Contributor License Agreement (CLA) declaring that you have
the right to, and actually do, grant us the rights to use your contribution. For
details, visit [cla.microsoft.com][cla].
This project has adopted the [Microsoft Open Source Code of Conduct][coc].
For more information see the [Code of Conduct FAQ][coc_faq]
or contact [opencode@microsoft.com][coc_contact] with any
additional questions or comments.
![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fstorage%2Fazblob%2FREADME.png)
<!-- LINKS -->
[source]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob
[docs]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob
[rest_docs]: https://docs.microsoft.com/rest/api/storageservices/blob-service-rest-api
[product_docs]: https://docs.microsoft.com/azure/storage/blobs/storage-blobs-overview
[godevdl]: https://go.dev/dl/
[goget]: https://pkg.go.dev/cmd/go#hdr-Add_dependencies_to_current_module_and_install_them
[storage_account_docs]: https://docs.microsoft.com/azure/storage/common/storage-account-overview
[storage_account_create_ps]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-powershell
[storage_account_create_cli]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-cli
[storage_account_create_portal]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-portal
[azure_cli]: https://docs.microsoft.com/cli/azure
[azure_sub]: https://azure.microsoft.com/free/
[azidentity]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity
[storage_ad]: https://docs.microsoft.com/azure/storage/common/storage-auth-aad
[azcore_response_error]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore#ResponseError
[samples]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/storage/azblob/examples_test.go
[append_blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/appendblob/client.go
[blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/blob/client.go
[blob_error]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/bloberror/error_codes.go
[block_blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/blockblob/client.go
[container]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/container/client.go
[lease]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/lease
[page_blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/pageblob/client.go
[sas]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/sas
[service]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/service/client.go
[storage_contrib]: https://github.com/Azure/azure-sdk-for-go/blob/main/CONTRIBUTING.md
[cla]: https://cla.microsoft.com
[coc]: https://opensource.microsoft.com/codeofconduct/
[coc_faq]: https://opensource.microsoft.com/codeofconduct/faq/
[coc_contact]: mailto:opencode@microsoft.com

View file

@ -0,0 +1,352 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package appendblob
import (
"context"
"io"
"os"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
)
// ClientOptions contains the optional parameters when creating a Client.
// There are currently no append-blob-specific options; only the shared
// azcore client options (retry, transport, logging, etc.) apply.
type ClientOptions struct {
	azcore.ClientOptions
}
// Client represents a client to an Azure Storage append blob.
// It is a thin wrapper over a composite of the generated blob and append-blob clients.
type Client base.CompositeClient[generated.BlobClient, generated.AppendBlobClient]
// NewClient creates an instance of Client with the specified values.
//   - blobURL - the URL of the blob e.g. https://<account>.blob.core.windows.net/container/blob.txt
//   - cred - an Azure AD credential, typically obtained via the azidentity module
//   - options - client options; pass nil to accept the default values
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
	opts := shared.GetClientOptions(options)
	// authenticate every (re)try of a request with a bearer token
	opts.PerRetryPolicies = append(opts.PerRetryPolicies, runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil))
	pipeline := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &opts.ClientOptions)
	return (*Client)(base.NewAppendBlobClient(blobURL, pipeline, nil)), nil
}
// NewClientWithNoCredential creates an instance of Client with the specified values.
// This is used to anonymously access a blob or with a shared access signature (SAS) token.
//   - blobURL - the URL of the blob e.g. https://<account>.blob.core.windows.net/container/blob.txt?<sas token>
//   - options - client options; pass nil to accept the default values
func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) {
	opts := shared.GetClientOptions(options)
	// no credential: any SAS token is expected to be embedded in blobURL
	pipeline := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &opts.ClientOptions)
	return (*Client)(base.NewAppendBlobClient(blobURL, pipeline, nil)), nil
}
// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
//   - blobURL - the URL of the blob e.g. https://<account>.blob.core.windows.net/container/blob.txt
//   - cred - a SharedKeyCredential created with the matching blob's storage account and access key
//   - options - client options; pass nil to accept the default values
func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) {
	opts := shared.GetClientOptions(options)
	// sign every (re)try of a request with the shared key
	opts.PerRetryPolicies = append(opts.PerRetryPolicies, exported.NewSharedKeyCredPolicy(cred))
	pipeline := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &opts.ClientOptions)
	// retain cred so derived clients (WithSnapshot/WithVersionID) can reuse it
	return (*Client)(base.NewAppendBlobClient(blobURL, pipeline, cred)), nil
}
// NewClientFromConnectionString creates an instance of Client with the specified values.
//   - connectionString - a connection string for the desired storage account
//   - containerName - the name of the container within the storage account
//   - blobName - the name of the blob within the container
//   - options - client options; pass nil to accept the default values
func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) {
	parsed, err := shared.ParseConnectionString(connectionString)
	if err != nil {
		return nil, err
	}
	parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName, blobName)
	// without both an account name and key we cannot build a shared-key
	// credential; fall back to anonymous/SAS access
	if parsed.AccountKey == "" || parsed.AccountName == "" {
		return NewClientWithNoCredential(parsed.ServiceURL, options)
	}
	credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey)
	if err != nil {
		return nil, err
	}
	return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options)
}
// BlobClient returns the embedded blob client for this AppendBlob client.
func (ab *Client) BlobClient() *blob.Client {
	composite := (*base.CompositeClient[generated.BlobClient, generated.AppendBlobClient])(ab)
	innerBlob, _ := base.InnerClients(composite)
	return (*blob.Client)(innerBlob)
}
// sharedKey returns the SharedKeyCredential this client was constructed with,
// or nil when a different (or no) credential was used.
func (ab *Client) sharedKey() *blob.SharedKeyCredential {
	return base.SharedKeyComposite((*base.CompositeClient[generated.BlobClient, generated.AppendBlobClient])(ab))
}
// generated returns the underlying generated append-blob client.
func (ab *Client) generated() *generated.AppendBlobClient {
	_, appendBlob := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.AppendBlobClient])(ab))
	return appendBlob
}
// innerBlobGenerated returns the generated client backing the embedded blob client.
func (ab *Client) innerBlobGenerated() *generated.BlobClient {
	b := ab.BlobClient()
	return base.InnerClient((*base.Client[generated.BlobClient])(b))
}
// URL returns the URL endpoint used by the Client object (as stored on the
// underlying generated client).
func (ab *Client) URL() string {
	return ab.generated().Endpoint()
}
// WithSnapshot creates a new AppendBlobURL object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot returning a URL to the base blob.
func (ab *Client) WithSnapshot(snapshot string) (*Client, error) {
	parsed, err := blob.ParseURL(ab.URL())
	if err != nil {
		return nil, err
	}
	parsed.Snapshot = snapshot
	// reuse the existing pipeline and credential for the derived client
	return (*Client)(base.NewAppendBlobClient(parsed.String(), ab.generated().Pipeline(), ab.sharedKey())), nil
}
// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id.
// Pass "" to remove the versionID returning a URL to the base blob.
func (ab *Client) WithVersionID(versionID string) (*Client, error) {
	parsed, err := blob.ParseURL(ab.URL())
	if err != nil {
		return nil, err
	}
	parsed.VersionID = versionID
	// reuse the existing pipeline and credential for the derived client
	return (*Client)(base.NewAppendBlobClient(parsed.String(), ab.generated().Pipeline(), ab.sharedKey())), nil
}
// Create creates a 0-size append blob. Call AppendBlock to append data to an append blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (ab *Client) Create(ctx context.Context, o *CreateOptions) (CreateResponse, error) {
	opts, headers, lease, cpk, cpkScope, modified := o.format()
	// content length is always 0 when creating an append blob
	return ab.generated().Create(ctx, 0, opts, headers, lease, cpk, cpkScope, modified)
}
// AppendBlock writes a stream to a new block of data to the end of the existing append blob.
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block.
func (ab *Client) AppendBlock(ctx context.Context, body io.ReadSeekCloser, o *AppendBlockOptions) (AppendBlockResponse, error) {
	count, err := shared.ValidateSeekableStreamAt0AndGetCount(body)
	if err != nil {
		// BUGFIX: propagate the validation error; previously this returned a
		// nil error, silently reporting success with a zero-value response.
		return AppendBlockResponse{}, err
	}
	appendOptions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions, leaseAccessConditions := o.format()
	if o != nil && o.TransactionalValidation != nil {
		body, err = o.TransactionalValidation.Apply(body, appendOptions)
		if err != nil {
			// BUGFIX: same as above — return the error instead of nil.
			return AppendBlockResponse{}, err
		}
	}
	resp, err := ab.generated().AppendBlock(ctx,
		count,
		body,
		appendOptions,
		leaseAccessConditions,
		appendPositionAccessConditions,
		cpkInfo,
		cpkScope,
		modifiedAccessConditions)
	return resp, err
}
// AppendBlockFromURL copies a new block of data from source URL to the end of the existing append blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url.
func (ab *Client) AppendBlockFromURL(ctx context.Context, source string, o *AppendBlockFromURLOptions) (AppendBlockFromURLResponse, error) {
	opts, cpkInfo, cpkScopeInfo, leaseConditions, appendPosConditions, modifiedConditions, srcModifiedConditions := o.format()
	// The service requires a content length of 0 on every *FromURL operation;
	// anything else is a 400.
	return ab.generated().AppendBlockFromURL(ctx, source, 0,
		opts,
		cpkInfo,
		cpkScopeInfo,
		leaseConditions,
		appendPosConditions,
		modifiedConditions,
		srcModifiedConditions)
}
// Seal - The purpose of Append Blob Seal is to allow users and applications to seal append blobs, marking them as read only.
// https://docs.microsoft.com/en-us/rest/api/storageservices/append-blob-seal
func (ab *Client) Seal(ctx context.Context, o *SealOptions) (SealResponse, error) {
	lease, modified, appendPos := o.format()
	return ab.generated().Seal(ctx, nil, lease, modified, appendPos)
}
// Delete marks the specified blob or snapshot for deletion; the actual removal
// happens later during garbage collection. Deleting a blob also deletes all of its snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
func (ab *Client) Delete(ctx context.Context, o *blob.DeleteOptions) (blob.DeleteResponse, error) {
	blobClient := ab.BlobClient()
	return blobClient.Delete(ctx, o)
}
// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob.
func (ab *Client) Undelete(ctx context.Context, o *blob.UndeleteOptions) (blob.UndeleteResponse, error) {
	blobClient := ab.BlobClient()
	return blobClient.Undelete(ctx, o)
}
// SetImmutabilityPolicy operation enables users to set the immutability policy on a blob.
// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview
func (ab *Client) SetImmutabilityPolicy(ctx context.Context, expiryTime time.Time, options *blob.SetImmutabilityPolicyOptions) (blob.SetImmutabilityPolicyResponse, error) {
	blobClient := ab.BlobClient()
	return blobClient.SetImmutabilityPolicy(ctx, expiryTime, options)
}
// DeleteImmutabilityPolicy operation enables users to delete the immutability policy on a blob.
// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview
func (ab *Client) DeleteImmutabilityPolicy(ctx context.Context, options *blob.DeleteImmutabilityPolicyOptions) (blob.DeleteImmutabilityPolicyResponse, error) {
	blobClient := ab.BlobClient()
	return blobClient.DeleteImmutabilityPolicy(ctx, options)
}
// SetLegalHold operation enables users to set legal hold on a blob.
// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview
func (ab *Client) SetLegalHold(ctx context.Context, legalHold bool, options *blob.SetLegalHoldOptions) (blob.SetLegalHoldResponse, error) {
	blobClient := ab.BlobClient()
	return blobClient.SetLegalHold(ctx, legalHold, options)
}
// SetTier operation sets the tier on a blob. The operation is allowed on a page
// blob in a premium storage account and on a block blob in a blob storage account (locally
// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and
// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation
// does not update the blob's ETag.
// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
func (ab *Client) SetTier(ctx context.Context, tier blob.AccessTier, o *blob.SetTierOptions) (blob.SetTierResponse, error) {
	blobClient := ab.BlobClient()
	return blobClient.SetTier(ctx, tier, o)
}
// SetExpiry operation sets an expiry time on an existing blob. This operation is only allowed on Hierarchical Namespace enabled accounts.
// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-blob-expiry
func (ab *Client) SetExpiry(ctx context.Context, expiryType ExpiryType, o *SetExpiryOptions) (SetExpiryResponse, error) {
	// a nil expiry type means the blob never expires
	if expiryType == nil {
		expiryType = ExpiryTypeNever{}
	}
	kind, opts := expiryType.Format(o)
	return ab.innerBlobGenerated().SetExpiry(ctx, kind, opts)
}
// GetProperties returns the blob's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
func (ab *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOptions) (blob.GetPropertiesResponse, error) {
	blobClient := ab.BlobClient()
	return blobClient.GetProperties(ctx, o)
}
// SetHTTPHeaders changes a blob's HTTP headers.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
func (ab *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) {
	blobClient := ab.BlobClient()
	return blobClient.SetHTTPHeaders(ctx, HTTPHeaders, o)
}
// SetMetadata changes a blob's metadata.
// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata.
func (ab *Client) SetMetadata(ctx context.Context, metadata map[string]*string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) {
	blobClient := ab.BlobClient()
	return blobClient.SetMetadata(ctx, metadata, o)
}
// CreateSnapshot creates a read-only snapshot of a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob.
func (ab *Client) CreateSnapshot(ctx context.Context, o *blob.CreateSnapshotOptions) (blob.CreateSnapshotResponse, error) {
	blobClient := ab.BlobClient()
	return blobClient.CreateSnapshot(ctx, o)
}
// StartCopyFromURL copies the data at the source URL to a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob.
func (ab *Client) StartCopyFromURL(ctx context.Context, copySource string, o *blob.StartCopyFromURLOptions) (blob.StartCopyFromURLResponse, error) {
	blobClient := ab.BlobClient()
	return blobClient.StartCopyFromURL(ctx, copySource, o)
}
// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob.
func (ab *Client) AbortCopyFromURL(ctx context.Context, copyID string, o *blob.AbortCopyFromURLOptions) (blob.AbortCopyFromURLResponse, error) {
	blobClient := ab.BlobClient()
	return blobClient.AbortCopyFromURL(ctx, copyID, o)
}
// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot.
// Each call to this operation replaces all existing tags attached to the blob.
// To remove all tags from the blob, call this operation with no tags set.
// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags
func (ab *Client) SetTags(ctx context.Context, tags map[string]string, o *blob.SetTagsOptions) (blob.SetTagsResponse, error) {
	blobClient := ab.BlobClient()
	return blobClient.SetTags(ctx, tags, o)
}
// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot.
// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags
func (ab *Client) GetTags(ctx context.Context, o *blob.GetTagsOptions) (blob.GetTagsResponse, error) {
	blobClient := ab.BlobClient()
	return blobClient.GetTags(ctx, o)
}
// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url.
func (ab *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.CopyFromURLOptions) (blob.CopyFromURLResponse, error) {
	blobClient := ab.BlobClient()
	return blobClient.CopyFromURL(ctx, copySource, o)
}
// Concurrent Download Functions -----------------------------------------------------------------------------------------
// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
func (ab *Client) DownloadStream(ctx context.Context, o *blob.DownloadStreamOptions) (blob.DownloadStreamResponse, error) {
	blobClient := ab.BlobClient()
	return blobClient.DownloadStream(ctx, o)
}
// DownloadBuffer downloads an Azure blob into a caller-supplied buffer, fetching chunks in parallel.
func (ab *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *blob.DownloadBufferOptions) (int64, error) {
	writer := shared.NewBytesWriter(buffer)
	return ab.BlobClient().DownloadBuffer(ctx, writer, o)
}
// DownloadFile downloads an Azure blob to a local file.
// The file would be truncated if the size doesn't match.
func (ab *Client) DownloadFile(ctx context.Context, file *os.File, o *blob.DownloadFileOptions) (int64, error) {
	blobClient := ab.BlobClient()
	return blobClient.DownloadFile(ctx, file, o)
}

View file

@ -0,0 +1,176 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package appendblob
import (
"time"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
)
// Type Declarations ---------------------------------------------------------------------
// AppendPositionAccessConditions contains a group of parameters for the Client.AppendBlock method.
// It is an alias of the generated layer's type of the same name.
type AppendPositionAccessConditions = generated.AppendPositionAccessConditions
// Request Model Declaration -------------------------------------------------------------------------------------------
// CreateOptions provides set of configurations for Create Append Blob operation
type CreateOptions struct {
	// Specifies the date time when the blobs immutability policy is set to expire.
	ImmutabilityPolicyExpiry *time.Time
	// Specifies the immutability policy mode to set on the blob.
	ImmutabilityPolicyMode *blob.ImmutabilityPolicySetting
	// Specified if a legal hold should be set on the blob.
	LegalHold *bool
	// Optional conditions (lease/modification) that must be met for the operation to proceed.
	AccessConditions *blob.AccessConditions
	// Optional standard HTTP headers (e.g. content type) to set on the blob.
	HTTPHeaders *blob.HTTPHeaders
	// Optional client-provided key (CPK) encryption information.
	CPKInfo *blob.CPKInfo
	// Optional client-provided key scope information.
	CPKScopeInfo *blob.CPKScopeInfo
	// Optional. Used to set blob tags in various blob operations.
	Tags map[string]string
	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
	// operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs
	// are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source
	// blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers.
	// See Naming and Referencing Containers, Blobs, and Metadata for more information.
	Metadata map[string]*string
}
// format translates the public options into the generated layer's parameter
// groups. A nil receiver yields all-nil groups.
func (o *CreateOptions) format() (*generated.AppendBlobClientCreateOptions, *generated.BlobHTTPHeaders, *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions) {
	if o == nil {
		return nil, nil, nil, nil, nil, nil
	}
	lease, modified := exported.FormatBlobAccessConditions(o.AccessConditions)
	opts := &generated.AppendBlobClientCreateOptions{
		BlobTagsString:           shared.SerializeBlobTagsToStrPtr(o.Tags),
		Metadata:                 o.Metadata,
		ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry,
		ImmutabilityPolicyMode:   o.ImmutabilityPolicyMode,
		LegalHold:                o.LegalHold,
	}
	return opts, o.HTTPHeaders, lease, o.CPKInfo, o.CPKScopeInfo, modified
}
// ---------------------------------------------------------------------------------------------------------------------
// AppendBlockOptions contains the optional parameters for the Client.AppendBlock method.
type AppendBlockOptions struct {
	// TransactionalValidation specifies the transfer validation type to use.
	// The default is nil (no transfer validation).
	TransactionalValidation blob.TransferValidationType
	// Optional conditions on the append position / max blob size.
	AppendPositionAccessConditions *AppendPositionAccessConditions
	// Optional client-provided key (CPK) encryption information.
	CPKInfo *blob.CPKInfo
	// Optional client-provided key scope information.
	CPKScopeInfo *blob.CPKScopeInfo
	// Optional conditions (lease/modification) that must be met for the operation to proceed.
	AccessConditions *blob.AccessConditions
}
// format expands the public options into the generated layer's parameter
// groups. A nil receiver yields all-nil groups. Note that
// TransactionalValidation is applied by the caller, not here.
func (o *AppendBlockOptions) format() (*generated.AppendBlobClientAppendBlockOptions, *generated.AppendPositionAccessConditions,
	*generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) {
	if o == nil {
		return nil, nil, nil, nil, nil, nil
	}
	lease, modified := exported.FormatBlobAccessConditions(o.AccessConditions)
	return &generated.AppendBlobClientAppendBlockOptions{}, o.AppendPositionAccessConditions, o.CPKInfo, o.CPKScopeInfo, modified, lease
}
// ---------------------------------------------------------------------------------------------------------------------
// AppendBlockFromURLOptions contains the optional parameters for the Client.AppendBlockFromURL method.
type AppendBlockFromURLOptions struct {
	// SourceContentValidation contains the validation mechanism used on the range of bytes read from the source.
	SourceContentValidation blob.SourceContentValidationType
	// Optional conditions on the append position / max blob size.
	AppendPositionAccessConditions *AppendPositionAccessConditions
	// Optional client-provided key (CPK) encryption information.
	CPKInfo *blob.CPKInfo
	// Optional client-provided key scope information.
	CPKScopeInfo *blob.CPKScopeInfo
	// Optional conditions evaluated against the source blob.
	SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions
	// Optional conditions (lease/modification) evaluated against the destination blob.
	AccessConditions *blob.AccessConditions
	// Range specifies a range of bytes. The default value is all bytes.
	Range blob.HTTPRange
}
// format expands the public options into the generated layer's parameter
// groups. A nil receiver yields all-nil groups.
func (o *AppendBlockFromURLOptions) format() (*generated.AppendBlobClientAppendBlockFromURLOptions, *generated.CPKInfo,
	*generated.CPKScopeInfo, *generated.LeaseAccessConditions, *generated.AppendPositionAccessConditions,
	*generated.ModifiedAccessConditions, *generated.SourceModifiedAccessConditions) {
	if o == nil {
		return nil, nil, nil, nil, nil, nil, nil
	}
	opts := &generated.AppendBlobClientAppendBlockFromURLOptions{
		SourceRange: exported.FormatHTTPRange(o.Range),
	}
	if v := o.SourceContentValidation; v != nil {
		v.Apply(opts)
	}
	lease, modified := exported.FormatBlobAccessConditions(o.AccessConditions)
	return opts, o.CPKInfo, o.CPKScopeInfo, lease, o.AppendPositionAccessConditions, modified, o.SourceModifiedAccessConditions
}
// ---------------------------------------------------------------------------------------------------------------------
// SealOptions provides set of configurations for SealAppendBlob operation
type SealOptions struct {
	// Optional conditions (lease/modification) that must be met for the operation to proceed.
	AccessConditions *blob.AccessConditions
	// Optional conditions on the append position / max blob size.
	AppendPositionAccessConditions *AppendPositionAccessConditions
}
// format expands the public options into the generated layer's parameter
// groups. A nil receiver yields all-nil groups.
func (o *SealOptions) format() (*generated.LeaseAccessConditions,
	*generated.ModifiedAccessConditions, *generated.AppendPositionAccessConditions) {
	if o == nil {
		return nil, nil, nil
	}
	lease, modified := exported.FormatBlobAccessConditions(o.AccessConditions)
	return lease, modified, o.AppendPositionAccessConditions
}
// ---------------------------------------------------------------------------------------------------------------------
// The expiry-related names below are aliases of the internal exported package's
// types, re-published here for use with Client.SetExpiry.
// ExpiryType defines values for ExpiryType.
type ExpiryType = exported.ExpiryType
// ExpiryTypeAbsolute defines the absolute time for the blob expiry.
type ExpiryTypeAbsolute = exported.ExpiryTypeAbsolute
// ExpiryTypeRelativeToNow defines the duration relative to now for the blob expiry.
type ExpiryTypeRelativeToNow = exported.ExpiryTypeRelativeToNow
// ExpiryTypeRelativeToCreation defines the duration relative to creation for the blob expiry.
type ExpiryTypeRelativeToCreation = exported.ExpiryTypeRelativeToCreation
// ExpiryTypeNever defines that the blob will be set to never expire.
type ExpiryTypeNever = exported.ExpiryTypeNever
// SetExpiryOptions contains the optional parameters for the Client.SetExpiry method.
type SetExpiryOptions = exported.SetExpiryOptions

View file

@ -0,0 +1,26 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package appendblob
import (
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)
// The response types below are aliases of the generated layer's response envelopes.
// CreateResponse contains the response from method Client.Create.
type CreateResponse = generated.AppendBlobClientCreateResponse
// AppendBlockResponse contains the response from method Client.AppendBlock.
type AppendBlockResponse = generated.AppendBlobClientAppendBlockResponse
// AppendBlockFromURLResponse contains the response from method Client.AppendBlockFromURL.
type AppendBlockFromURLResponse = generated.AppendBlobClientAppendBlockFromURLResponse
// SealResponse contains the response from method Client.Seal.
type SealResponse = generated.AppendBlobClientSealResponse
// SetExpiryResponse contains the response from method Client.SetExpiry.
type SetExpiryResponse = generated.BlobClientSetExpiryResponse

View file

@ -0,0 +1,6 @@
{
"AssetsRepo": "Azure/azure-sdk-assets",
"AssetsRepoPrefixPath": "go",
"TagPrefix": "go/storage/azblob",
"Tag": "go/storage/azblob_46e572d43a"
}

View file

@ -0,0 +1,446 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package blob
import (
"context"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
"io"
"os"
"sync"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)
// ClientOptions contains the optional parameters when creating a Client.
// There are currently no blob-specific options; only the shared azcore
// client options (retry, transport, logging, etc.) apply.
type ClientOptions struct {
	azcore.ClientOptions
}
// Client represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob.
// It is a thin wrapper over the generated blob client.
type Client base.Client[generated.BlobClient]
// NewClient creates an instance of Client with the specified values.
//   - blobURL - the URL of the blob e.g. https://<account>.blob.core.windows.net/container/blob.txt
//   - cred - an Azure AD credential, typically obtained via the azidentity module
//   - options - client options; pass nil to accept the default values
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
	opts := shared.GetClientOptions(options)
	// authenticate every (re)try of a request with a bearer token
	opts.PerRetryPolicies = append(opts.PerRetryPolicies, runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil))
	pipeline := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &opts.ClientOptions)
	return (*Client)(base.NewBlobClient(blobURL, pipeline, nil)), nil
}
// NewClientWithNoCredential creates an instance of Client with the specified values.
// This is used to anonymously access a blob or with a shared access signature (SAS) token.
//   - blobURL - the URL of the blob e.g. https://<account>.blob.core.windows.net/container/blob.txt?<sas token>
//   - options - client options; pass nil to accept the default values
func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) {
	opts := shared.GetClientOptions(options)
	pipeline := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &opts.ClientOptions)
	return (*Client)(base.NewBlobClient(blobURL, pipeline, nil)), nil
}
// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
//   - blobURL - the URL of the blob e.g. https://<account>.blob.core.windows.net/container/blob.txt
//   - cred - a SharedKeyCredential created with the matching blob's storage account and access key
//   - options - client options; pass nil to accept the default values
func NewClientWithSharedKeyCredential(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) {
	opts := shared.GetClientOptions(options)
	opts.PerRetryPolicies = append(opts.PerRetryPolicies, exported.NewSharedKeyCredPolicy(cred))
	pipeline := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &opts.ClientOptions)
	// The credential is retained on the client so GetSASURL can sign tokens later.
	return (*Client)(base.NewBlobClient(blobURL, pipeline, cred)), nil
}
// NewClientFromConnectionString creates an instance of Client with the specified values.
//   - connectionString - a connection string for the desired storage account
//   - containerName - the name of the container within the storage account
//   - blobName - the name of the blob within the container
//   - options - client options; pass nil to accept the default values
func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) {
	parsed, err := shared.ParseConnectionString(connectionString)
	if err != nil {
		return nil, err
	}
	blobURL := runtime.JoinPaths(parsed.ServiceURL, containerName, blobName)
	// Prefer shared-key auth when the connection string carries account
	// credentials; otherwise fall back to anonymous/SAS access.
	if parsed.AccountKey == "" || parsed.AccountName == "" {
		return NewClientWithNoCredential(blobURL, options)
	}
	credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey)
	if err != nil {
		return nil, err
	}
	return NewClientWithSharedKeyCredential(blobURL, credential, options)
}
// generated returns the generated-layer client that backs this Client.
func (b *Client) generated() *generated.BlobClient {
	inner := (*base.Client[generated.BlobClient])(b)
	return base.InnerClient(inner)
}
// sharedKey returns the shared-key credential this Client was built with, or nil.
func (b *Client) sharedKey() *SharedKeyCredential {
	inner := (*base.Client[generated.BlobClient])(b)
	return base.SharedKey(inner)
}
// URL returns the URL endpoint used by the Client object.
// The value is the endpoint stored on the generated-layer client.
func (b *Client) URL() string {
	return b.generated().Endpoint()
}
// WithSnapshot creates a new Client object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot returning a URL to the base blob.
func (b *Client) WithSnapshot(snapshot string) (*Client, error) {
	parts, err := ParseURL(b.URL())
	if err != nil {
		return nil, err
	}
	parts.Snapshot = snapshot
	// Reuse the existing pipeline and credential; only the URL changes.
	return (*Client)(base.NewBlobClient(parts.String(), b.generated().Pipeline(), b.sharedKey())), nil
}
// WithVersionID creates a new Client object identical to the source but with the specified version id.
// Pass "" to remove the versionID returning a URL to the base blob.
func (b *Client) WithVersionID(versionID string) (*Client, error) {
	parts, err := ParseURL(b.URL())
	if err != nil {
		return nil, err
	}
	parts.VersionID = versionID
	// Reuse the existing pipeline and credential; only the URL changes.
	return (*Client)(base.NewBlobClient(parts.String(), b.generated().Pipeline(), b.sharedKey())), nil
}
// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
// Note that deleting a blob also deletes all its snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
func (b *Client) Delete(ctx context.Context, o *DeleteOptions) (DeleteResponse, error) {
	opts, lease, access := o.format()
	return b.generated().Delete(ctx, opts, lease, access)
}
// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob.
func (b *Client) Undelete(ctx context.Context, o *UndeleteOptions) (UndeleteResponse, error) {
	return b.generated().Undelete(ctx, o.format())
}
// SetTier operation sets the tier on a blob. The operation is allowed on a page
// blob in a premium storage account and on a block blob in a blob storage account (locally
// redundant storage only). A premium page blob's tier determines the allowed size, IOPs, and
// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation
// does not update the blob's ETag.
// For detailed information about block blob level tiers see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
func (b *Client) SetTier(ctx context.Context, tier AccessTier, o *SetTierOptions) (SetTierResponse, error) {
	opts, lease, modified := o.format()
	return b.generated().SetTier(ctx, tier, opts, lease, modified)
}
// GetProperties returns the blob's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
func (b *Client) GetProperties(ctx context.Context, options *GetPropertiesOptions) (GetPropertiesResponse, error) {
	opts, lease, cpk, modified := options.format()
	return b.generated().GetProperties(ctx, opts, lease, cpk, modified)
}
// SetHTTPHeaders changes a blob's HTTP headers.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
func (b *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders HTTPHeaders, o *SetHTTPHeadersOptions) (SetHTTPHeadersResponse, error) {
	opts, lease, modified := o.format()
	return b.generated().SetHTTPHeaders(ctx, opts, &HTTPHeaders, lease, modified)
}
// SetMetadata changes a blob's metadata.
// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata.
func (b *Client) SetMetadata(ctx context.Context, metadata map[string]*string, o *SetMetadataOptions) (SetMetadataResponse, error) {
	lease, cpkInfo, cpkScope, modified := o.format()
	opts := generated.BlobClientSetMetadataOptions{Metadata: metadata}
	return b.generated().SetMetadata(ctx, &opts, lease, cpkInfo, cpkScope, modified)
}
// CreateSnapshot creates a read-only snapshot of a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob.
func (b *Client) CreateSnapshot(ctx context.Context, options *CreateSnapshotOptions) (CreateSnapshotResponse, error) {
	// Deliberately no guard against a URL that already carries a snapshot query
	// parameter: checking would tax every call for a very unusual misuse.
	opts, cpkInfo, cpkScope, modified, lease := options.format()
	return b.generated().CreateSnapshot(ctx, opts, cpkInfo, cpkScope, modified, lease)
}
// StartCopyFromURL copies the data at the source URL to a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob.
func (b *Client) StartCopyFromURL(ctx context.Context, copySource string, options *StartCopyFromURLOptions) (StartCopyFromURLResponse, error) {
	opts, srcModified, modified, lease := options.format()
	return b.generated().StartCopyFromURL(ctx, copySource, opts, srcModified, modified, lease)
}
// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob.
func (b *Client) AbortCopyFromURL(ctx context.Context, copyID string, options *AbortCopyFromURLOptions) (AbortCopyFromURLResponse, error) {
	opts, lease := options.format()
	return b.generated().AbortCopyFromURL(ctx, copyID, opts, lease)
}
// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot.
// Each call to this operation replaces all existing tags attached to the blob.
// To remove all tags from the blob, call this operation with no tags set.
// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags
func (b *Client) SetTags(ctx context.Context, tags map[string]string, options *SetTagsOptions) (SetTagsResponse, error) {
	opts, modified, lease := options.format()
	serialized := shared.SerializeBlobTags(tags)
	return b.generated().SetTags(ctx, *serialized, opts, modified, lease)
}
// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot.
// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags
func (b *Client) GetTags(ctx context.Context, options *GetTagsOptions) (GetTagsResponse, error) {
	opts, modified, lease := options.format()
	return b.generated().GetTags(ctx, opts, modified, lease)
}
// SetImmutabilityPolicy operation enables users to set the immutability policy on a blob. Mode defaults to "Unlocked".
// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview
func (b *Client) SetImmutabilityPolicy(ctx context.Context, expiryTime time.Time, options *SetImmutabilityPolicyOptions) (SetImmutabilityPolicyResponse, error) {
	opts, modified := options.format()
	// The expiry is mandatory, so it is stamped onto the formatted options here.
	opts.ImmutabilityPolicyExpiry = &expiryTime
	return b.generated().SetImmutabilityPolicy(ctx, opts, modified)
}
// DeleteImmutabilityPolicy operation enables users to delete the immutability policy on a blob.
// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview
func (b *Client) DeleteImmutabilityPolicy(ctx context.Context, options *DeleteImmutabilityPolicyOptions) (DeleteImmutabilityPolicyResponse, error) {
	return b.generated().DeleteImmutabilityPolicy(ctx, options.format())
}
// SetLegalHold operation enables users to set legal hold on a blob.
// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview
func (b *Client) SetLegalHold(ctx context.Context, legalHold bool, options *SetLegalHoldOptions) (SetLegalHoldResponse, error) {
	return b.generated().SetLegalHold(ctx, legalHold, options.format())
}
// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url.
func (b *Client) CopyFromURL(ctx context.Context, copySource string, options *CopyFromURLOptions) (CopyFromURLResponse, error) {
	opts, srcModified, modified, lease := options.format()
	return b.generated().CopyFromURL(ctx, copySource, opts, srcModified, modified, lease)
}
// GetSASURL is a convenience method for generating a SAS token for the currently pointed at blob.
// It can only be used if the credential supplied during creation was a SharedKeyCredential.
func (b *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *GetSASURLOptions) (string, error) {
	if b.sharedKey() == nil {
		return "", bloberror.MissingSharedKeyCredential
	}
	urlParts, err := ParseURL(b.URL())
	if err != nil {
		return "", err
	}
	// A URL with no snapshot component fails to parse as a timestamp; that
	// error is deliberately discarded and the zero time means "no snapshot".
	t, err := time.Parse(SnapshotTimeFormat, urlParts.Snapshot)
	if err != nil {
		t = time.Time{}
	}
	st := o.format()
	qps, err := sas.BlobSignatureValues{
		ContainerName: urlParts.ContainerName,
		BlobName:      urlParts.BlobName,
		SnapshotTime:  t,
		Version:       sas.Version,
		Permissions:   permissions.String(),
		StartTime:     st,
		ExpiryTime:    expiry.UTC(),
	}.SignWithSharedKey(b.sharedKey())
	if err != nil {
		return "", err
	}
	// Append the signed query parameters to the blob's own URL.
	endpoint := b.URL() + "?" + qps.Encode()
	return endpoint, nil
}
// Concurrent Download Functions -----------------------------------------------------------------------------------------

// download downloads an Azure blob to a WriterAt in parallel.
// It splits the requested range into BlockSize chunks, fetches up to
// o.Concurrency of them concurrently via shared.DoBatchTransfer, writes each
// chunk at its absolute offset, and returns the number of bytes downloaded.
func (b *Client) download(ctx context.Context, writer io.WriterAt, o downloadOptions) (int64, error) {
	if o.BlockSize == 0 {
		o.BlockSize = DefaultDownloadBlockSize
	}
	count := o.Range.Count
	if count == CountToEnd { // If size not specified, calculate it
		// If we don't have the length at all, get it
		// NOTE(review): this issues a full-range download only to read
		// ContentLength, and the response body is never closed in this branch —
		// confirm whether the transport reclaims that connection.
		downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{}, nil)
		dr, err := b.DownloadStream(ctx, downloadBlobOptions)
		if err != nil {
			return 0, err
		}
		count = *dr.ContentLength - o.Range.Offset
	}
	if count <= 0 {
		// The file is empty, there is nothing to download.
		return 0, nil
	}
	// Prepare and do parallel download.
	progress := int64(0)
	progressLock := &sync.Mutex{} // guards progress, shared by all chunk operations
	err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{
		OperationName: "downloadBlobToWriterAt",
		TransferSize:  count,
		ChunkSize:     o.BlockSize,
		Concurrency:   o.Concurrency,
		Operation: func(ctx context.Context, chunkStart int64, count int64) error {
			downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{
				Offset: chunkStart + o.Range.Offset,
				Count:  count,
			}, nil)
			dr, err := b.DownloadStream(ctx, downloadBlobOptions)
			if err != nil {
				return err
			}
			// Wrap the body so transient read failures retry per block.
			var body io.ReadCloser = dr.NewRetryReader(ctx, &o.RetryReaderOptionsPerBlock)
			if o.Progress != nil {
				rangeProgress := int64(0)
				body = streaming.NewResponseProgress(
					body,
					func(bytesTransferred int64) {
						// Fold only this chunk's delta into the shared running total.
						diff := bytesTransferred - rangeProgress
						rangeProgress = bytesTransferred
						progressLock.Lock()
						progress += diff
						o.Progress(progress)
						progressLock.Unlock()
					})
			}
			// SectionWriter pins the chunk to its final position in the target.
			_, err = io.Copy(shared.NewSectionWriter(writer, chunkStart, count), body)
			if err != nil {
				return err
			}
			err = body.Close()
			return err
		},
	})
	if err != nil {
		return 0, err
	}
	return count, nil
}
// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
func (b *Client) DownloadStream(ctx context.Context, o *DownloadStreamOptions) (DownloadStreamResponse, error) {
	// o.format() is safe on a nil receiver (it returns all nils), so it may
	// run before the nil check; o is then replaced with an empty value so the
	// field reads below never dereference nil.
	downloadOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions := o.format()
	if o == nil {
		o = &DownloadStreamOptions{}
	}
	dr, err := b.generated().Download(ctx, downloadOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions)
	if err != nil {
		return DownloadStreamResponse{}, err
	}
	return DownloadStreamResponse{
		client:           b,
		DownloadResponse: dr,
		// getInfo lets NewRetryReader re-issue the request for the same range and ETag.
		getInfo:                httpGetterInfo{Range: o.Range, ETag: dr.ETag},
		ObjectReplicationRules: deserializeORSPolicies(dr.ObjectReplicationRules),
		cpkInfo:                o.CPKInfo,
		cpkScope:               o.CPKScopeInfo,
	}, err
}
// DownloadBuffer downloads an Azure blob to a buffer with parallel.
func (b *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *DownloadBufferOptions) (int64, error) {
	opts := &DownloadBufferOptions{}
	if o != nil {
		opts = o
	}
	// A bytes writer adapts the caller's buffer to the io.WriterAt the
	// parallel download helper expects.
	return b.download(ctx, shared.NewBytesWriter(buffer), (downloadOptions)(*opts))
}
// DownloadFile downloads an Azure blob to a local file.
// The file would be truncated if the size doesn't match.
func (b *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFileOptions) (int64, error) {
	if o == nil {
		o = &DownloadFileOptions{}
	}
	do := (*downloadOptions)(o)

	// 1. Calculate the size of the destination file
	var size int64

	count := do.Range.Count
	if count == CountToEnd {
		// Try to get Azure blob's size
		getBlobPropertiesOptions := do.getBlobPropertiesOptions()
		props, err := b.GetProperties(ctx, getBlobPropertiesOptions)
		if err != nil {
			return 0, err
		}
		size = *props.ContentLength - do.Range.Offset
	} else {
		size = count
	}

	// 2. Compare and try to resize local file's size if it doesn't match Azure blob's size.
	stat, err := file.Stat()
	if err != nil {
		return 0, err
	}
	if stat.Size() != size {
		// Truncate grows or shrinks the file to exactly the expected size.
		if err = file.Truncate(size); err != nil {
			return 0, err
		}
	}

	if size > 0 {
		// *os.File satisfies io.WriterAt, so the parallel helper writes chunks in place.
		return b.download(ctx, file, *do)
	} else { // if the blob's size is 0, there is no need in downloading it
		return 0, nil
	}
}

View file

@ -0,0 +1,229 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package blob
import (
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)
const (
	// CountToEnd, used as an HTTPRange count, means "from the offset to the end of the blob".
	CountToEnd = 0

	// SnapshotTimeFormat is the time layout used for blob snapshot timestamps (re-exported).
	SnapshotTimeFormat = exported.SnapshotTimeFormat

	// DefaultDownloadBlockSize is default block size
	DefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB
)

// BlobType defines values for BlobType
type BlobType = generated.BlobType

// Values re-exported from the generated layer unchanged.
const (
	BlobTypeBlockBlob  BlobType = generated.BlobTypeBlockBlob
	BlobTypePageBlob   BlobType = generated.BlobTypePageBlob
	BlobTypeAppendBlob BlobType = generated.BlobTypeAppendBlob
)

// PossibleBlobTypeValues returns the possible values for the BlobType const type.
func PossibleBlobTypeValues() []BlobType {
	return generated.PossibleBlobTypeValues()
}

// DeleteSnapshotsOptionType defines values for DeleteSnapshotsOptionType
type DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionType

// Values re-exported from the generated layer unchanged.
const (
	DeleteSnapshotsOptionTypeInclude DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeInclude
	DeleteSnapshotsOptionTypeOnly    DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeOnly
)

// PossibleDeleteSnapshotsOptionTypeValues returns the possible values for the DeleteSnapshotsOptionType const type.
func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType {
	return generated.PossibleDeleteSnapshotsOptionTypeValues()
}
// AccessTier defines values for Blob Access Tier.
type AccessTier = generated.AccessTier

// Values re-exported from the generated layer unchanged.
const (
	AccessTierArchive AccessTier = generated.AccessTierArchive
	AccessTierCool    AccessTier = generated.AccessTierCool
	AccessTierHot     AccessTier = generated.AccessTierHot
	AccessTierP10     AccessTier = generated.AccessTierP10
	AccessTierP15     AccessTier = generated.AccessTierP15
	AccessTierP20     AccessTier = generated.AccessTierP20
	AccessTierP30     AccessTier = generated.AccessTierP30
	AccessTierP4      AccessTier = generated.AccessTierP4
	AccessTierP40     AccessTier = generated.AccessTierP40
	AccessTierP50     AccessTier = generated.AccessTierP50
	AccessTierP6      AccessTier = generated.AccessTierP6
	AccessTierP60     AccessTier = generated.AccessTierP60
	AccessTierP70     AccessTier = generated.AccessTierP70
	AccessTierP80     AccessTier = generated.AccessTierP80
	AccessTierPremium AccessTier = generated.AccessTierPremium
)

// PossibleAccessTierValues returns the possible values for the AccessTier const type.
func PossibleAccessTierValues() []AccessTier {
	return generated.PossibleAccessTierValues()
}

// RehydratePriority - If an object is in rehydrate pending state then this header is returned with priority of rehydrate.
// Valid values are High and Standard.
type RehydratePriority = generated.RehydratePriority

// Values re-exported from the generated layer unchanged.
const (
	RehydratePriorityHigh     RehydratePriority = generated.RehydratePriorityHigh
	RehydratePriorityStandard RehydratePriority = generated.RehydratePriorityStandard
)

// PossibleRehydratePriorityValues returns the possible values for the RehydratePriority const type.
func PossibleRehydratePriorityValues() []RehydratePriority {
	return generated.PossibleRehydratePriorityValues()
}

// ImmutabilityPolicyMode defines values for ImmutabilityPolicyMode
type ImmutabilityPolicyMode = generated.ImmutabilityPolicyMode

// Values re-exported from the generated layer unchanged.
const (
	ImmutabilityPolicyModeMutable  ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeMutable
	ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeUnlocked
	ImmutabilityPolicyModeLocked   ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeLocked
)

// PossibleImmutabilityPolicyModeValues returns the possible values for the ImmutabilityPolicyMode const type.
func PossibleImmutabilityPolicyModeValues() []ImmutabilityPolicyMode {
	return generated.PossibleImmutabilityPolicyModeValues()
}

// ImmutabilityPolicySetting returns the possible values for the ImmutabilityPolicySetting const type.
type ImmutabilityPolicySetting = generated.ImmutabilityPolicySetting

// Values re-exported from the generated layer unchanged.
const (
	ImmutabilityPolicySettingUnlocked ImmutabilityPolicySetting = generated.ImmutabilityPolicySettingUnlocked
	ImmutabilityPolicySettingLocked   ImmutabilityPolicySetting = generated.ImmutabilityPolicySettingLocked
)

// PossibleImmutabilityPolicySettingValues returns the possible values for the ImmutabilityPolicySetting const type.
func PossibleImmutabilityPolicySettingValues() []ImmutabilityPolicySetting {
	return generated.PossibleImmutabilityPolicySettingValues()
}
// CopyStatusType defines values for CopyStatusType
type CopyStatusType = generated.CopyStatusType

// Values re-exported from the generated layer unchanged.
const (
	CopyStatusTypePending CopyStatusType = generated.CopyStatusTypePending
	CopyStatusTypeSuccess CopyStatusType = generated.CopyStatusTypeSuccess
	CopyStatusTypeAborted CopyStatusType = generated.CopyStatusTypeAborted
	CopyStatusTypeFailed  CopyStatusType = generated.CopyStatusTypeFailed
)

// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type.
func PossibleCopyStatusTypeValues() []CopyStatusType {
	return generated.PossibleCopyStatusTypeValues()
}

// EncryptionAlgorithmType defines values for EncryptionAlgorithmType.
type EncryptionAlgorithmType = generated.EncryptionAlgorithmType

// Values re-exported from the generated layer unchanged.
const (
	EncryptionAlgorithmTypeNone   EncryptionAlgorithmType = generated.EncryptionAlgorithmTypeNone
	EncryptionAlgorithmTypeAES256 EncryptionAlgorithmType = generated.EncryptionAlgorithmTypeAES256
)

// PossibleEncryptionAlgorithmTypeValues returns the possible values for the EncryptionAlgorithmType const type.
func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType {
	return generated.PossibleEncryptionAlgorithmTypeValues()
}

// ArchiveStatus defines values for ArchiveStatus.
type ArchiveStatus = generated.ArchiveStatus

// Values re-exported from the generated layer unchanged.
const (
	ArchiveStatusRehydratePendingToCool ArchiveStatus = generated.ArchiveStatusRehydratePendingToCool
	ArchiveStatusRehydratePendingToHot  ArchiveStatus = generated.ArchiveStatusRehydratePendingToHot
)

// PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type.
func PossibleArchiveStatusValues() []ArchiveStatus {
	return generated.PossibleArchiveStatusValues()
}

// DeleteType defines values for DeleteType.
type DeleteType = generated.DeleteType

// Values re-exported from the generated layer unchanged.
const (
	DeleteTypeNone      DeleteType = generated.DeleteTypeNone
	DeleteTypePermanent DeleteType = generated.DeleteTypePermanent
)

// PossibleDeleteTypeValues returns the possible values for the DeleteType const type.
func PossibleDeleteTypeValues() []DeleteType {
	return generated.PossibleDeleteTypeValues()
}

// QueryFormatType - The quick query format type.
type QueryFormatType = generated.QueryFormatType

// Values re-exported from the generated layer unchanged.
const (
	QueryFormatTypeDelimited QueryFormatType = generated.QueryFormatTypeDelimited
	QueryFormatTypeJSON      QueryFormatType = generated.QueryFormatTypeJSON
	QueryFormatTypeArrow     QueryFormatType = generated.QueryFormatTypeArrow
	QueryFormatTypeParquet   QueryFormatType = generated.QueryFormatTypeParquet
)

// PossibleQueryFormatTypeValues returns the possible values for the QueryFormatType const type.
func PossibleQueryFormatTypeValues() []QueryFormatType {
	return generated.PossibleQueryFormatTypeValues()
}
// TransferValidationType abstracts the various mechanisms used to verify a transfer.
type TransferValidationType = exported.TransferValidationType

// TransferValidationTypeCRC64 is a TransferValidationType used to provide a precomputed CRC64.
type TransferValidationTypeCRC64 = exported.TransferValidationTypeCRC64

// TransferValidationTypeComputeCRC64 is a TransferValidationType that indicates a CRC64 should be computed during transfer.
func TransferValidationTypeComputeCRC64() TransferValidationType {
	return exported.TransferValidationTypeComputeCRC64()
}

// TransferValidationTypeMD5 is a TransferValidationType used to provide a precomputed MD5.
type TransferValidationTypeMD5 = exported.TransferValidationTypeMD5

// SourceContentValidationType abstracts the various mechanisms used to validate source content.
// This interface is not publicly implementable.
type SourceContentValidationType interface {
	Apply(generated.SourceContentSetter)
	notPubliclyImplementable() // unexported method seals the interface to this package
}

// SourceContentValidationTypeCRC64 is a SourceContentValidationType used to provide a precomputed CRC64.
type SourceContentValidationTypeCRC64 []byte

// Apply implements the SourceContentValidationType interface for type SourceContentValidationTypeCRC64.
func (s SourceContentValidationTypeCRC64) Apply(src generated.SourceContentSetter) {
	src.SetSourceContentCRC64(s)
}

func (SourceContentValidationTypeCRC64) notPubliclyImplementable() {}

// Compile-time interface satisfaction check.
var _ SourceContentValidationType = (SourceContentValidationTypeCRC64)(nil)

// SourceContentValidationTypeMD5 is a SourceContentValidationType used to provide a precomputed MD5.
type SourceContentValidationTypeMD5 []byte

// Apply implements the SourceContentValidationType interface for type SourceContentValidationTypeMD5.
func (s SourceContentValidationTypeMD5) Apply(src generated.SourceContentSetter) {
	src.SetSourceContentMD5(s)
}

func (SourceContentValidationTypeMD5) notPubliclyImplementable() {}

// Compile-time interface satisfaction check.
var _ SourceContentValidationType = (SourceContentValidationTypeMD5)(nil)

View file

@ -0,0 +1,567 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package blob
import (
"time"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
)
// SharedKeyCredential contains an account's name and its primary or secondary key.
// It is re-exported from the internal/exported package so it appears on the public API surface.
type SharedKeyCredential = exported.SharedKeyCredential

// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the
// storage account's name and either its primary or secondary key.
func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) {
	return exported.NewSharedKeyCredential(accountName, accountKey)
}
// Type Declarations ---------------------------------------------------------------------

// AccessConditions identifies blob-specific access conditions which you optionally set.
type AccessConditions = exported.BlobAccessConditions

// LeaseAccessConditions contains optional parameters to access leased entity.
type LeaseAccessConditions = exported.LeaseAccessConditions

// ModifiedAccessConditions contains a group of parameters for specifying access conditions.
type ModifiedAccessConditions = exported.ModifiedAccessConditions

// CPKInfo contains a group of parameters for client provided encryption key.
type CPKInfo = generated.CPKInfo

// CPKScopeInfo contains a group of parameters for client provided encryption scope.
type CPKScopeInfo = generated.CPKScopeInfo

// HTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method.
type HTTPHeaders = generated.BlobHTTPHeaders

// SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL method.
type SourceModifiedAccessConditions = generated.SourceModifiedAccessConditions

// Tags represent map of blob index tags
// NOTE(review): this aliases the singular generated.BlobTag struct despite the
// plural comment — confirm against upstream whether a map alias was intended.
type Tags = generated.BlobTag

// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and
// ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange
// which has an offset but no zero value count indicates from the offset to the resource's end.
type HTTPRange = exported.HTTPRange
// Request Model Declaration -------------------------------------------------------------------------------------------

// DownloadStreamOptions contains the optional parameters for the Client.Download method.
// A nil value (or the zero value) requests the whole blob with no preconditions.
type DownloadStreamOptions struct {
	// When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the
	// range is less than or equal to 4 MB in size.
	RangeGetContentMD5 *bool

	// Range specifies a range of bytes. The default value is all bytes.
	Range HTTPRange

	// AccessConditions holds optional lease/modification preconditions for the read.
	AccessConditions *AccessConditions
	// CPKInfo holds an optional client-provided encryption key.
	CPKInfo *CPKInfo
	// CPKScopeInfo holds an optional client-provided encryption scope.
	CPKScopeInfo *CPKScopeInfo
}
// format converts the public options into the generated-layer parameter set.
// It is nil-safe: a nil receiver yields all-nil results.
func (o *DownloadStreamOptions) format() (*generated.BlobClientDownloadOptions, *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.ModifiedAccessConditions) {
	if o == nil {
		return nil, nil, nil, nil
	}
	opts := &generated.BlobClientDownloadOptions{
		RangeGetContentMD5: o.RangeGetContentMD5,
		Range:              exported.FormatHTTPRange(o.Range),
	}
	lease, modified := exported.FormatBlobAccessConditions(o.AccessConditions)
	return opts, lease, o.CPKInfo, modified
}
// ---------------------------------------------------------------------------------------------------------------------

// downloadOptions contains common options used by the DownloadBuffer and DownloadFile functions.
type downloadOptions struct {
	// Range specifies a range of bytes. The default value is all bytes.
	Range HTTPRange

	// BlockSize specifies the block size to use for each parallel download; the default size is DefaultDownloadBlockSize.
	BlockSize int64

	// Progress is a function that is invoked periodically as bytes are received.
	Progress func(bytesTransferred int64)

	// BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob.
	AccessConditions *AccessConditions

	// ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data.
	CPKInfo *CPKInfo
	// CPKScopeInfo indicates the client-provided encryption scope, if any.
	CPKScopeInfo *CPKScopeInfo

	// Concurrency indicates the maximum number of blocks to download in parallel (0=default).
	Concurrency uint16

	// RetryReaderOptionsPerBlock is used when downloading each block.
	RetryReaderOptionsPerBlock RetryReaderOptions
}
// getBlobPropertiesOptions projects the download options onto the subset
// needed for a GetProperties call. It is nil-safe: nil in, nil out.
func (o *downloadOptions) getBlobPropertiesOptions() *GetPropertiesOptions {
	if o == nil {
		return nil
	}
	opts := GetPropertiesOptions{
		AccessConditions: o.AccessConditions,
		CPKInfo:          o.CPKInfo,
	}
	return &opts
}
// getDownloadBlobOptions builds per-request stream options from the shared
// download options, substituting the given byte range and optional range-MD5
// flag. It is nil-safe: nil in, nil out.
func (o *downloadOptions) getDownloadBlobOptions(rnge HTTPRange, rangeGetContentMD5 *bool) *DownloadStreamOptions {
	if o == nil {
		return nil
	}
	opts := DownloadStreamOptions{
		AccessConditions:   o.AccessConditions,
		CPKInfo:            o.CPKInfo,
		CPKScopeInfo:       o.CPKScopeInfo,
		Range:              rnge,
		RangeGetContentMD5: rangeGetContentMD5,
	}
	return &opts
}
// DownloadBufferOptions contains the optional parameters for the DownloadBuffer method.
type DownloadBufferOptions struct {
	// Range specifies a range of bytes. The default value is all bytes.
	Range HTTPRange

	// BlockSize specifies the block size to use for each parallel download; the default size is DefaultDownloadBlockSize.
	BlockSize int64

	// Progress is a function that is invoked periodically as bytes are received.
	Progress func(bytesTransferred int64)

	// BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob.
	AccessConditions *AccessConditions

	// CPKInfo contains a group of parameters for client provided encryption key.
	CPKInfo *CPKInfo

	// CPKScopeInfo contains a group of parameters for client provided encryption scope.
	CPKScopeInfo *CPKScopeInfo

	// Concurrency indicates the maximum number of blocks to download in parallel (0=default).
	Concurrency uint16

	// RetryReaderOptionsPerBlock is used when downloading each block.
	RetryReaderOptionsPerBlock RetryReaderOptions
}
// DownloadFileOptions contains the optional parameters for the DownloadFile method.
type DownloadFileOptions struct {
	// Range specifies a range of bytes. The default value is all bytes.
	Range HTTPRange

	// BlockSize specifies the block size to use for each parallel download; the default size is DefaultDownloadBlockSize.
	BlockSize int64

	// Progress is a function that is invoked periodically as bytes are received.
	Progress func(bytesTransferred int64)

	// BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob.
	AccessConditions *AccessConditions

	// ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data.
	CPKInfo *CPKInfo
	// CPKScopeInfo contains a group of parameters for the client provided encryption scope.
	CPKScopeInfo *CPKScopeInfo

	// Concurrency indicates the maximum number of blocks to download in parallel. The default value is 5.
	Concurrency uint16

	// RetryReaderOptionsPerBlock is used when downloading each block.
	RetryReaderOptionsPerBlock RetryReaderOptions
}
// ---------------------------------------------------------------------------------------------------------------------

// DeleteOptions contains the optional parameters for the Client.Delete method.
type DeleteOptions struct {
	// Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob
	// and all of its snapshots. only: Delete only the blob's snapshots and not the blob itself.
	DeleteSnapshots *DeleteSnapshotsOptionType

	// AccessConditions specifies the access conditions that must be met for the delete to proceed.
	AccessConditions *AccessConditions

	// Setting DeleteType to DeleteTypePermanent will permanently delete soft-delete snapshot and/or version blobs.
	// WARNING: This is a dangerous operation and should not be used unless you know the implications. Please proceed
	// with caution.
	// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob
	BlobDeleteType *DeleteType
}
// format lowers DeleteOptions into the generated-layer delete options and the
// lease/modified access conditions. A nil receiver yields all-nil results.
func (o *DeleteOptions) format() (*generated.BlobClientDeleteOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) {
	if o == nil {
		return nil, nil, nil
	}
	deleteOpts := &generated.BlobClientDeleteOptions{
		DeleteSnapshots: o.DeleteSnapshots,
		DeleteType:      o.BlobDeleteType, // None by default
	}
	if ac := o.AccessConditions; ac != nil {
		return deleteOpts, ac.LeaseAccessConditions, ac.ModifiedAccessConditions
	}
	return deleteOpts, nil, nil
}
// ---------------------------------------------------------------------------------------------------------------------

// UndeleteOptions contains the optional parameters for the Client.Undelete method.
type UndeleteOptions struct {
	// placeholder for future options
}

// format returns the generated-layer options; always nil today because
// UndeleteOptions currently has no fields.
func (o *UndeleteOptions) format() *generated.BlobClientUndeleteOptions {
	return nil
}
// ---------------------------------------------------------------------------------------------------------------------

// SetTierOptions contains the optional parameters for the Client.SetTier method.
type SetTierOptions struct {
	// Optional: Indicates the priority with which to rehydrate an archived blob.
	RehydratePriority *RehydratePriority

	// AccessConditions specifies the access conditions that must be met for the tier change to proceed.
	AccessConditions *AccessConditions
}
// format lowers SetTierOptions into the generated-layer tier options and the
// lease/modified access conditions. A nil receiver yields all-nil results.
func (o *SetTierOptions) format() (*generated.BlobClientSetTierOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) {
	if o == nil {
		return nil, nil, nil
	}
	lease, modified := exported.FormatBlobAccessConditions(o.AccessConditions)
	tierOpts := &generated.BlobClientSetTierOptions{RehydratePriority: o.RehydratePriority}
	return tierOpts, lease, modified
}
// ---------------------------------------------------------------------------------------------------------------------

// GetPropertiesOptions contains the optional parameters for the Client.GetProperties method
type GetPropertiesOptions struct {
	// AccessConditions specifies the access conditions that must be met for the request to proceed.
	AccessConditions *AccessConditions
	// CPKInfo contains a group of parameters for client provided encryption key.
	CPKInfo *CPKInfo
}
// format lowers GetPropertiesOptions into the generated-layer values. The
// first return is always nil: the generated GetProperties call takes no
// extra options beyond the access conditions and CPK info returned here.
func (o *GetPropertiesOptions) format() (*generated.BlobClientGetPropertiesOptions,
	*generated.LeaseAccessConditions, *generated.CPKInfo, *generated.ModifiedAccessConditions) {
	if o == nil {
		return nil, nil, nil, nil
	}
	lease, modified := exported.FormatBlobAccessConditions(o.AccessConditions)
	return nil, lease, o.CPKInfo, modified
}
// ---------------------------------------------------------------------------------------------------------------------

// SetHTTPHeadersOptions contains the optional parameters for the Client.SetHTTPHeaders method.
type SetHTTPHeadersOptions struct {
	// AccessConditions specifies the access conditions that must be met for the request to proceed.
	AccessConditions *AccessConditions
}
// format lowers SetHTTPHeadersOptions into the generated-layer values. The
// first return is always nil: only the access conditions are carried through.
func (o *SetHTTPHeadersOptions) format() (*generated.BlobClientSetHTTPHeadersOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) {
	if o == nil {
		return nil, nil, nil
	}
	lease, modified := exported.FormatBlobAccessConditions(o.AccessConditions)
	return nil, lease, modified
}
// ---------------------------------------------------------------------------------------------------------------------

// SetMetadataOptions provides set of configurations for Set Metadata on blob operation
type SetMetadataOptions struct {
	// AccessConditions specifies the access conditions that must be met for the request to proceed.
	AccessConditions *AccessConditions
	// CPKInfo contains a group of parameters for client provided encryption key.
	CPKInfo *CPKInfo
	// CPKScopeInfo contains a group of parameters for client provided encryption scope.
	CPKScopeInfo *CPKScopeInfo
}
// format lowers SetMetadataOptions into the lease access conditions, CPK
// values, and modified access conditions the SetMetadata call needs.
// A nil receiver yields all-nil results.
func (o *SetMetadataOptions) format() (*generated.LeaseAccessConditions, *CPKInfo,
	*CPKScopeInfo, *ModifiedAccessConditions) {
	if o == nil {
		return nil, nil, nil, nil
	}
	lease, modified := exported.FormatBlobAccessConditions(o.AccessConditions)
	return lease, o.CPKInfo, o.CPKScopeInfo, modified
}
// ---------------------------------------------------------------------------------------------------------------------

// CreateSnapshotOptions contains the optional parameters for the Client.CreateSnapshot method.
type CreateSnapshotOptions struct {
	// Metadata is forwarded to the CreateSnapshot request as user-defined name-value pairs.
	Metadata map[string]*string
	// AccessConditions specifies the access conditions that must be met for the request to proceed.
	AccessConditions *AccessConditions
	// CPKInfo contains a group of parameters for client provided encryption key.
	CPKInfo *CPKInfo
	// CPKScopeInfo contains a group of parameters for client provided encryption scope.
	CPKScopeInfo *CPKScopeInfo
}
// format lowers CreateSnapshotOptions into the generated-layer snapshot
// options plus the CPK and access-condition values. Note the return order:
// modified access conditions precede lease access conditions here.
func (o *CreateSnapshotOptions) format() (*generated.BlobClientCreateSnapshotOptions, *generated.CPKInfo,
	*generated.CPKScopeInfo, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) {
	if o == nil {
		return nil, nil, nil, nil, nil
	}
	lease, modified := exported.FormatBlobAccessConditions(o.AccessConditions)
	snapshotOpts := &generated.BlobClientCreateSnapshotOptions{Metadata: o.Metadata}
	return snapshotOpts, o.CPKInfo, o.CPKScopeInfo, modified, lease
}
// ---------------------------------------------------------------------------------------------------------------------

// StartCopyFromURLOptions contains the optional parameters for the Client.StartCopyFromURL method.
type StartCopyFromURLOptions struct {
	// Specifies the date time when the blobs immutability policy is set to expire.
	ImmutabilityPolicyExpiry *time.Time
	// Specifies the immutability policy mode to set on the blob.
	ImmutabilityPolicyMode *ImmutabilityPolicySetting
	// Specified if a legal hold should be set on the blob.
	LegalHold *bool

	// Optional. Used to set blob tags in various blob operations.
	BlobTags map[string]string

	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
	// operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs
	// are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source
	// blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers.
	// See Naming and Referencing Containers, Blobs, and Metadata for more information.
	Metadata map[string]*string

	// Optional: Indicates the priority with which to rehydrate an archived blob.
	RehydratePriority *RehydratePriority

	// Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer.
	SealBlob *bool

	// Optional. Indicates the tier to be set on the blob.
	Tier *AccessTier

	// SourceModifiedAccessConditions specifies the access conditions evaluated against the copy source.
	SourceModifiedAccessConditions *SourceModifiedAccessConditions

	// AccessConditions specifies the access conditions evaluated against the destination blob.
	AccessConditions *AccessConditions
}
// format lowers StartCopyFromURLOptions into the generated-layer copy options
// plus the source/destination access conditions. Blob tags are serialized to
// the single query-string form the service expects.
func (o *StartCopyFromURLOptions) format() (*generated.BlobClientStartCopyFromURLOptions,
	*generated.SourceModifiedAccessConditions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) {
	if o == nil {
		return nil, nil, nil, nil
	}
	lease, modified := exported.FormatBlobAccessConditions(o.AccessConditions)
	copyOpts := &generated.BlobClientStartCopyFromURLOptions{
		BlobTagsString:           shared.SerializeBlobTagsToStrPtr(o.BlobTags),
		Metadata:                 o.Metadata,
		RehydratePriority:        o.RehydratePriority,
		SealBlob:                 o.SealBlob,
		Tier:                     o.Tier,
		ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry,
		ImmutabilityPolicyMode:   o.ImmutabilityPolicyMode,
		LegalHold:                o.LegalHold,
	}
	return copyOpts, o.SourceModifiedAccessConditions, modified, lease
}
// ---------------------------------------------------------------------------------------------------------------------

// AbortCopyFromURLOptions contains the optional parameters for the Client.AbortCopyFromURL method.
type AbortCopyFromURLOptions struct {
	// LeaseAccessConditions specifies the lease that must be held for the abort to proceed.
	LeaseAccessConditions *LeaseAccessConditions
}

// format lowers AbortCopyFromURLOptions into the generated-layer values. The
// first return is always nil: only the lease access conditions are carried through.
func (o *AbortCopyFromURLOptions) format() (*generated.BlobClientAbortCopyFromURLOptions, *generated.LeaseAccessConditions) {
	if o == nil {
		return nil, nil
	}
	return nil, o.LeaseAccessConditions
}
// ---------------------------------------------------------------------------------------------------------------------

// SetTagsOptions contains the optional parameters for the Client.SetTags method.
type SetTagsOptions struct {
	// The version id parameter is an opaque DateTime value that, when present,
	// specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer.
	VersionID *string

	// Optional header, Specifies the transactional crc64 for the body, to be validated by the service.
	TransactionalContentCRC64 []byte
	// Optional header, Specifies the transactional md5 for the body, to be validated by the service.
	TransactionalContentMD5 []byte

	// AccessConditions specifies the access conditions that must be met for the request to proceed.
	AccessConditions *AccessConditions
}
// format lowers SetTagsOptions into the generated-layer set-tags options plus
// the modified/lease access conditions (in that order). A nil receiver yields
// all-nil results.
func (o *SetTagsOptions) format() (*generated.BlobClientSetTagsOptions, *ModifiedAccessConditions, *generated.LeaseAccessConditions) {
	if o == nil {
		return nil, nil, nil
	}
	lease, modified := exported.FormatBlobAccessConditions(o.AccessConditions)
	tagOpts := &generated.BlobClientSetTagsOptions{
		TransactionalContentMD5:   o.TransactionalContentMD5,
		TransactionalContentCRC64: o.TransactionalContentCRC64,
		VersionID:                 o.VersionID,
	}
	return tagOpts, modified, lease
}
// ---------------------------------------------------------------------------------------------------------------------

// GetTagsOptions contains the optional parameters for the Client.GetTags method.
type GetTagsOptions struct {
	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve.
	Snapshot *string
	// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
	// It's for service version 2019-10-10 and newer.
	VersionID *string

	// BlobAccessConditions specifies the access conditions that must be met for the request to proceed.
	BlobAccessConditions *AccessConditions
}
// format lowers GetTagsOptions into the generated-layer get-tags options plus
// the modified/lease access conditions (in that order). A nil receiver yields
// all-nil results.
func (o *GetTagsOptions) format() (*generated.BlobClientGetTagsOptions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) {
	if o == nil {
		return nil, nil, nil
	}
	lease, modified := exported.FormatBlobAccessConditions(o.BlobAccessConditions)
	tagOpts := &generated.BlobClientGetTagsOptions{
		Snapshot:  o.Snapshot,
		VersionID: o.VersionID,
	}
	return tagOpts, modified, lease
}
// ---------------------------------------------------------------------------------------------------------------------

// SetImmutabilityPolicyOptions contains the parameter for Client.SetImmutabilityPolicy
type SetImmutabilityPolicyOptions struct {
	// Specifies the immutability policy mode to set on the blob. Possible values to set include: "Locked", "Unlocked".
	// "Mutable" can only be returned by service, don't set to "Mutable". If mode is not set - it will default to Unlocked.
	Mode *ImmutabilityPolicySetting
	// ModifiedAccessConditions specifies the modified access conditions that must be met for the request to proceed.
	ModifiedAccessConditions *ModifiedAccessConditions
}
// format lowers SetImmutabilityPolicyOptions into the generated-layer options
// and the modified access conditions. A nil receiver yields all-nil results.
func (o *SetImmutabilityPolicyOptions) format() (*generated.BlobClientSetImmutabilityPolicyOptions, *ModifiedAccessConditions) {
	if o == nil {
		return nil, nil
	}
	// Route the modified access conditions through the shared formatting
	// helper so nil-handling matches the other options types.
	_, modified := exported.FormatBlobAccessConditions(&exported.BlobAccessConditions{
		ModifiedAccessConditions: o.ModifiedAccessConditions,
	})
	policyOpts := &generated.BlobClientSetImmutabilityPolicyOptions{
		ImmutabilityPolicyMode: o.Mode,
	}
	return policyOpts, modified
}
// ---------------------------------------------------------------------------------------------------------------------

// DeleteImmutabilityPolicyOptions contains the optional parameters for the Client.DeleteImmutabilityPolicy method.
type DeleteImmutabilityPolicyOptions struct {
	// placeholder for future options
}

// format returns the generated-layer options; always nil today because
// DeleteImmutabilityPolicyOptions currently has no fields.
func (o *DeleteImmutabilityPolicyOptions) format() *generated.BlobClientDeleteImmutabilityPolicyOptions {
	return nil
}
// ---------------------------------------------------------------------------------------------------------------------

// SetLegalHoldOptions contains the optional parameters for the Client.SetLegalHold method.
type SetLegalHoldOptions struct {
	// placeholder for future options
}

// format returns the generated-layer options; always nil today because
// SetLegalHoldOptions currently has no fields.
func (o *SetLegalHoldOptions) format() *generated.BlobClientSetLegalHoldOptions {
	return nil
}
// ---------------------------------------------------------------------------------------------------------------------
// GetSASURLOptions contains the optional parameters for the Client.GetSASURL method.
type GetSASURLOptions struct {
StartTime *time.Time
}
func (o *GetSASURLOptions) format() time.Time {
if o == nil {
return time.Time{}
}
var st time.Time
if o.StartTime != nil {
st = o.StartTime.UTC()
} else {
st = time.Time{}
}
return st
}
// ---------------------------------------------------------------------------------------------------------------------

// CopyFromURLOptions contains the optional parameters for the Client.CopyFromURL method.
type CopyFromURLOptions struct {
	// Optional. Used to set blob tags in various blob operations.
	BlobTags map[string]string

	// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
	CopySourceAuthorization *string

	// Specifies the date time when the blobs immutability policy is set to expire.
	ImmutabilityPolicyExpiry *time.Time
	// Specifies the immutability policy mode to set on the blob.
	ImmutabilityPolicyMode *ImmutabilityPolicySetting
	// Specified if a legal hold should be set on the blob.
	LegalHold *bool

	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
	// operation will copy the metadata from the source blob or file to the destination
	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
	// is not copied from the source blob or file. Note that beginning with
	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
	// Blobs, and Metadata for more information.
	Metadata map[string]*string

	// Specify the md5 calculated for the range of bytes that must be read from the copy source.
	SourceContentMD5 []byte

	// Optional. Indicates the tier to be set on the blob.
	Tier *AccessTier

	// SourceModifiedAccessConditions specifies the access conditions evaluated against the copy source.
	SourceModifiedAccessConditions *SourceModifiedAccessConditions

	// BlobAccessConditions specifies the access conditions evaluated against the destination blob.
	BlobAccessConditions *AccessConditions
}
// format lowers CopyFromURLOptions into the generated-layer copy options plus
// the source/destination access conditions. Blob tags are serialized to the
// single query-string form the service expects.
func (o *CopyFromURLOptions) format() (*generated.BlobClientCopyFromURLOptions, *generated.SourceModifiedAccessConditions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) {
	if o == nil {
		return nil, nil, nil, nil
	}
	lease, modified := exported.FormatBlobAccessConditions(o.BlobAccessConditions)
	copyOpts := &generated.BlobClientCopyFromURLOptions{
		BlobTagsString:           shared.SerializeBlobTagsToStrPtr(o.BlobTags),
		CopySourceAuthorization:  o.CopySourceAuthorization,
		ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry,
		ImmutabilityPolicyMode:   o.ImmutabilityPolicyMode,
		LegalHold:                o.LegalHold,
		Metadata:                 o.Metadata,
		SourceContentMD5:         o.SourceContentMD5,
		Tier:                     o.Tier,
	}
	return copyOpts, o.SourceModifiedAccessConditions, modified, lease
}

View file

@ -0,0 +1,116 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package blob
import (
"context"
"io"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)
// DownloadResponse contains the response from method BlobClient.Download.
type DownloadResponse = generated.BlobClientDownloadResponse

// DownloadStreamResponse contains the response from the DownloadStream method.
// To read from the stream, read from the Body field, or call the NewRetryReader method.
type DownloadStreamResponse struct {
	DownloadResponse

	// ObjectReplicationRules holds the object-replication policies deserialized from the response.
	ObjectReplicationRules []ObjectReplicationPolicy

	// client is retained so NewRetryReader can issue fresh DownloadStream calls on read failure.
	client *Client
	// getInfo records the range and ETag used to resume the download consistently.
	getInfo httpGetterInfo
	// cpkInfo/cpkScope are re-sent on retry requests so re-reads decrypt identically.
	cpkInfo  *CPKInfo
	cpkScope *CPKScopeInfo
}
// NewRetryReader constructs a new RetryReader stream for reading data. If a connection fails while
// reading, it will make additional requests to reestablish a connection and continue reading.
// Pass nil for options to accept the default options.
// Callers of this method should not access the DownloadStreamResponse.Body field.
func (r *DownloadStreamResponse) NewRetryReader(ctx context.Context, options *RetryReaderOptions) *RetryReader {
	readerOpts := RetryReaderOptions{}
	if options != nil {
		readerOpts = *options
	}
	// getter re-issues a DownloadStream for the remaining range, pinning the
	// ETag so a changed blob fails the If-Match precondition instead of
	// silently serving different bytes.
	getter := func(ctx context.Context, getInfo httpGetterInfo) (io.ReadCloser, error) {
		streamOpts := DownloadStreamOptions{
			Range: getInfo.Range,
			AccessConditions: &AccessConditions{
				ModifiedAccessConditions: &ModifiedAccessConditions{IfMatch: getInfo.ETag},
			},
			CPKInfo:      r.cpkInfo,
			CPKScopeInfo: r.cpkScope,
		}
		resp, err := r.client.DownloadStream(ctx, &streamOpts)
		if err != nil {
			return nil, err
		}
		return resp.Body, err
	}
	return newRetryReader(ctx, r.Body, r.getInfo, getter, readerOpts)
}
// DeleteResponse contains the response from method BlobClient.Delete.
type DeleteResponse = generated.BlobClientDeleteResponse

// UndeleteResponse contains the response from method BlobClient.Undelete.
type UndeleteResponse = generated.BlobClientUndeleteResponse

// SetTierResponse contains the response from method BlobClient.SetTier.
type SetTierResponse = generated.BlobClientSetTierResponse

// GetPropertiesResponse contains the response from method BlobClient.GetProperties.
type GetPropertiesResponse = generated.BlobClientGetPropertiesResponse

// SetHTTPHeadersResponse contains the response from method BlobClient.SetHTTPHeaders.
type SetHTTPHeadersResponse = generated.BlobClientSetHTTPHeadersResponse

// SetMetadataResponse contains the response from method BlobClient.SetMetadata.
type SetMetadataResponse = generated.BlobClientSetMetadataResponse

// CreateSnapshotResponse contains the response from method BlobClient.CreateSnapshot.
type CreateSnapshotResponse = generated.BlobClientCreateSnapshotResponse

// StartCopyFromURLResponse contains the response from method BlobClient.StartCopyFromURL.
type StartCopyFromURLResponse = generated.BlobClientStartCopyFromURLResponse

// AbortCopyFromURLResponse contains the response from method BlobClient.AbortCopyFromURL.
type AbortCopyFromURLResponse = generated.BlobClientAbortCopyFromURLResponse

// SetTagsResponse contains the response from method BlobClient.SetTags.
type SetTagsResponse = generated.BlobClientSetTagsResponse

// GetTagsResponse contains the response from method BlobClient.GetTags.
type GetTagsResponse = generated.BlobClientGetTagsResponse

// SetImmutabilityPolicyResponse contains the response from method BlobClient.SetImmutabilityPolicy.
type SetImmutabilityPolicyResponse = generated.BlobClientSetImmutabilityPolicyResponse

// DeleteImmutabilityPolicyResponse contains the response from method BlobClient.DeleteImmutabilityPolicy.
type DeleteImmutabilityPolicyResponse = generated.BlobClientDeleteImmutabilityPolicyResponse

// SetLegalHoldResponse contains the response from method BlobClient.SetLegalHold.
type SetLegalHoldResponse = generated.BlobClientSetLegalHoldResponse

// CopyFromURLResponse contains the response from method BlobClient.CopyFromURL.
type CopyFromURLResponse = generated.BlobClientCopyFromURLResponse

// AcquireLeaseResponse contains the response from method BlobClient.AcquireLease.
type AcquireLeaseResponse = generated.BlobClientAcquireLeaseResponse

// BreakLeaseResponse contains the response from method BlobClient.BreakLease.
type BreakLeaseResponse = generated.BlobClientBreakLeaseResponse

// ChangeLeaseResponse contains the response from method BlobClient.ChangeLease.
type ChangeLeaseResponse = generated.BlobClientChangeLeaseResponse

// ReleaseLeaseResponse contains the response from method BlobClient.ReleaseLease.
type ReleaseLeaseResponse = generated.BlobClientReleaseLeaseResponse

// RenewLeaseResponse contains the response from method BlobClient.RenewLease.
type RenewLeaseResponse = generated.BlobClientRenewLeaseResponse

View file

@ -1,52 +1,47 @@
package azblob
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package blob
import (
"context"
"io"
"net"
"net/http"
"strings"
"sync"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
)
const CountToEnd = 0
// HTTPGetter is a function type that refers to a method that performs an HTTP GET operation.
type HTTPGetter func(ctx context.Context, i HTTPGetterInfo) (*http.Response, error)
type httpGetter func(ctx context.Context, i httpGetterInfo) (io.ReadCloser, error)
// HTTPGetterInfo is passed to an HTTPGetter function passing it parameters
// that should be used to make an HTTP GET request.
type HTTPGetterInfo struct {
// Offset specifies the start offset that should be used when
// creating the HTTP GET request's Range header
Offset int64
// Count specifies the count of bytes that should be used to calculate
// the end offset when creating the HTTP GET request's Range header
Count int64
type httpGetterInfo struct {
Range HTTPRange
// ETag specifies the resource's etag that should be used when creating
// the HTTP GET request's If-Match header
ETag ETag
ETag *azcore.ETag
}
// FailedReadNotifier is a function type that represents the notification function called when a read fails
type FailedReadNotifier func(failureCount int, lastError error, offset int64, count int64, willRetry bool)
// RetryReaderOptions contains properties which can help to decide when to do retry.
// RetryReaderOptions configures the retry reader's behavior.
// Zero-value fields will have their specified default values applied during use.
// This allows for modification of a subset of fields.
type RetryReaderOptions struct {
// MaxRetryRequests specifies the maximum number of HTTP GET requests that will be made
// while reading from a RetryReader. A value of zero means that no additional HTTP
// GET requests will be made.
MaxRetryRequests int
doInjectError bool
doInjectErrorRound int
injectedError error
// MaxRetries specifies the maximum number of attempts a failed read will be retried
// before producing an error.
// The default value is three.
MaxRetries int32
// NotifyFailedRead is called, if non-nil, after any failure to read. Expected usage is diagnostic logging.
NotifyFailedRead FailedReadNotifier
// OnFailedRead, when non-nil, is called after any failure to read. Expected usage is diagnostic logging.
OnFailedRead func(failureCount int32, lastError error, rnge HTTPRange, willRetry bool)
// TreatEarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default,
// EarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default,
// retryReader has the following special behaviour: closing the response body before it is all read is treated as a
// retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the =
// read is too slow, caller may want to force a retry in the hope that the retry will be quicker). If
@ -55,51 +50,59 @@ type RetryReaderOptions struct {
// Note that setting TreatEarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens
// from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors
// which will be retried.
TreatEarlyCloseAsError bool
// The default value is false.
EarlyCloseAsError bool
ClientProvidedKeyOptions ClientProvidedKeyOptions
doInjectError bool
doInjectErrorRound int32
injectedError error
}
// retryReader implements io.ReaderCloser methods.
// retryReader tries to read from response, and if there is retriable network error
// RetryReader attempts to read from response, and if there is a retry-able network error
// returned during reading, it will retry according to retry reader option through executing
// user defined action with provided data to get a new response, and continue the overall reading process
// through reading from the new response.
type retryReader struct {
ctx context.Context
info HTTPGetterInfo
countWasBounded bool
o RetryReaderOptions
getter HTTPGetter
// RetryReader implements the io.ReadCloser interface.
type RetryReader struct {
ctx context.Context
info httpGetterInfo
retryReaderOptions RetryReaderOptions
getter httpGetter
countWasBounded bool
// we support Close-ing during Reads (from other goroutines), so we protect the shared state, which is response
responseMu *sync.Mutex
response *http.Response
response io.ReadCloser
}
// NewRetryReader creates a retry reader.
func NewRetryReader(ctx context.Context, initialResponse *http.Response,
info HTTPGetterInfo, o RetryReaderOptions, getter HTTPGetter) io.ReadCloser {
return &retryReader{
ctx: ctx,
getter: getter,
info: info,
countWasBounded: info.Count != CountToEnd,
response: initialResponse,
responseMu: &sync.Mutex{},
o: o}
// newRetryReader creates a retry reader.
func newRetryReader(ctx context.Context, initialResponse io.ReadCloser, info httpGetterInfo, getter httpGetter, o RetryReaderOptions) *RetryReader {
if o.MaxRetries < 1 {
o.MaxRetries = 3
}
return &RetryReader{
ctx: ctx,
getter: getter,
info: info,
countWasBounded: info.Range.Count != CountToEnd,
response: initialResponse,
responseMu: &sync.Mutex{},
retryReaderOptions: o,
}
}
func (s *retryReader) setResponse(r *http.Response) {
// setResponse function
func (s *RetryReader) setResponse(r io.ReadCloser) {
s.responseMu.Lock()
defer s.responseMu.Unlock()
s.response = r
}
func (s *retryReader) Read(p []byte) (n int, err error) {
for try := 0; ; try++ {
// Read from retry reader
func (s *RetryReader) Read(p []byte) (n int, err error) {
for try := int32(0); ; try++ {
//fmt.Println(try) // Comment out for debugging.
if s.countWasBounded && s.info.Count == CountToEnd {
if s.countWasBounded && s.info.Range.Count == CountToEnd {
// User specified an original count and the remaining bytes are 0, return 0, EOF
return 0, io.EOF
}
@ -116,12 +119,12 @@ func (s *retryReader) Read(p []byte) (n int, err error) {
s.setResponse(newResponse)
resp = newResponse
}
n, err := resp.Body.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running)
n, err := resp.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running)
// Injection mechanism for testing.
if s.o.doInjectError && try == s.o.doInjectErrorRound {
if s.o.injectedError != nil {
err = s.o.injectedError
if s.retryReaderOptions.doInjectError && try == s.retryReaderOptions.doInjectErrorRound {
if s.retryReaderOptions.injectedError != nil {
err = s.retryReaderOptions.injectedError
} else {
err = &net.DNSError{IsTemporary: true}
}
@ -129,25 +132,26 @@ func (s *retryReader) Read(p []byte) (n int, err error) {
// We successfully read data or end EOF.
if err == nil || err == io.EOF {
s.info.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future
if s.info.Count != CountToEnd {
s.info.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future
s.info.Range.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future
if s.info.Range.Count != CountToEnd {
s.info.Range.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future
}
return n, err // Return the return to the caller
}
s.Close() // Error, close stream
_ = s.Close()
s.setResponse(nil) // Our stream is no longer good
// Check the retry count and error code, and decide whether to retry.
retriesExhausted := try >= s.o.MaxRetryRequests
retriesExhausted := try >= s.retryReaderOptions.MaxRetries
_, isNetError := err.(net.Error)
isUnexpectedEOF := err == io.ErrUnexpectedEOF
willRetry := (isNetError || isUnexpectedEOF || s.wasRetryableEarlyClose(err)) && !retriesExhausted
// Notify, for logging purposes, of any failures
if s.o.NotifyFailedRead != nil {
if s.retryReaderOptions.OnFailedRead != nil {
failureCount := try + 1 // because try is zero-based
s.o.NotifyFailedRead(failureCount, err, s.info.Offset, s.info.Count, willRetry)
s.retryReaderOptions.OnFailedRead(failureCount, err, s.info.Range, willRetry)
}
if willRetry {
@ -163,24 +167,26 @@ func (s *retryReader) Read(p []byte) (n int, err error) {
// net.Conn.Close, and that is documented as "Any blocked Read or Write operations will be unblocked and return errors"
// which is exactly the behaviour we want.
// NOTE: that if caller has forced an early Close from a separate goroutine (separate from the Read)
// then there are two different types of error that may happen - either the one one we check for here,
// then there are two different types of error that may happen - either the one we check for here,
// or a net.Error (due to closure of connection). Which one happens depends on timing. We only need this routine
// to check for one, since the other is a net.Error, which our main Read retry loop is already handing.
func (s *retryReader) wasRetryableEarlyClose(err error) bool {
if s.o.TreatEarlyCloseAsError {
func (s *RetryReader) wasRetryableEarlyClose(err error) bool {
if s.retryReaderOptions.EarlyCloseAsError {
return false // user wants all early closes to be errors, and so not retryable
}
// unfortunately, http.errReadOnClosedResBody is private, so the best we can do here is to check for its text
return strings.HasSuffix(err.Error(), ReadOnClosedBodyMessage)
}
// ReadOnClosedBodyMessage of retry reader
const ReadOnClosedBodyMessage = "read on closed response body"
func (s *retryReader) Close() error {
// Close retry reader
func (s *RetryReader) Close() error {
s.responseMu.Lock()
defer s.responseMu.Unlock()
if s.response != nil && s.response.Body != nil {
return s.response.Body.Close()
if s.response != nil {
return s.response.Close()
}
return nil
}

View file

@ -0,0 +1,79 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package blob
import (
"strings"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)
// ObjectReplicationRules is one replication rule's ID and its replication status
// ("Complete" or "Failed") as reported by the service.
type ObjectReplicationRules struct {
	RuleID string
	Status string
}

// ObjectReplicationPolicy are deserialized attributes.
type ObjectReplicationPolicy struct {
	PolicyID *string
	Rules    *[]ObjectReplicationRules
}

// deserializeORSPolicies is a utility function to deserialize ORS Policies.
// It returns nil when policies is nil, and silently skips malformed headers.
func deserializeORSPolicies(policies map[string]*string) (objectReplicationPolicies []ObjectReplicationPolicy) {
	if policies == nil {
		return nil
	}
	// For source blobs (blobs that have policy ids and rule ids applied to them),
	// the header will be formatted as "x-ms-or-<policy_id>_<rule_id>: {Complete, Failed}".
	// The value of this header is the status of the replication.
	orPolicyStatusHeader := make(map[string]*string)
	for key, value := range policies {
		if strings.Contains(key, "or-") && key != "x-ms-or-policy-id" {
			orPolicyStatusHeader[key] = value
		}
	}

	parsedResult := make(map[string][]ObjectReplicationRules)
	for key, value := range orPolicyStatusHeader {
		policyAndRuleIDs := strings.Split(strings.Split(key, "or-")[1], "_")
		// guard against malformed keys (no "_" separator) or a missing status
		// value; previously either case would panic
		if len(policyAndRuleIDs) < 2 || value == nil {
			continue
		}
		policyId, ruleId := policyAndRuleIDs[0], policyAndRuleIDs[1]
		parsedResult[policyId] = append(parsedResult[policyId], ObjectReplicationRules{RuleID: ruleId, Status: *value})
	}

	for policyId, rules := range parsedResult {
		// copy the loop variables before taking their addresses: prior to the
		// Go 1.22 per-iteration loop scoping, &policyId/&rules would alias the
		// same variables across iterations, so every policy would end up
		// pointing at the last iteration's values
		policyId, rules := policyId, rules
		objectReplicationPolicies = append(objectReplicationPolicies, ObjectReplicationPolicy{
			PolicyID: &policyId,
			Rules:    &rules,
		})
	}
	return
}
// ParseHTTPHeaders converts the content-related header values of a
// GetPropertiesResponse into an HTTPHeaders value suitable for re-use
// in subsequent blob operations.
func ParseHTTPHeaders(resp GetPropertiesResponse) HTTPHeaders {
	h := HTTPHeaders{
		BlobCacheControl:       resp.CacheControl,
		BlobContentDisposition: resp.ContentDisposition,
		BlobContentEncoding:    resp.ContentEncoding,
		BlobContentLanguage:    resp.ContentLanguage,
		BlobContentMD5:         resp.ContentMD5,
		BlobContentType:        resp.ContentType,
	}
	return h
}
// URLParts object represents the components that make up an Azure Storage Container/Blob URL.
// It is an alias of sas.URLParts, so values convert freely between the two packages.
// NOTE: Changing any SAS-related field requires computing a new SAS signature.
type URLParts = sas.URLParts

// ParseURL parses a URL initializing URLParts' fields including any SAS-related & snapshot query parameters. Any other
// query parameters remain in the UnparsedParams field. This method overwrites all fields in the URLParts object.
// It is a thin wrapper over sas.ParseURL and performs no network I/O.
func ParseURL(u string) (URLParts, error) {
	return sas.ParseURL(u)
}

View file

@ -0,0 +1,156 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package bloberror
import (
"errors"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)
// HasCode reports whether err (or any error it wraps) is an
// *azcore.ResponseError whose ErrorCode equals one of the given codes.
func HasCode(err error, codes ...Code) bool {
	var respErr *azcore.ResponseError
	if !errors.As(err, &respErr) {
		return false
	}

	got := respErr.ErrorCode
	for _, c := range codes {
		if got == string(c) {
			return true
		}
	}

	return false
}
// Code - Error codes returned by the service
type Code = generated.StorageErrorCode
const (
AccountAlreadyExists Code = "AccountAlreadyExists"
AccountBeingCreated Code = "AccountBeingCreated"
AccountIsDisabled Code = "AccountIsDisabled"
AppendPositionConditionNotMet Code = "AppendPositionConditionNotMet"
AuthenticationFailed Code = "AuthenticationFailed"
AuthorizationFailure Code = "AuthorizationFailure"
AuthorizationPermissionMismatch Code = "AuthorizationPermissionMismatch"
AuthorizationProtocolMismatch Code = "AuthorizationProtocolMismatch"
AuthorizationResourceTypeMismatch Code = "AuthorizationResourceTypeMismatch"
AuthorizationServiceMismatch Code = "AuthorizationServiceMismatch"
AuthorizationSourceIPMismatch Code = "AuthorizationSourceIPMismatch"
BlobAlreadyExists Code = "BlobAlreadyExists"
BlobArchived Code = "BlobArchived"
BlobBeingRehydrated Code = "BlobBeingRehydrated"
BlobImmutableDueToPolicy Code = "BlobImmutableDueToPolicy"
BlobNotArchived Code = "BlobNotArchived"
BlobNotFound Code = "BlobNotFound"
BlobOverwritten Code = "BlobOverwritten"
BlobTierInadequateForContentLength Code = "BlobTierInadequateForContentLength"
BlobUsesCustomerSpecifiedEncryption Code = "BlobUsesCustomerSpecifiedEncryption"
BlockCountExceedsLimit Code = "BlockCountExceedsLimit"
BlockListTooLong Code = "BlockListTooLong"
CannotChangeToLowerTier Code = "CannotChangeToLowerTier"
CannotVerifyCopySource Code = "CannotVerifyCopySource"
ConditionHeadersNotSupported Code = "ConditionHeadersNotSupported"
ConditionNotMet Code = "ConditionNotMet"
ContainerAlreadyExists Code = "ContainerAlreadyExists"
ContainerBeingDeleted Code = "ContainerBeingDeleted"
ContainerDisabled Code = "ContainerDisabled"
ContainerNotFound Code = "ContainerNotFound"
ContentLengthLargerThanTierLimit Code = "ContentLengthLargerThanTierLimit"
CopyAcrossAccountsNotSupported Code = "CopyAcrossAccountsNotSupported"
CopyIDMismatch Code = "CopyIdMismatch"
EmptyMetadataKey Code = "EmptyMetadataKey"
FeatureVersionMismatch Code = "FeatureVersionMismatch"
IncrementalCopyBlobMismatch Code = "IncrementalCopyBlobMismatch"
IncrementalCopyOfEralierVersionSnapshotNotAllowed Code = "IncrementalCopyOfEralierVersionSnapshotNotAllowed"
IncrementalCopySourceMustBeSnapshot Code = "IncrementalCopySourceMustBeSnapshot"
InfiniteLeaseDurationRequired Code = "InfiniteLeaseDurationRequired"
InsufficientAccountPermissions Code = "InsufficientAccountPermissions"
InternalError Code = "InternalError"
InvalidAuthenticationInfo Code = "InvalidAuthenticationInfo"
InvalidBlobOrBlock Code = "InvalidBlobOrBlock"
InvalidBlobTier Code = "InvalidBlobTier"
InvalidBlobType Code = "InvalidBlobType"
InvalidBlockID Code = "InvalidBlockId"
InvalidBlockList Code = "InvalidBlockList"
InvalidHTTPVerb Code = "InvalidHttpVerb"
InvalidHeaderValue Code = "InvalidHeaderValue"
InvalidInput Code = "InvalidInput"
InvalidMD5 Code = "InvalidMd5"
InvalidMetadata Code = "InvalidMetadata"
InvalidOperation Code = "InvalidOperation"
InvalidPageRange Code = "InvalidPageRange"
InvalidQueryParameterValue Code = "InvalidQueryParameterValue"
InvalidRange Code = "InvalidRange"
InvalidResourceName Code = "InvalidResourceName"
InvalidSourceBlobType Code = "InvalidSourceBlobType"
InvalidSourceBlobURL Code = "InvalidSourceBlobUrl"
InvalidURI Code = "InvalidUri"
InvalidVersionForPageBlobOperation Code = "InvalidVersionForPageBlobOperation"
InvalidXMLDocument Code = "InvalidXmlDocument"
InvalidXMLNodeValue Code = "InvalidXmlNodeValue"
LeaseAlreadyBroken Code = "LeaseAlreadyBroken"
LeaseAlreadyPresent Code = "LeaseAlreadyPresent"
LeaseIDMismatchWithBlobOperation Code = "LeaseIdMismatchWithBlobOperation"
LeaseIDMismatchWithContainerOperation Code = "LeaseIdMismatchWithContainerOperation"
LeaseIDMismatchWithLeaseOperation Code = "LeaseIdMismatchWithLeaseOperation"
LeaseIDMissing Code = "LeaseIdMissing"
LeaseIsBreakingAndCannotBeAcquired Code = "LeaseIsBreakingAndCannotBeAcquired"
LeaseIsBreakingAndCannotBeChanged Code = "LeaseIsBreakingAndCannotBeChanged"
LeaseIsBrokenAndCannotBeRenewed Code = "LeaseIsBrokenAndCannotBeRenewed"
LeaseLost Code = "LeaseLost"
LeaseNotPresentWithBlobOperation Code = "LeaseNotPresentWithBlobOperation"
LeaseNotPresentWithContainerOperation Code = "LeaseNotPresentWithContainerOperation"
LeaseNotPresentWithLeaseOperation Code = "LeaseNotPresentWithLeaseOperation"
MD5Mismatch Code = "Md5Mismatch"
CRC64Mismatch Code = "Crc64Mismatch"
MaxBlobSizeConditionNotMet Code = "MaxBlobSizeConditionNotMet"
MetadataTooLarge Code = "MetadataTooLarge"
MissingContentLengthHeader Code = "MissingContentLengthHeader"
MissingRequiredHeader Code = "MissingRequiredHeader"
MissingRequiredQueryParameter Code = "MissingRequiredQueryParameter"
MissingRequiredXMLNode Code = "MissingRequiredXmlNode"
MultipleConditionHeadersNotSupported Code = "MultipleConditionHeadersNotSupported"
NoAuthenticationInformation Code = "NoAuthenticationInformation"
NoPendingCopyOperation Code = "NoPendingCopyOperation"
OperationNotAllowedOnIncrementalCopyBlob Code = "OperationNotAllowedOnIncrementalCopyBlob"
OperationTimedOut Code = "OperationTimedOut"
OutOfRangeInput Code = "OutOfRangeInput"
OutOfRangeQueryParameterValue Code = "OutOfRangeQueryParameterValue"
PendingCopyOperation Code = "PendingCopyOperation"
PreviousSnapshotCannotBeNewer Code = "PreviousSnapshotCannotBeNewer"
PreviousSnapshotNotFound Code = "PreviousSnapshotNotFound"
PreviousSnapshotOperationNotSupported Code = "PreviousSnapshotOperationNotSupported"
RequestBodyTooLarge Code = "RequestBodyTooLarge"
RequestURLFailedToParse Code = "RequestUrlFailedToParse"
ResourceAlreadyExists Code = "ResourceAlreadyExists"
ResourceNotFound Code = "ResourceNotFound"
ResourceTypeMismatch Code = "ResourceTypeMismatch"
SequenceNumberConditionNotMet Code = "SequenceNumberConditionNotMet"
SequenceNumberIncrementTooLarge Code = "SequenceNumberIncrementTooLarge"
ServerBusy Code = "ServerBusy"
SnapshotCountExceeded Code = "SnapshotCountExceeded"
SnapshotOperationRateExceeded Code = "SnapshotOperationRateExceeded"
SnapshotsPresent Code = "SnapshotsPresent"
SourceConditionNotMet Code = "SourceConditionNotMet"
SystemInUse Code = "SystemInUse"
TargetConditionNotMet Code = "TargetConditionNotMet"
UnauthorizedBlobOverwrite Code = "UnauthorizedBlobOverwrite"
UnsupportedHTTPVerb Code = "UnsupportedHttpVerb"
UnsupportedHeader Code = "UnsupportedHeader"
UnsupportedQueryParameter Code = "UnsupportedQueryParameter"
UnsupportedXMLNode Code = "UnsupportedXmlNode"
)
var (
// MissingSharedKeyCredential - Error is returned when SAS URL is being created without SharedKeyCredential.
MissingSharedKeyCredential = errors.New("SAS can only be signed with a SharedKeyCredential")
)

View file

@ -0,0 +1,313 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package blockblob
import (
"bytes"
"context"
"encoding/base64"
"encoding/binary"
"errors"
"io"
"sync"
"sync/atomic"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
)
// blockWriter provides methods to upload blocks that represent a file to a server and commit them.
// This allows us to provide a local implementation that fakes the server for hermetic testing.
type blockWriter interface {
	// StageBlock uploads one block, identified by its base64 block ID.
	StageBlock(context.Context, string, io.ReadSeekCloser, *StageBlockOptions) (StageBlockResponse, error)
	// Upload writes an entire blob in a single call (used when the payload fits in one block).
	Upload(context.Context, io.ReadSeekCloser, *UploadOptions) (UploadResponse, error)
	// CommitBlockList commits the previously staged blocks, in the given order, into the blob.
	CommitBlockList(context.Context, []string, *CommitBlockListOptions) (CommitBlockListResponse, error)
}

// bufferManager provides an abstraction for the management of buffers.
// this is mostly for testing purposes, but does allow for different implementations without changing the algorithm.
type bufferManager[T ~[]byte] interface {
	// Acquire returns the channel that contains the pool of buffers.
	Acquire() <-chan T

	// Release releases the buffer back to the pool for reuse/cleanup.
	Release(T)

	// Grow grows the number of buffers, up to the predefined max.
	// It returns the total number of buffers or an error.
	// No error is returned if the number of buffers has reached max.
	// This is called only from the reading goroutine.
	Grow() (int, error)

	// Free cleans up all buffers.
	Free()
}
// copyFromReader copies a source io.Reader to blob storage using concurrent uploads.
// It reads options.BlockSize-sized chunks from src, stages each as a block with up to
// options.Concurrency uploads in flight, then commits the block list (a single small
// payload is instead sent with one Upload call — see blockTracker.commitBlocks).
// Read errors are favored over staging errors; the first error cancels outstanding uploads.
func copyFromReader[T ~[]byte](ctx context.Context, src io.Reader, dst blockWriter, options UploadStreamOptions, getBufferManager func(maxBuffers int, bufferSize int64) bufferManager[T]) (CommitBlockListResponse, error) {
	options.setDefaults()

	wg := sync.WaitGroup{}       // Used to know when all outgoing blocks have finished processing
	errCh := make(chan error, 1) // contains the first error encountered during processing

	buffers := getBufferManager(options.Concurrency, options.BlockSize)
	defer buffers.Free()

	// this controls the lifetime of the uploading goroutines.
	// if an error is encountered, cancel() is called which will terminate all uploads.
	// NOTE: the ordering is important here. cancel MUST execute before
	// cleaning up the buffers so that any uploading goroutines exit first,
	// releasing their buffers back to the pool for cleanup.
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// all blocks have IDs that start with a random UUID
	blockIDPrefix, err := uuid.New()
	if err != nil {
		return CommitBlockListResponse{}, err
	}
	tracker := blockTracker{
		blockIDPrefix: blockIDPrefix,
		options:       options,
	}

	// This goroutine grabs a buffer, reads from the stream into the buffer,
	// then creates a goroutine to upload/stage the block.
	for blockNum := uint32(0); true; blockNum++ {
		var buffer T
		select {
		case buffer = <-buffers.Acquire():
			// got a buffer
		default:
			// no buffer available; allocate a new buffer if possible
			if _, err := buffers.Grow(); err != nil {
				return CommitBlockListResponse{}, err
			}

			// either grab the newly allocated buffer or wait for one to become available
			buffer = <-buffers.Acquire()
		}

		// io.ReadFull returns io.EOF when zero bytes were read and
		// io.ErrUnexpectedEOF on a short read; both just mean the stream is
		// exhausted and are translated to success below.
		var n int
		n, err = io.ReadFull(src, buffer)

		if n > 0 {
			// some data was read, upload it
			wg.Add(1) // We're posting a buffer to be sent

			// NOTE: we must pass blockNum as an arg to our goroutine else
			// it's captured by reference and can change underneath us!
			go func(blockNum uint32) {
				// Upload the outgoing block, matching the number of bytes read
				err := tracker.uploadBlock(ctx, dst, blockNum, buffer[:n])
				if err != nil {
					select {
					case errCh <- err:
						// error was set
					default:
						// some other error is already set
					}
					cancel()
				}
				buffers.Release(buffer) // The goroutine reading from the stream can reuse this buffer now

				// signal that the block has been staged.
				// we MUST do this after attempting to write to errCh
				// to avoid it racing with the reading goroutine.
				wg.Done()
			}(blockNum)
		} else {
			// nothing was read so the buffer is empty, send it back for reuse/clean-up.
			buffers.Release(buffer)
		}

		if err != nil { // The reader is done, no more outgoing buffers
			if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
				// these are expected errors, we don't surface those
				err = nil
			} else {
				// some other error happened, terminate any outstanding uploads
				cancel()
			}
			break
		}
	}

	wg.Wait() // Wait for all outgoing blocks to complete

	if err != nil {
		// there was an error reading from src, favor this error over any error during staging
		return CommitBlockListResponse{}, err
	}

	select {
	case err = <-errCh:
		// there was an error during staging
		return CommitBlockListResponse{}, err
	default:
		// no error was encountered
	}

	// If no error, after all blocks uploaded, commit them to the blob & return the result
	return tracker.commitBlocks(ctx, dst)
}
// used to manage the uploading and committing of blocks
type blockTracker struct {
	blockIDPrefix uuid.UUID // UUID used with all blockIDs
	maxBlockNum   uint32    // defaults to 0
	firstBlock    []byte    // Used only if maxBlockNum is 0
	options       UploadStreamOptions
}

// uploadBlock stages buffer as block number num via to.StageBlock.
// Block 0 is held back in firstBlock when it is smaller than the configured
// block size, so commitBlocks can later send a single-block payload with one
// Upload call instead of StageBlock+CommitBlockList.
// NOTE(review): firstBlock aliases the pooled buffer that copyFromReader
// releases after this returns; this appears safe only because a short block 0
// implies the stream is already exhausted — confirm before changing the pool.
func (bt *blockTracker) uploadBlock(ctx context.Context, to blockWriter, num uint32, buffer []byte) error {
	if num == 0 {
		bt.firstBlock = buffer

		// If whole payload fits in 1 block, don't stage it; End will upload it with 1 I/O operation
		// If the payload is exactly the same size as the buffer, there may be more content coming in.
		if len(buffer) < int(bt.options.BlockSize) {
			return nil
		}
	} else {
		// Else, upload a staged block...
		atomicMorphUint32(&bt.maxBlockNum, func(startVal uint32) (val uint32, morphResult uint32) {
			// Atomically remember (in t.numBlocks) the maximum block num we've ever seen
			if startVal < num {
				return num, 0
			}
			return startVal, 0
		})
	}
	blockID := newUUIDBlockID(bt.blockIDPrefix).WithBlockNumber(num).ToBase64()
	_, err := to.StageBlock(ctx, blockID, streaming.NopCloser(bytes.NewReader(buffer)), bt.options.getStageBlockOptions())
	return err
}
// commitBlocks finalizes the upload. When the whole payload fit into a single
// block 0 that is smaller than the block size, it is sent with one Upload call;
// otherwise the staged blocks 0..maxBlockNum are committed with CommitBlockList.
func (bt *blockTracker) commitBlocks(ctx context.Context, to blockWriter) (CommitBlockListResponse, error) {
	// If the first block had the exact same size as the buffer
	// we would have staged it as a block thinking that there might be more data coming
	if bt.maxBlockNum == 0 && len(bt.firstBlock) < int(bt.options.BlockSize) {
		// If whole payload fits in 1 block (block #0), upload it with 1 I/O operation
		up, err := to.Upload(ctx, streaming.NopCloser(bytes.NewReader(bt.firstBlock)), bt.options.getUploadOptions())
		if err != nil {
			return CommitBlockListResponse{}, err
		}

		// convert UploadResponse to CommitBlockListResponse
		return CommitBlockListResponse{
			ClientRequestID:     up.ClientRequestID,
			ContentMD5:          up.ContentMD5,
			Date:                up.Date,
			ETag:                up.ETag,
			EncryptionKeySHA256: up.EncryptionKeySHA256,
			EncryptionScope:     up.EncryptionScope,
			IsServerEncrypted:   up.IsServerEncrypted,
			LastModified:        up.LastModified,
			RequestID:           up.RequestID,
			Version:             up.Version,
			VersionID:           up.VersionID,
			//ContentCRC64: up.ContentCRC64, doesn't exist on UploadResponse
		}, nil
	}

	// Multiple blocks staged, commit them all now.
	// Block IDs are regenerated deterministically from the shared UUID prefix
	// plus the block number, so they match the IDs used while staging.
	blockID := newUUIDBlockID(bt.blockIDPrefix)
	blockIDs := make([]string, bt.maxBlockNum+1)
	for bn := uint32(0); bn < bt.maxBlockNum+1; bn++ {
		blockIDs[bn] = blockID.WithBlockNumber(bn).ToBase64()
	}

	return to.CommitBlockList(ctx, blockIDs, bt.options.getCommitBlockListOptions())
}
// AtomicMorpherUint32 identifies a method passed to and invoked by the AtomicMorph function.
// The AtomicMorpher callback is passed a startValue and based on this value it returns
// what the new value should be and the result that AtomicMorph should return to its caller.
type atomicMorpherUint32 func(startVal uint32) (val uint32, morphResult uint32)

// atomicMorphUint32 atomically transforms *target using morpher. It retries the
// load/compute/compare-and-swap cycle until the swap succeeds, then returns the
// morphResult produced by the winning invocation of morpher.
func atomicMorphUint32(target *uint32, morpher atomicMorpherUint32) uint32 {
	for {
		current := atomic.LoadUint32(target)
		next, result := morpher(current)
		if atomic.CompareAndSwapUint32(target, current, next) {
			return result
		}
	}
}
// blockID is the raw 64-byte block identifier; its base64 form is the
// string handed to the service as a block ID.
type blockID [64]byte

// ToBase64 returns the standard base64 encoding of all 64 bytes.
func (id blockID) ToBase64() string {
	return base64.StdEncoding.EncodeToString(id[:])
}
// uuidBlockID is a blockID whose leading bytes hold a UUID and whose next
// 4 bytes hold a big-endian block number; the remainder stays zero.
type uuidBlockID blockID

// newUUIDBlockID returns a uuidBlockID carrying u, with block number 0.
func newUUIDBlockID(u uuid.UUID) uuidBlockID {
	ubi := uuidBlockID{}     // Create a new uuidBlockID
	copy(ubi[:len(u)], u[:]) // Copy the specified UUID into it
	// Block number defaults to 0
	return ubi
}

// WithBlockNumber returns a copy of ubi with blockNumber encoded big-endian
// immediately after the UUID bytes (value receiver, so ubi is already a copy).
func (ubi uuidBlockID) WithBlockNumber(blockNumber uint32) uuidBlockID {
	binary.BigEndian.PutUint32(ubi[len(uuid.UUID{}):], blockNumber) // Put block number after UUID
	return ubi                                                      // Return the passed-in copy
}

// ToBase64 returns the base64 form of the full 64-byte block ID.
func (ubi uuidBlockID) ToBase64() string {
	return blockID(ubi).ToBase64()
}
// mmbPool implements the bufferManager interface.
// it uses anonymous memory mapped files for buffers.
// don't use this type directly, use newMMBPool() instead.
type mmbPool struct {
	buffers chan mmb // idle buffers; capacity equals max
	count   int      // number of buffers created so far
	max     int      // upper bound on count
	size    int64    // size in bytes of each buffer
}

// newMMBPool returns a bufferManager that lazily creates up to maxBuffers
// buffers of bufferSize bytes each.
func newMMBPool(maxBuffers int, bufferSize int64) bufferManager[mmb] {
	return &mmbPool{
		buffers: make(chan mmb, maxBuffers),
		max:     maxBuffers,
		size:    bufferSize,
	}
}

// Acquire returns the channel from which idle buffers are received.
func (pool *mmbPool) Acquire() <-chan mmb {
	return pool.buffers
}

// Grow creates one more buffer if the pool is below its maximum and places it
// on the idle channel. It returns the current buffer count; hitting the
// maximum is not an error. Per the bufferManager contract, Grow is only
// called from the reading goroutine, so count needs no locking.
func (pool *mmbPool) Grow() (int, error) {
	if pool.count < pool.max {
		buffer, err := newMMB(pool.size)
		if err != nil {
			return 0, err
		}
		pool.buffers <- buffer
		pool.count++
	}
	return pool.count, nil
}

// Release returns a buffer to the idle pool for reuse or cleanup.
func (pool *mmbPool) Release(buffer mmb) {
	pool.buffers <- buffer
}

// Free drains the pool and deletes every buffer created so far.
// All buffers must have been released back first, otherwise the
// receive blocks.
func (pool *mmbPool) Free() {
	for i := 0; i < pool.count; i++ {
		buffer := <-pool.buffers
		buffer.delete()
	}
	pool.count = 0
}

View file

@ -0,0 +1,532 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package blockblob
import (
"bytes"
"context"
"encoding/base64"
"errors"
"io"
"os"
"sync"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
)
// ClientOptions contains the optional parameters when creating a Client.
type ClientOptions struct {
azcore.ClientOptions
}
// Client defines a set of operations applicable to block blobs.
type Client base.CompositeClient[generated.BlobClient, generated.BlockBlobClient]
// NewClient creates an instance of Client with the specified values.
// - blobURL - the URL of the blob e.g. https://<account>.blob.core.windows.net/container/blob.txt
// - cred - an Azure AD credential, typically obtained via the azidentity module
// - options - client options; pass nil to accept the default values
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil)
conOptions := shared.GetClientOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
return (*Client)(base.NewBlockBlobClient(blobURL, pl, nil)), nil
}
// NewClientWithNoCredential creates an instance of Client with the specified values.
// This is used to anonymously access a blob or with a shared access signature (SAS) token.
// - blobURL - the URL of the blob e.g. https://<account>.blob.core.windows.net/container/blob.txt?<sas token>
// - options - client options; pass nil to accept the default values
func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) {
conOptions := shared.GetClientOptions(options)
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
return (*Client)(base.NewBlockBlobClient(blobURL, pl, nil)), nil
}
// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
// - blobURL - the URL of the blob e.g. https://<account>.blob.core.windows.net/container/blob.txt
// - cred - a SharedKeyCredential created with the matching blob's storage account and access key
// - options - client options; pass nil to accept the default values
func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) {
authPolicy := exported.NewSharedKeyCredPolicy(cred)
conOptions := shared.GetClientOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
return (*Client)(base.NewBlockBlobClient(blobURL, pl, cred)), nil
}
// NewClientFromConnectionString creates an instance of Client with the specified values.
// - connectionString - a connection string for the desired storage account
// - containerName - the name of the container within the storage account
// - blobName - the name of the blob within the container
// - options - client options; pass nil to accept the default values
func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) {
parsed, err := shared.ParseConnectionString(connectionString)
if err != nil {
return nil, err
}
parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName, blobName)
if parsed.AccountKey != "" && parsed.AccountName != "" {
credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey)
if err != nil {
return nil, err
}
return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options)
}
return NewClientWithNoCredential(parsed.ServiceURL, options)
}
func (bb *Client) sharedKey() *blob.SharedKeyCredential {
return base.SharedKeyComposite((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb))
}
func (bb *Client) generated() *generated.BlockBlobClient {
_, blockBlob := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb))
return blockBlob
}
func (bb *Client) innerBlobGenerated() *generated.BlobClient {
b := bb.BlobClient()
return base.InnerClient((*base.Client[generated.BlobClient])(b))
}
// URL returns the URL endpoint used by the Client object.
func (bb *Client) URL() string {
return bb.generated().Endpoint()
}
// BlobClient returns the embedded blob client for this block blob client.
// (The receiver is the block-blob Client; the previous comment's mention of
// "AppendBlob" was a copy-paste error.)
func (bb *Client) BlobClient() *blob.Client {
	blobClient, _ := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb))
	return (*blob.Client)(blobClient)
}
// WithSnapshot creates a new Client object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot returning a URL to the base blob.
func (bb *Client) WithSnapshot(snapshot string) (*Client, error) {
p, err := blob.ParseURL(bb.URL())
if err != nil {
return nil, err
}
p.Snapshot = snapshot
return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Pipeline(), bb.sharedKey())), nil
}
// WithVersionID creates a new Client object identical to the source but with the specified version id.
// Pass "" to remove the versionID returning a URL to the base blob.
// (This returns a block-blob *Client; the previous comment's mention of
// "AppendBlobURL" was a copy-paste error.)
func (bb *Client) WithVersionID(versionID string) (*Client, error) {
	p, err := blob.ParseURL(bb.URL())
	if err != nil {
		return nil, err
	}

	p.VersionID = versionID
	return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Pipeline(), bb.sharedKey())), nil
}
// Upload creates a new block blob or overwrites an existing block blob.
// Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not
// supported with Upload; the content of the existing blob is overwritten with the new content. To
// perform a partial update of a block blob, use StageBlock and CommitBlockList.
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (bb *Client) Upload(ctx context.Context, body io.ReadSeekCloser, options *UploadOptions) (UploadResponse, error) {
count, err := shared.ValidateSeekableStreamAt0AndGetCount(body)
if err != nil {
return UploadResponse{}, err
}
opts, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions := options.format()
resp, err := bb.generated().Upload(ctx, count, body, opts, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions)
return resp, err
}
// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block.
func (bb *Client) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeekCloser, options *StageBlockOptions) (StageBlockResponse, error) {
	count, err := shared.ValidateSeekableStreamAt0AndGetCount(body)
	if err != nil {
		return StageBlockResponse{}, err
	}

	opts, leaseAccessConditions, cpkInfo, cpkScopeInfo := options.format()

	if options != nil && options.TransactionalValidation != nil {
		body, err = options.TransactionalValidation.Apply(body, opts)
		if err != nil {
			// BUGFIX: this previously returned a nil error, silently
			// swallowing the transactional-validation failure and staging
			// the block anyway was only avoided by luck of the early return.
			return StageBlockResponse{}, err
		}
	}

	resp, err := bb.generated().StageBlock(ctx, base64BlockID, count, body, opts, leaseAccessConditions, cpkInfo, cpkScopeInfo)
	return resp, err
}
// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList.
// If count is CountToEnd (0), then data is read from specified offset to the end.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url.
func (bb *Client) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL string, options *StageBlockFromURLOptions) (StageBlockFromURLResponse, error) {
stageBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions := options.format()
resp, err := bb.generated().StageBlockFromURL(ctx, base64BlockID, 0, sourceURL, stageBlockFromURLOptions,
cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions)
return resp, err
}
// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob.
// In order to be written as part of a blob, a block must have been successfully written
// to the server in a prior PutBlock operation. You can call PutBlockList to update a blob
// by uploading only those blocks that have changed, then committing the new and existing
// blocks together. Any blocks not specified in the block list and permanently deleted.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list.
func (bb *Client) CommitBlockList(ctx context.Context, base64BlockIDs []string, options *CommitBlockListOptions) (CommitBlockListResponse, error) {
// this is a code smell in the generated code
blockIds := make([]*string, len(base64BlockIDs))
for k, v := range base64BlockIDs {
blockIds[k] = to.Ptr(v)
}
blockLookupList := generated.BlockLookupList{Latest: blockIds}
var commitOptions *generated.BlockBlobClientCommitBlockListOptions
var headers *generated.BlobHTTPHeaders
var leaseAccess *blob.LeaseAccessConditions
var cpkInfo *generated.CPKInfo
var cpkScope *generated.CPKScopeInfo
var modifiedAccess *generated.ModifiedAccessConditions
if options != nil {
commitOptions = &generated.BlockBlobClientCommitBlockListOptions{
BlobTagsString: shared.SerializeBlobTagsToStrPtr(options.Tags),
Metadata: options.Metadata,
RequestID: options.RequestID,
Tier: options.Tier,
Timeout: options.Timeout,
TransactionalContentCRC64: options.TransactionalContentCRC64,
TransactionalContentMD5: options.TransactionalContentMD5,
LegalHold: options.LegalHold,
ImmutabilityPolicyMode: options.ImmutabilityPolicyMode,
ImmutabilityPolicyExpiry: options.ImmutabilityPolicyExpiryTime,
}
headers = options.HTTPHeaders
leaseAccess, modifiedAccess = exported.FormatBlobAccessConditions(options.AccessConditions)
cpkInfo = options.CPKInfo
cpkScope = options.CPKScopeInfo
}
resp, err := bb.generated().CommitBlockList(ctx, blockLookupList, commitOptions, headers, leaseAccess, cpkInfo, cpkScope, modifiedAccess)
return resp, err
}
// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list.
func (bb *Client) GetBlockList(ctx context.Context, listType BlockListType, options *GetBlockListOptions) (GetBlockListResponse, error) {
o, lac, mac := options.format()
resp, err := bb.generated().GetBlockList(ctx, listType, o, lac, mac)
return resp, err
}
// Redeclared APIs ----- Copy over to Append blob and Page blob as well.

// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
// Note that deleting a blob also deletes all its snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
func (bb *Client) Delete(ctx context.Context, o *blob.DeleteOptions) (blob.DeleteResponse, error) {
	// Pure delegation to the embedded generic blob client.
	return bb.BlobClient().Delete(ctx, o)
}
// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob.
func (bb *Client) Undelete(ctx context.Context, o *blob.UndeleteOptions) (blob.UndeleteResponse, error) {
	// Delegates to the generic blob client.
	return bb.BlobClient().Undelete(ctx, o)
}
// SetImmutabilityPolicy operation enables users to set the immutability policy on a blob.
// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview
func (bb *Client) SetImmutabilityPolicy(ctx context.Context, expiryTime time.Time, options *blob.SetImmutabilityPolicyOptions) (blob.SetImmutabilityPolicyResponse, error) {
	// Delegates to the generic blob client.
	return bb.BlobClient().SetImmutabilityPolicy(ctx, expiryTime, options)
}
// DeleteImmutabilityPolicy operation enables users to delete the immutability policy on a blob.
// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview
func (bb *Client) DeleteImmutabilityPolicy(ctx context.Context, options *blob.DeleteImmutabilityPolicyOptions) (blob.DeleteImmutabilityPolicyResponse, error) {
	// Delegates to the generic blob client.
	return bb.BlobClient().DeleteImmutabilityPolicy(ctx, options)
}
// SetLegalHold operation enables users to set legal hold on a blob.
// https://learn.microsoft.com/en-us/azure/storage/blobs/immutable-storage-overview
func (bb *Client) SetLegalHold(ctx context.Context, legalHold bool, options *blob.SetLegalHoldOptions) (blob.SetLegalHoldResponse, error) {
	// Delegates to the generic blob client.
	return bb.BlobClient().SetLegalHold(ctx, legalHold, options)
}
// SetTier operation sets the tier on a blob. The operation is allowed on a page
// blob in a premium storage account and on a block blob in a blob storage account (locally
// redundant storage only). A premium page blob's tier determines the allowed size, IOPs, and
// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation
// does not update the blob's ETag.
// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
func (bb *Client) SetTier(ctx context.Context, tier blob.AccessTier, o *blob.SetTierOptions) (blob.SetTierResponse, error) {
	// Delegates to the generic blob client.
	return bb.BlobClient().SetTier(ctx, tier, o)
}
// SetExpiry operation sets an expiry time on an existing blob. This operation is only allowed on Hierarchical Namespace enabled accounts.
// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-blob-expiry
func (bb *Client) SetExpiry(ctx context.Context, expiryType ExpiryType, o *SetExpiryOptions) (SetExpiryResponse, error) {
	// A nil expiry type is treated as "never expires".
	if expiryType == nil {
		expiryType = ExpiryTypeNever{}
	}
	et, opts := expiryType.Format(o)
	return bb.innerBlobGenerated().SetExpiry(ctx, et, opts)
}
// GetProperties returns the blob's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
func (bb *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOptions) (blob.GetPropertiesResponse, error) {
	// Delegates to the generic blob client.
	return bb.BlobClient().GetProperties(ctx, o)
}
// SetHTTPHeaders changes a blob's HTTP headers.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
func (bb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) {
	// Delegates to the generic blob client.
	return bb.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o)
}
// SetMetadata changes a blob's metadata.
// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata.
func (bb *Client) SetMetadata(ctx context.Context, metadata map[string]*string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) {
	// Delegates to the generic blob client.
	return bb.BlobClient().SetMetadata(ctx, metadata, o)
}
// CreateSnapshot creates a read-only snapshot of a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob.
func (bb *Client) CreateSnapshot(ctx context.Context, o *blob.CreateSnapshotOptions) (blob.CreateSnapshotResponse, error) {
	// Delegates to the generic blob client.
	return bb.BlobClient().CreateSnapshot(ctx, o)
}
// StartCopyFromURL copies the data at the source URL to a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob.
func (bb *Client) StartCopyFromURL(ctx context.Context, copySource string, o *blob.StartCopyFromURLOptions) (blob.StartCopyFromURLResponse, error) {
	// Delegates to the generic blob client.
	return bb.BlobClient().StartCopyFromURL(ctx, copySource, o)
}
// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob.
func (bb *Client) AbortCopyFromURL(ctx context.Context, copyID string, o *blob.AbortCopyFromURLOptions) (blob.AbortCopyFromURLResponse, error) {
	// Delegates to the generic blob client.
	return bb.BlobClient().AbortCopyFromURL(ctx, copyID, o)
}
// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot.
// Each call to this operation replaces all existing tags attached to the blob.
// To remove all tags from the blob, call this operation with no tags set.
// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags
func (bb *Client) SetTags(ctx context.Context, tags map[string]string, o *blob.SetTagsOptions) (blob.SetTagsResponse, error) {
	// Delegates to the generic blob client.
	return bb.BlobClient().SetTags(ctx, tags, o)
}
// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot.
// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags
func (bb *Client) GetTags(ctx context.Context, o *blob.GetTagsOptions) (blob.GetTagsResponse, error) {
	// Delegates to the generic blob client.
	return bb.BlobClient().GetTags(ctx, o)
}
// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url.
func (bb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.CopyFromURLOptions) (blob.CopyFromURLResponse, error) {
	// Delegates to the generic blob client.
	return bb.BlobClient().CopyFromURL(ctx, copySource, o)
}
// Concurrent Upload Functions -----------------------------------------------------------------------------------------

// uploadFromReader uploads a buffer in blocks to a block blob.
// actualSize is the number of readable bytes in reader; o selects block size,
// concurrency, progress reporting, and the options applied when committing.
// Sources that fit in MaxUploadBlobBytes go up in a single Upload call;
// larger sources are staged as blocks concurrently and then committed.
func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, actualSize int64, o *uploadFromReaderOptions) (uploadFromReaderResponse, error) {
	readerSize := actualSize
	if o.BlockSize == 0 {
		// If bufferSize > (MaxStageBlockBytes * MaxBlocks), then error
		if readerSize > MaxStageBlockBytes*MaxBlocks {
			return uploadFromReaderResponse{}, errors.New("buffer is too large to upload to a block blob")
		}
		// If bufferSize <= MaxUploadBlobBytes, then Upload should be used with just 1 I/O request
		if readerSize <= MaxUploadBlobBytes {
			o.BlockSize = MaxUploadBlobBytes // Default if unspecified
		} else {
			if remainder := readerSize % MaxBlocks; remainder > 0 {
				// ensure readerSize is a multiple of MaxBlocks
				readerSize += (MaxBlocks - remainder)
			}
			o.BlockSize = readerSize / MaxBlocks // buffer / max blocks = block size to use all 50,000 blocks
			if o.BlockSize < blob.DefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB
				o.BlockSize = blob.DefaultDownloadBlockSize
			}
			// StageBlock will be called with blockSize blocks and a Concurrency of (BufferSize / BlockSize).
		}
	}

	if readerSize <= MaxUploadBlobBytes {
		// If the size can fit in 1 Upload call, do it this way
		var body io.ReadSeeker = io.NewSectionReader(reader, 0, readerSize)
		if o.Progress != nil {
			body = streaming.NewRequestProgress(shared.NopCloser(body), o.Progress)
		}

		uploadBlockBlobOptions := o.getUploadBlockBlobOptions()
		resp, err := bb.Upload(ctx, shared.NopCloser(body), uploadBlockBlobOptions)

		return toUploadReaderAtResponseFromUploadResponse(resp), err
	}

	// Compute the block count in int64 BEFORE narrowing to uint16: converting
	// first can silently truncate a large count (e.g. 65537 blocks -> 1) and
	// bypass the MaxBlocks guard below.
	blockCount := ((readerSize - 1) / o.BlockSize) + 1
	if blockCount > MaxBlocks {
		// prevent any math bugs from attempting to upload too many blocks which will always fail
		return uploadFromReaderResponse{}, errors.New("block limit exceeded")
	}
	numBlocks := uint16(blockCount)

	if log.Should(exported.EventUpload) {
		urlparts, err := blob.ParseURL(bb.generated().Endpoint())
		if err == nil {
			log.Writef(exported.EventUpload, "blob name %s actual size %v block-size %v block-count %v",
				urlparts.BlobName, actualSize, o.BlockSize, numBlocks)
		}
	}

	blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs
	progress := int64(0)
	progressLock := &sync.Mutex{}

	err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{
		OperationName: "uploadFromReader",
		TransferSize:  readerSize,
		ChunkSize:     o.BlockSize,
		Concurrency:   o.Concurrency,
		Operation: func(ctx context.Context, offset int64, chunkSize int64) error {
			// This function is called once per block.
			// It is passed this block's offset within the buffer and its count of bytes
			// Prepare to read the proper block/section of the buffer
			if chunkSize < o.BlockSize {
				// this is the last block. its actual size might be less
				// than the calculated size due to rounding up of the payload
				// size to fit in a whole number of blocks.
				chunkSize = (actualSize - offset)
			}
			var body io.ReadSeeker = io.NewSectionReader(reader, offset, chunkSize)
			blockNum := offset / o.BlockSize
			if o.Progress != nil {
				blockProgress := int64(0)
				body = streaming.NewRequestProgress(shared.NopCloser(body),
					func(bytesTransferred int64) {
						diff := bytesTransferred - blockProgress
						blockProgress = bytesTransferred
						progressLock.Lock() // 1 goroutine at a time gets progress report
						progress += diff
						o.Progress(progress)
						progressLock.Unlock()
					})
			}

			// Block IDs are unique values to avoid issue if 2+ clients are uploading blocks
			// at the same time causing PutBlockList to get a mix of blocks from all the clients.
			generatedUuid, err := uuid.New()
			if err != nil {
				return err
			}
			blockIDList[blockNum] = base64.StdEncoding.EncodeToString([]byte(generatedUuid.String()))
			stageBlockOptions := o.getStageBlockOptions()
			_, err = bb.StageBlock(ctx, blockIDList[blockNum], shared.NopCloser(body), stageBlockOptions)
			return err
		},
	})
	if err != nil {
		return uploadFromReaderResponse{}, err
	}
	// All put blocks were successful, call Put Block List to finalize the blob
	commitBlockListOptions := o.getCommitBlockListOptions()
	resp, err := bb.CommitBlockList(ctx, blockIDList, commitBlockListOptions)

	return toUploadReaderAtResponseFromCommitBlockListResponse(resp), err
}
// UploadBuffer uploads a buffer in blocks to a block blob.
func (bb *Client) UploadBuffer(ctx context.Context, buffer []byte, o *UploadBufferOptions) (UploadBufferResponse, error) {
	// Work on a local copy of the options so uploadFromReader's mutations
	// (e.g. defaulting BlockSize) don't leak back to the caller.
	var opts uploadFromReaderOptions
	if o != nil {
		opts = *o
	}
	return bb.uploadFromReader(ctx, bytes.NewReader(buffer), int64(len(buffer)), &opts)
}
// UploadFile uploads a file in blocks to a block blob.
func (bb *Client) UploadFile(ctx context.Context, file *os.File, o *UploadFileOptions) (UploadFileResponse, error) {
	// Stat the file first; its size drives the block-size and chunking math.
	stat, err := file.Stat()
	if err != nil {
		return uploadFromReaderResponse{}, err
	}

	// Copy the options so uploadFromReader's mutations don't leak to the caller.
	var opts uploadFromReaderOptions
	if o != nil {
		opts = *o
	}
	return bb.uploadFromReader(ctx, file, stat.Size(), &opts)
}
// UploadStream copies the file held in io.Reader to the Blob at blockBlobClient.
// A Context deadline or cancellation will cause this to error.
func (bb *Client) UploadStream(ctx context.Context, body io.Reader, o *UploadStreamOptions) (UploadStreamResponse, error) {
	// copyFromReader takes the options by value; a nil pointer simply becomes
	// the zero value (defaults are applied downstream).
	var opts UploadStreamOptions
	if o != nil {
		opts = *o
	}

	result, err := copyFromReader(ctx, body, bb, opts, newMMBPool)
	if err != nil {
		return CommitBlockListResponse{}, err
	}
	return result, nil
}
// Concurrent Download Functions -----------------------------------------------------------------------------------------

// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
func (bb *Client) DownloadStream(ctx context.Context, o *blob.DownloadStreamOptions) (blob.DownloadStreamResponse, error) {
	// Delegates to the generic blob client.
	return bb.BlobClient().DownloadStream(ctx, o)
}
// DownloadBuffer downloads an Azure blob to a buffer with parallel.
func (bb *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *blob.DownloadBufferOptions) (int64, error) {
	// Wraps the caller's byte slice in an io.WriterAt adapter before delegating.
	return bb.BlobClient().DownloadBuffer(ctx, shared.NewBytesWriter(buffer), o)
}
// DownloadFile downloads an Azure blob to a local file.
// The file would be truncated if the size doesn't match.
func (bb *Client) DownloadFile(ctx context.Context, file *os.File, o *blob.DownloadFileOptions) (int64, error) {
	// Delegates to the generic blob client.
	return bb.BlobClient().DownloadFile(ctx, file, o)
}

View file

@ -0,0 +1,39 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package blockblob
import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
const (
	// CountToEnd specifies the end of the file.
	CountToEnd = 0

	// _1MiB is one mebibyte, used as the minimum stream block size.
	_1MiB = 1024 * 1024

	// MaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload.
	MaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB

	// MaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock.
	MaxStageBlockBytes = 4000 * 1024 * 1024 // 4GB

	// MaxBlocks indicates the maximum number of blocks allowed in a block blob.
	MaxBlocks = 50000
)
// BlockListType defines values for BlockListType
type BlockListType = generated.BlockListType

const (
	// BlockListTypeCommitted selects only committed blocks.
	BlockListTypeCommitted BlockListType = generated.BlockListTypeCommitted
	// BlockListTypeUncommitted selects only staged (uncommitted) blocks.
	BlockListTypeUncommitted BlockListType = generated.BlockListTypeUncommitted
	// BlockListTypeAll selects both committed and uncommitted blocks.
	BlockListTypeAll BlockListType = generated.BlockListTypeAll
)
// PossibleBlockListTypeValues returns the possible values for the BlockListType const type.
func PossibleBlockListTypeValues() []BlockListType {
	// Delegates to the generated layer.
	return generated.PossibleBlockListTypeValues()
}

View file

@ -0,0 +1,38 @@
//go:build go1.18 && (linux || darwin || freebsd || openbsd || netbsd || solaris)
// +build go1.18
// +build linux darwin freebsd openbsd netbsd solaris
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package blockblob
import (
"fmt"
"os"
"syscall"
)
// mmb is a memory mapped buffer
type mmb []byte

// newMMB creates a new memory mapped buffer with the specified size
func newMMB(size int64) (mmb, error) {
	// Anonymous (fd == -1) private mapping: plain zero-filled memory that
	// lives outside the Go heap.
	prot, flags := syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANON|syscall.MAP_PRIVATE
	addr, err := syscall.Mmap(-1, 0, int(size), prot, flags)
	if err != nil {
		return nil, os.NewSyscallError("Mmap", err)
	}
	return mmb(addr), nil
}
// delete cleans up the memory mapped buffer
func (m *mmb) delete() {
	err := syscall.Munmap(*m)
	*m = nil // prevent any further use of the now-unmapped memory
	if err != nil {
		// if we get here, there is likely memory corruption.
		// please open an issue https://github.com/Azure/azure-sdk-for-go/issues
		panic(fmt.Sprintf("Munmap error: %v", err))
	}
}

View file

@ -0,0 +1,54 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package blockblob
import (
"fmt"
"os"
"reflect"
"syscall"
"unsafe"
)
// mmb is a memory mapped buffer
type mmb []byte

// newMMB creates a new memory mapped buffer with the specified size
func newMMB(size int64) (mmb, error) {
	const InvalidHandleValue = ^uintptr(0) // -1

	// Pagefile-backed (no file handle) read/write mapping; the 64-bit size is
	// split into high/low 32-bit halves as the API requires.
	prot, access := uint32(syscall.PAGE_READWRITE), uint32(syscall.FILE_MAP_WRITE)
	hMMF, err := syscall.CreateFileMapping(syscall.Handle(InvalidHandleValue), nil, prot, uint32(size>>32), uint32(size&0xffffffff), nil)
	if err != nil {
		return nil, os.NewSyscallError("CreateFileMapping", err)
	}
	// NOTE(review): the handle is closed as soon as the view exists — relies
	// on the mapped view keeping the section alive (standard Win32 behavior).
	defer syscall.CloseHandle(hMMF)

	addr, err := syscall.MapViewOfFile(hMMF, access, 0, 0, uintptr(size))
	if err != nil {
		return nil, os.NewSyscallError("MapViewOfFile", err)
	}

	// Manually construct a slice header over the mapped view; this memory is
	// not managed by the Go heap.
	m := mmb{}
	h := (*reflect.SliceHeader)(unsafe.Pointer(&m))
	h.Data = addr
	h.Len = int(size)
	h.Cap = h.Len
	return m, nil
}
// delete cleans up the memory mapped buffer
func (m *mmb) delete() {
	// Capture the base address before clearing the slice; UnmapViewOfFile needs it.
	addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0])))
	*m = mmb{}
	err := syscall.UnmapViewOfFile(addr)
	if err != nil {
		// if we get here, there is likely memory corruption.
		// please open an issue https://github.com/Azure/azure-sdk-for-go/issues
		panic(fmt.Sprintf("UnmapViewOfFile error: %v", err))
	}
}

View file

@ -0,0 +1,348 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package blockblob
import (
"time"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
)
// Type Declarations ---------------------------------------------------------------------
// These aliases re-export generated-layer types so callers never import internal/generated.

// Block - Represents a single block in a block blob. It describes the block's ID and size.
type Block = generated.Block

// BlockList - can be uncommitted or committed blocks (committed/uncommitted)
type BlockList = generated.BlockList
// Request Model Declaration -------------------------------------------------------------------------------------------

// UploadOptions contains the optional parameters for the Client.Upload method.
type UploadOptions struct {
	// Optional. Used to set blob tags in various blob operations.
	Tags map[string]string

	// Optional. Specifies a user-defined name-value pair associated with the blob.
	Metadata map[string]*string

	// Optional. Indicates the tier to be set on the blob.
	Tier *blob.AccessTier

	// Specify the transactional md5 for the body, to be validated by the service.
	TransactionalContentMD5 []byte

	// HTTPHeaders are the HTTP headers to apply to the blob.
	HTTPHeaders *blob.HTTPHeaders
	// CPKInfo and CPKScopeInfo carry customer-provided-key encryption settings.
	CPKInfo      *blob.CPKInfo
	CPKScopeInfo *blob.CPKScopeInfo
	// AccessConditions gates the operation on lease and modification conditions.
	AccessConditions *blob.AccessConditions
	// LegalHold, ImmutabilityPolicyMode and ImmutabilityPolicyExpiryTime
	// configure immutable-storage settings on the blob.
	LegalHold                    *bool
	ImmutabilityPolicyMode       *blob.ImmutabilityPolicySetting
	ImmutabilityPolicyExpiryTime *time.Time
}
// format converts UploadOptions into the six parameter groups expected by the
// generated client. A nil receiver yields all-nil results.
func (o *UploadOptions) format() (*generated.BlockBlobClientUploadOptions, *generated.BlobHTTPHeaders, *generated.LeaseAccessConditions,
	*generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions) {
	if o == nil {
		return nil, nil, nil, nil, nil, nil
	}

	opts := generated.BlockBlobClientUploadOptions{
		BlobTagsString:           shared.SerializeBlobTagsToStrPtr(o.Tags),
		Metadata:                 o.Metadata,
		Tier:                     o.Tier,
		TransactionalContentMD5:  o.TransactionalContentMD5,
		LegalHold:                o.LegalHold,
		ImmutabilityPolicyMode:   o.ImmutabilityPolicyMode,
		ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiryTime,
	}
	lease, modified := exported.FormatBlobAccessConditions(o.AccessConditions)
	return &opts, o.HTTPHeaders, lease, o.CPKInfo, o.CPKScopeInfo, modified
}
// ---------------------------------------------------------------------------------------------------------------------

// StageBlockOptions contains the optional parameters for the Client.StageBlock method.
type StageBlockOptions struct {
	// CPKInfo and CPKScopeInfo carry customer-provided-key encryption settings.
	CPKInfo      *blob.CPKInfo
	CPKScopeInfo *blob.CPKScopeInfo

	// LeaseAccessConditions gates the operation on an active lease.
	LeaseAccessConditions *blob.LeaseAccessConditions

	// TransactionalValidation specifies the transfer validation type to use.
	// The default is nil (no transfer validation).
	TransactionalValidation blob.TransferValidationType
}
// format converts StageBlockOptions into the generated-layer parameter set.
// A nil receiver is valid and yields all-nil results.
func (o *StageBlockOptions) format() (*generated.BlockBlobClientStageBlockOptions, *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.CPKScopeInfo) {
	if o == nil {
		return nil, nil, nil, nil
	}

	return &generated.BlockBlobClientStageBlockOptions{}, o.LeaseAccessConditions, o.CPKInfo, o.CPKScopeInfo
}
// ---------------------------------------------------------------------------------------------------------------------

// StageBlockFromURLOptions contains the optional parameters for the Client.StageBlockFromURL method.
type StageBlockFromURLOptions struct {
	// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
	CopySourceAuthorization *string

	// LeaseAccessConditions gates the operation on an active lease on the destination.
	LeaseAccessConditions *blob.LeaseAccessConditions

	// SourceModifiedAccessConditions gates the operation on the source blob's state.
	SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions

	// SourceContentValidation contains the validation mechanism used on the range of bytes read from the source.
	SourceContentValidation blob.SourceContentValidationType

	// Range specifies a range of bytes. The default value is all bytes.
	Range blob.HTTPRange

	// CPKInfo and CPKScopeInfo carry customer-provided-key encryption settings.
	CPKInfo      *blob.CPKInfo
	CPKScopeInfo *blob.CPKScopeInfo
}
// format converts the options into their generated-layer equivalents, applying
// the optional source-content validation to the request options. A nil
// receiver yields all-nil results.
func (o *StageBlockFromURLOptions) format() (*generated.BlockBlobClientStageBlockFromURLOptions, *generated.CPKInfo, *generated.CPKScopeInfo, *generated.LeaseAccessConditions, *generated.SourceModifiedAccessConditions) {
	if o == nil {
		return nil, nil, nil, nil, nil
	}

	opts := &generated.BlockBlobClientStageBlockFromURLOptions{
		CopySourceAuthorization: o.CopySourceAuthorization,
		SourceRange:             exported.FormatHTTPRange(o.Range),
	}
	if v := o.SourceContentValidation; v != nil {
		v.Apply(opts)
	}

	return opts, o.CPKInfo, o.CPKScopeInfo, o.LeaseAccessConditions, o.SourceModifiedAccessConditions
}
// ---------------------------------------------------------------------------------------------------------------------

// CommitBlockListOptions contains the optional parameters for Client.CommitBlockList method.
// The fields mirror UploadOptions, plus RequestID/Timeout and a transactional CRC64.
type CommitBlockListOptions struct {
	// Tags are set on the committed blob; Metadata is the user-defined name-value pairs.
	Tags     map[string]string
	Metadata map[string]*string
	// RequestID and Timeout are passed through to the service call.
	RequestID *string
	Tier      *blob.AccessTier
	Timeout   *int32
	// Transactional checksums of the request body, validated by the service.
	TransactionalContentCRC64 []byte
	TransactionalContentMD5   []byte
	HTTPHeaders               *blob.HTTPHeaders
	// CPKInfo and CPKScopeInfo carry customer-provided-key encryption settings.
	CPKInfo      *blob.CPKInfo
	CPKScopeInfo *blob.CPKScopeInfo
	// AccessConditions gates the operation on lease and modification conditions.
	AccessConditions *blob.AccessConditions
	// Immutable-storage settings applied to the committed blob.
	LegalHold                    *bool
	ImmutabilityPolicyMode       *blob.ImmutabilityPolicySetting
	ImmutabilityPolicyExpiryTime *time.Time
}
// ---------------------------------------------------------------------------------------------------------------------

// GetBlockListOptions contains the optional parameters for the Client.GetBlockList method.
type GetBlockListOptions struct {
	// Snapshot selects a specific snapshot of the blob to list blocks for.
	Snapshot *string
	// AccessConditions gates the operation on lease and modification conditions.
	AccessConditions *blob.AccessConditions
}
// format converts GetBlockListOptions into its generated-layer equivalents.
// A nil receiver is valid and yields all-nil results.
func (o *GetBlockListOptions) format() (*generated.BlockBlobClientGetBlockListOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) {
	if o == nil {
		return nil, nil, nil
	}

	leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
	return &generated.BlockBlobClientGetBlockListOptions{Snapshot: o.Snapshot}, leaseAccessConditions, modifiedAccessConditions
}
// ------------------------------------------------------------

// uploadFromReaderOptions identifies options used by the UploadBuffer and UploadFile functions.
type uploadFromReaderOptions struct {
	// BlockSize specifies the block size to use; the default (and maximum size) is MaxStageBlockBytes.
	BlockSize int64

	// Progress is a function that is invoked periodically as bytes are sent to the BlockBlobClient.
	// Note that the progress reporting is not always increasing; it can go down when retrying a request.
	Progress func(bytesTransferred int64)

	// HTTPHeaders indicates the HTTP headers to be associated with the blob.
	HTTPHeaders *blob.HTTPHeaders

	// Metadata indicates the metadata to be associated with the blob when PutBlockList is called.
	Metadata map[string]*string

	// AccessConditions indicates the access conditions for the block blob.
	AccessConditions *blob.AccessConditions

	// AccessTier indicates the tier of blob
	AccessTier *blob.AccessTier

	// BlobTags
	Tags map[string]string

	// ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data.
	CPKInfo      *blob.CPKInfo
	CPKScopeInfo *blob.CPKScopeInfo

	// Concurrency indicates the maximum number of blocks to upload in parallel (0=default)
	Concurrency uint16

	// TransactionalValidation is the transfer validation type applied to each staged block.
	TransactionalValidation blob.TransferValidationType

	// Optional header, Specifies the transactional crc64 for the body, to be validated by the service.
	TransactionalContentCRC64 uint64
	// Specify the transactional md5 for the body, to be validated by the service.
	TransactionalContentMD5 []byte
}
// UploadBufferOptions provides set of configurations for UploadBuffer operation.
type UploadBufferOptions = uploadFromReaderOptions

// UploadFileOptions provides set of configurations for UploadFile operation.
// Both are aliases of the shared reader-based option set.
type UploadFileOptions = uploadFromReaderOptions
// getStageBlockOptions carries the StageBlock-relevant fields (CPK, lease and
// transactional validation) over from the reader options.
func (o *uploadFromReaderOptions) getStageBlockOptions() *StageBlockOptions {
	lease, _ := exported.FormatBlobAccessConditions(o.AccessConditions)
	opts := StageBlockOptions{
		CPKInfo:                 o.CPKInfo,
		CPKScopeInfo:            o.CPKScopeInfo,
		LeaseAccessConditions:   lease,
		TransactionalValidation: o.TransactionalValidation,
	}
	return &opts
}
// getUploadBlockBlobOptions maps the reader options onto the single-shot
// Upload option set (used when the payload fits in one request).
func (o *uploadFromReaderOptions) getUploadBlockBlobOptions() *UploadOptions {
	return &UploadOptions{
		Tags:             o.Tags,
		Metadata:         o.Metadata,
		Tier:             o.AccessTier,
		HTTPHeaders:      o.HTTPHeaders,
		AccessConditions: o.AccessConditions,
		CPKInfo:          o.CPKInfo,
		CPKScopeInfo:     o.CPKScopeInfo,
	}
}
// getCommitBlockListOptions maps the reader options onto the CommitBlockList
// option set used to finalize a chunked upload.
func (o *uploadFromReaderOptions) getCommitBlockListOptions() *CommitBlockListOptions {
	return &CommitBlockListOptions{
		Tags:         o.Tags,
		Metadata:     o.Metadata,
		Tier:         o.AccessTier,
		HTTPHeaders:  o.HTTPHeaders,
		CPKInfo:      o.CPKInfo,
		CPKScopeInfo: o.CPKScopeInfo,
	}
}
// ---------------------------------------------------------------------------------------------------------------------

// UploadStreamOptions provides set of configurations for UploadStream operation.
type UploadStreamOptions struct {
	// BlockSize defines the size of the buffer used during upload. The default and minimum value is 1 MiB.
	BlockSize int64

	// Concurrency defines the max number of concurrent uploads to be performed to upload the file.
	// Each concurrent upload will create a buffer of size BlockSize. The default value is one.
	Concurrency int

	// TransactionalValidation is the transfer validation type applied to each staged block.
	TransactionalValidation blob.TransferValidationType

	// HTTPHeaders are the HTTP headers to apply to the committed blob.
	HTTPHeaders *blob.HTTPHeaders
	// Metadata is the user-defined name-value pairs set on the blob.
	Metadata map[string]*string
	// AccessConditions gates the operation on lease and modification conditions.
	AccessConditions *blob.AccessConditions
	// AccessTier indicates the tier to set on the blob.
	AccessTier *blob.AccessTier
	// Tags are set on the committed blob.
	Tags map[string]string
	// CPKInfo and CPKScopeInfo carry customer-provided-key encryption settings.
	CPKInfo      *blob.CPKInfo
	CPKScopeInfo *blob.CPKScopeInfo
}
// setDefaults clamps the stream options to their minimum supported values:
// a block size of at least 1 MiB and at least one concurrent uploader.
func (u *UploadStreamOptions) setDefaults() {
	if u.BlockSize < _1MiB {
		u.BlockSize = _1MiB
	}
	if u.Concurrency == 0 {
		u.Concurrency = 1
	}
}
// getStageBlockOptions carries the StageBlock-relevant fields over from the
// stream options. A nil receiver yields nil.
func (u *UploadStreamOptions) getStageBlockOptions() *StageBlockOptions {
	if u == nil {
		return nil
	}

	leaseAccessConditions, _ := exported.FormatBlobAccessConditions(u.AccessConditions)
	return &StageBlockOptions{
		TransactionalValidation: u.TransactionalValidation,
		CPKInfo:                 u.CPKInfo,
		CPKScopeInfo:            u.CPKScopeInfo,
		LeaseAccessConditions:   leaseAccessConditions,
	}
}
// getCommitBlockListOptions maps the stream options onto the CommitBlockList
// option set. A nil receiver yields nil.
func (u *UploadStreamOptions) getCommitBlockListOptions() *CommitBlockListOptions {
	if u == nil {
		return nil
	}

	return &CommitBlockListOptions{
		Tags:             u.Tags,
		Metadata:         u.Metadata,
		Tier:             u.AccessTier,
		HTTPHeaders:      u.HTTPHeaders,
		CPKInfo:          u.CPKInfo,
		CPKScopeInfo:     u.CPKScopeInfo,
		AccessConditions: u.AccessConditions,
	}
}
// getUploadOptions maps the stream options onto the single-shot Upload option
// set. A nil receiver yields nil.
func (u *UploadStreamOptions) getUploadOptions() *UploadOptions {
	if u == nil {
		return nil
	}

	return &UploadOptions{
		Tags:             u.Tags,
		Metadata:         u.Metadata,
		Tier:             u.AccessTier,
		HTTPHeaders:      u.HTTPHeaders,
		CPKInfo:          u.CPKInfo,
		CPKScopeInfo:     u.CPKScopeInfo,
		AccessConditions: u.AccessConditions,
	}
}
// ---------------------------------------------------------------------------------------------------------------------
// Expiry-related types are aliases of the exported-layer declarations.

// ExpiryType defines values for ExpiryType.
type ExpiryType = exported.ExpiryType

// ExpiryTypeAbsolute defines the absolute time for the blob expiry.
type ExpiryTypeAbsolute = exported.ExpiryTypeAbsolute

// ExpiryTypeRelativeToNow defines the duration relative to now for the blob expiry.
type ExpiryTypeRelativeToNow = exported.ExpiryTypeRelativeToNow

// ExpiryTypeRelativeToCreation defines the duration relative to creation for the blob expiry.
type ExpiryTypeRelativeToCreation = exported.ExpiryTypeRelativeToCreation

// ExpiryTypeNever defines that the blob will be set to never expire.
type ExpiryTypeNever = exported.ExpiryTypeNever

// SetExpiryOptions contains the optional parameters for the Client.SetExpiry method.
type SetExpiryOptions = exported.SetExpiryOptions

View file

@ -0,0 +1,114 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package blockblob
import (
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)
// Response types are aliases of the generated-layer responses.

// UploadResponse contains the response from method Client.Upload.
type UploadResponse = generated.BlockBlobClientUploadResponse

// StageBlockResponse contains the response from method Client.StageBlock.
type StageBlockResponse = generated.BlockBlobClientStageBlockResponse

// CommitBlockListResponse contains the response from method Client.CommitBlockList.
type CommitBlockListResponse = generated.BlockBlobClientCommitBlockListResponse

// StageBlockFromURLResponse contains the response from method Client.StageBlockFromURL.
type StageBlockFromURLResponse = generated.BlockBlobClientStageBlockFromURLResponse

// GetBlockListResponse contains the response from method Client.GetBlockList.
type GetBlockListResponse = generated.BlockBlobClientGetBlockListResponse
// uploadFromReaderResponse contains the response from method Client.UploadBuffer/Client.UploadFile.
// It holds the fields common to both the single-shot Upload response and the
// CommitBlockList response, since either path can serve an upload.
type uploadFromReaderResponse struct {
	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
	ClientRequestID *string

	// ContentMD5 contains the information returned from the Content-MD5 header response.
	ContentMD5 []byte

	// Date contains the information returned from the Date header response.
	Date *time.Time

	// ETag contains the information returned from the ETag header response.
	ETag *azcore.ETag

	// EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
	EncryptionKeySHA256 *string

	// EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
	EncryptionScope *string

	// IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
	IsServerEncrypted *bool

	// LastModified contains the information returned from the Last-Modified header response.
	LastModified *time.Time

	// RequestID contains the information returned from the x-ms-request-id header response.
	RequestID *string

	// Version contains the information returned from the x-ms-version header response.
	Version *string

	// VersionID contains the information returned from the x-ms-version-id header response.
	VersionID *string

	// ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
	// Will be a part of response only if uploading data >= internal.MaxUploadBlobBytes (= 256 * 1024 * 1024 // 256MB)
	ContentCRC64 []byte
}
// toUploadReaderAtResponseFromUploadResponse maps a single-shot Upload
// response into the common uploadFromReaderResponse shape. ContentCRC64 is
// not populated on this path (see the field comment on uploadFromReaderResponse).
func toUploadReaderAtResponseFromUploadResponse(resp UploadResponse) uploadFromReaderResponse {
	return uploadFromReaderResponse{
		ClientRequestID:     resp.ClientRequestID,
		ContentMD5:          resp.ContentMD5,
		Date:                resp.Date,
		ETag:                resp.ETag,
		EncryptionKeySHA256: resp.EncryptionKeySHA256,
		EncryptionScope:     resp.EncryptionScope,
		IsServerEncrypted:   resp.IsServerEncrypted,
		LastModified:        resp.LastModified,
		RequestID:           resp.RequestID,
		Version:             resp.Version,
		VersionID:           resp.VersionID,
	}
}
// toUploadReaderAtResponseFromCommitBlockListResponse maps a CommitBlockListResponse onto
// the shared uploadFromReaderResponse shape, including the CRC64 that the chunked upload
// path may carry.
func toUploadReaderAtResponseFromCommitBlockListResponse(resp CommitBlockListResponse) uploadFromReaderResponse {
	var out uploadFromReaderResponse
	out.ClientRequestID = resp.ClientRequestID
	out.ContentMD5 = resp.ContentMD5
	out.Date = resp.Date
	out.ETag = resp.ETag
	out.EncryptionKeySHA256 = resp.EncryptionKeySHA256
	out.EncryptionScope = resp.EncryptionScope
	out.IsServerEncrypted = resp.IsServerEncrypted
	out.LastModified = resp.LastModified
	out.RequestID = resp.RequestID
	out.Version = resp.Version
	out.VersionID = resp.VersionID
	out.ContentCRC64 = resp.ContentCRC64
	return out
}
// The public response names below are type aliases so that the three upload entry points
// can share one underlying struct without exposing the unexported name.

// UploadFileResponse contains the response from method Client.UploadBuffer/Client.UploadFile.
type UploadFileResponse = uploadFromReaderResponse

// UploadBufferResponse contains the response from method Client.UploadBuffer/Client.UploadFile.
type UploadBufferResponse = uploadFromReaderResponse

// UploadStreamResponse contains the response from method Client.CommitBlockList.
type UploadStreamResponse = CommitBlockListResponse

// SetExpiryResponse contains the response from method Client.SetExpiry.
type SetExpiryResponse = generated.BlobClientSetExpiryResponse

View file

@ -0,0 +1,28 @@
trigger:
branches:
include:
- main
- feature/*
- hotfix/*
- release/*
paths:
include:
- sdk/storage/azblob
pr:
branches:
include:
- main
- feature/*
- hotfix/*
- release/*
paths:
include:
- sdk/storage/azblob
stages:
- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml
parameters:
ServiceDirectory: 'storage/azblob'
RunLiveTests: true

View file

@ -0,0 +1,176 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package azblob
import (
"context"
"io"
"os"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
)
// ClientOptions contains the optional parameters when creating a Client.
type ClientOptions struct {
	// ClientOptions carries the azcore pipeline settings (transport, retry, telemetry, etc.).
	azcore.ClientOptions
}
// Client represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob.
type Client struct {
	// svc is the underlying service-level client; every method on Client delegates to it.
	svc *service.Client
}
// NewClient creates an instance of Client with the specified values.
//   - serviceURL - the URL of the storage account e.g. https://<account>.blob.core.windows.net/
//   - cred - an Azure AD credential, typically obtained via the azidentity module
//   - options - client options; pass nil to accept the default values
func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
	// A nil *ClientOptions is forwarded as a nil *service.ClientOptions.
	var svcOpts *service.ClientOptions
	if options != nil {
		svcOpts = &service.ClientOptions{ClientOptions: options.ClientOptions}
	}
	svc, err := service.NewClient(serviceURL, cred, svcOpts)
	if err != nil {
		return nil, err
	}
	return &Client{svc: svc}, nil
}
// NewClientWithNoCredential creates an instance of Client with the specified values.
// This is used to anonymously access a storage account or with a shared access signature (SAS) token.
//   - serviceURL - the URL of the storage account e.g. https://<account>.blob.core.windows.net/?<sas token>
//   - options - client options; pass nil to accept the default values
func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) {
	// A nil *ClientOptions is forwarded as a nil *service.ClientOptions.
	var svcOpts *service.ClientOptions
	if options != nil {
		svcOpts = &service.ClientOptions{ClientOptions: options.ClientOptions}
	}
	svc, err := service.NewClientWithNoCredential(serviceURL, svcOpts)
	if err != nil {
		return nil, err
	}
	return &Client{svc: svc}, nil
}
// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
//   - serviceURL - the URL of the storage account e.g. https://<account>.blob.core.windows.net/
//   - cred - a SharedKeyCredential created with the matching storage account and access key
//   - options - client options; pass nil to accept the default values
func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) {
	// ClientOptions and service.ClientOptions share an identical layout, so a direct
	// pointer conversion is safe (and preserves a nil options pointer as-is).
	svc, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, (*service.ClientOptions)(options))
	if err != nil {
		return nil, err
	}
	return &Client{svc: svc}, nil
}
// NewClientFromConnectionString creates an instance of Client with the specified values.
//   - connectionString - a connection string for the desired storage account
//   - options - client options; pass nil to accept the default values
func NewClientFromConnectionString(connectionString string, options *ClientOptions) (*Client, error) {
	if options == nil {
		options = &ClientOptions{}
	}
	// service.NewClientFromConnectionString returns a service-level client; name it
	// accordingly (the previous name, containerClient, was misleading).
	svcClient, err := service.NewClientFromConnectionString(connectionString, (*service.ClientOptions)(options))
	if err != nil {
		return nil, err
	}
	return &Client{
		svc: svcClient,
	}, nil
}
// URL returns the URL endpoint used by the Client object.
// (The previous comment said "BlobClient object", which was stale: the receiver is Client.)
func (c *Client) URL() string {
	return c.svc.URL()
}
// ServiceClient returns the embedded service client for this client.
// Callers can use it to reach service-level operations not wrapped by Client.
func (c *Client) ServiceClient() *service.Client {
	return c.svc
}
// CreateContainer is a lifecycle method to creates a new container under the specified account.
// If the container with the same name already exists, a ResourceExistsError will be raised.
// This method returns a client with which to interact with the newly created container.
func (c *Client) CreateContainer(ctx context.Context, containerName string, o *CreateContainerOptions) (CreateContainerResponse, error) {
	resp, err := c.svc.CreateContainer(ctx, containerName, o)
	return resp, err
}
// DeleteContainer is a lifecycle method that marks the specified container for deletion.
// The container and any blobs contained within it are later deleted during garbage collection.
// If the container is not found, a ResourceNotFoundError will be raised.
func (c *Client) DeleteContainer(ctx context.Context, containerName string, o *DeleteContainerOptions) (DeleteContainerResponse, error) {
	resp, err := c.svc.DeleteContainer(ctx, containerName, o)
	return resp, err
}
// DeleteBlob marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
// Note that deleting a blob also deletes all its snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
func (c *Client) DeleteBlob(ctx context.Context, containerName string, blobName string, o *DeleteBlobOptions) (DeleteBlobResponse, error) {
	containerClient := c.svc.NewContainerClient(containerName)
	blobClient := containerClient.NewBlobClient(blobName)
	return blobClient.Delete(ctx, o)
}
// NewListBlobsFlatPager returns a pager for blobs starting from the specified Marker. Use an empty
// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
func (c *Client) NewListBlobsFlatPager(containerName string, o *ListBlobsFlatOptions) *runtime.Pager[ListBlobsFlatResponse] {
	containerClient := c.svc.NewContainerClient(containerName)
	return containerClient.NewListBlobsFlatPager(o)
}
// NewListContainersPager operation returns a pager of the containers under the specified account.
// Use an empty Marker to start enumeration from the beginning. Container names are returned in lexicographic order.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-containers2.
func (c *Client) NewListContainersPager(o *ListContainersOptions) *runtime.Pager[ListContainersResponse] {
	pager := c.svc.NewListContainersPager(o)
	return pager
}
// UploadBuffer uploads a buffer in blocks to a block blob.
func (c *Client) UploadBuffer(ctx context.Context, containerName string, blobName string, buffer []byte, o *UploadBufferOptions) (UploadBufferResponse, error) {
	blockBlobClient := c.svc.NewContainerClient(containerName).NewBlockBlobClient(blobName)
	return blockBlobClient.UploadBuffer(ctx, buffer, o)
}
// UploadFile uploads a file in blocks to a block blob.
func (c *Client) UploadFile(ctx context.Context, containerName string, blobName string, file *os.File, o *UploadFileOptions) (UploadFileResponse, error) {
	blockBlobClient := c.svc.NewContainerClient(containerName).NewBlockBlobClient(blobName)
	return blockBlobClient.UploadFile(ctx, file, o)
}
// UploadStream copies the file held in io.Reader to the Blob at blockBlobClient.
// A Context deadline or cancellation will cause this to error.
func (c *Client) UploadStream(ctx context.Context, containerName string, blobName string, body io.Reader, o *UploadStreamOptions) (UploadStreamResponse, error) {
	blockBlobClient := c.svc.NewContainerClient(containerName).NewBlockBlobClient(blobName)
	return blockBlobClient.UploadStream(ctx, body, o)
}
// DownloadBuffer downloads an Azure blob to a buffer with parallel.
func (c *Client) DownloadBuffer(ctx context.Context, containerName string, blobName string, buffer []byte, o *DownloadBufferOptions) (int64, error) {
	blobClient := c.svc.NewContainerClient(containerName).NewBlobClient(blobName)
	// Wrap the caller's buffer in a WriterAt so parallel chunks can land at their offsets.
	return blobClient.DownloadBuffer(ctx, shared.NewBytesWriter(buffer), o)
}
// DownloadFile downloads an Azure blob to a local file.
// The file would be truncated if the size doesn't match.
func (c *Client) DownloadFile(ctx context.Context, containerName string, blobName string, file *os.File, o *DownloadFileOptions) (int64, error) {
	blobClient := c.svc.NewContainerClient(containerName).NewBlobClient(blobName)
	return blobClient.DownloadFile(ctx, file, o)
}
// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
func (c *Client) DownloadStream(ctx context.Context, containerName string, blobName string, o *DownloadStreamOptions) (DownloadStreamResponse, error) {
	// Copy the options so the callee cannot observe or mutate the caller's struct.
	opts := shared.CopyOptions(o)
	blobClient := c.svc.NewContainerClient(containerName).NewBlobClient(blobName)
	return blobClient.DownloadStream(ctx, opts)
}

View file

@ -0,0 +1,36 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package azblob
import (
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)
// SharedKeyCredential contains an account's name and its primary or secondary key.
type SharedKeyCredential = exported.SharedKeyCredential

// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the
// storage account's name and either its primary or secondary key.
func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) {
	cred, err := exported.NewSharedKeyCredential(accountName, accountKey)
	return cred, err
}
// URLParts object represents the components that make up an Azure Storage Container/Blob URL.
// NOTE: Changing any SAS-related field requires computing a new SAS signature.
type URLParts = sas.URLParts

// ParseURL parses a URL initializing URLParts' fields including any SAS-related & snapshot query parameters. Any other
// query parameters remain in the UnparsedParams field. This method overwrites all fields in the URLParts object.
func ParseURL(u string) (URLParts, error) {
	parts, err := sas.ParseURL(u)
	return parts, err
}
// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and
// ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange
// which has an offset but no zero value count indicates from the offset to the resource's end.
// It is an alias of the internal exported type so both packages share one definition.
type HTTPRange = exported.HTTPRange

View file

@ -0,0 +1,37 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package azblob
import (
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)
// PublicAccessType defines values for AccessType - private (default) or blob or container.
type PublicAccessType = generated.PublicAccessType

const (
	PublicAccessTypeBlob      PublicAccessType = generated.PublicAccessTypeBlob
	PublicAccessTypeContainer PublicAccessType = generated.PublicAccessTypeContainer
)

// PossiblePublicAccessTypeValues returns the possible values for the PublicAccessType const type.
func PossiblePublicAccessTypeValues() []PublicAccessType {
	values := generated.PossiblePublicAccessTypeValues()
	return values
}
// DeleteSnapshotsOptionType defines values for DeleteSnapshotsOptionType.
type DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionType

const (
	DeleteSnapshotsOptionTypeInclude DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeInclude
	DeleteSnapshotsOptionTypeOnly    DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeOnly
)

// PossibleDeleteSnapshotsOptionTypeValues returns the possible values for the DeleteSnapshotsOptionType const type.
func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType {
	values := generated.PossibleDeleteSnapshotsOptionTypeValues()
	return values
}

Some files were not shown because too many files have changed in this diff Show more