deps: update osbuild/images to 246b718310ea

Current main.
246b718310
This commit is contained in:
Achilleas Koutsou 2023-07-19 17:22:28 +02:00 committed by Ondřej Budai
parent 326f0cfa2f
commit 5c292c61c6
1437 changed files with 208886 additions and 87131 deletions

View file

@ -1,5 +1,56 @@
# Release History
## 1.6.0 (2023-05-04)
### Features Added
* Added support for ARM cross-tenant authentication. Set the `AuxiliaryTenants` field of `arm.ClientOptions` to enable.
* Added `TenantID` field to `policy.TokenRequestOptions`.
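For illustration, a hedged sketch of enabling cross-tenant authentication; `cred` is assumed to be an `azcore.TokenCredential` (for example from `azidentity`), and `armresources` is used only as an example ARM client:

```go
// Sketch: enable ARM cross-tenant (auxiliary tenant) authentication.
opts := &arm.ClientOptions{
	AuxiliaryTenants: []string{"<auxiliary-tenant-id>"}, // placeholder tenant ID
}
client, err := armresources.NewClient("<subscription-id>", cred, opts)
// TODO: handle error
```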
## 1.5.0 (2023-04-06)
### Features Added
* Added `ShouldRetry` to `policy.RetryOptions` for finer-grained control over when to retry.
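For illustration, a hedged sketch of a custom `ShouldRetry` predicate; per the field's documentation, the `*http.Response` and `error` arguments are mutually exclusive:

```go
// Sketch: retry on transport errors and HTTP 429 only, replacing the default status-code check.
opts := policy.RetryOptions{
	ShouldRetry: func(resp *http.Response, err error) bool {
		if err != nil {
			return true // transport-level error: retry
		}
		return resp.StatusCode == http.StatusTooManyRequests
	},
}
```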
### Breaking Changes
> These changes affect only code written against a beta version such as v1.5.0-beta.1
> These features will return in v1.6.0-beta.1.
* Removed `TokenRequestOptions.Claims` and `.TenantID`
* Removed ARM client support for CAE and cross-tenant auth.
### Bugs Fixed
* Added non-conformant LRO terminal states `Cancelled` and `Completed`.
### Other Changes
* Updated to latest `internal` module.
## 1.5.0-beta.1 (2023-03-02)
### Features Added
* This release includes the features added in v1.4.0-beta.1
## 1.4.0 (2023-03-02)
> This release doesn't include features added in v1.4.0-beta.1. They will return in v1.5.0-beta.1.
### Features Added
* Add `Clone()` method for `arm/policy.ClientOptions`.
### Bugs Fixed
* ARM's RP registration policy will no longer swallow unrecognized errors.
* Fixed an issue in `runtime.NewPollerFromResumeToken()` when resuming a `Poller` with a custom `PollingHandler`.
* Fixed wrong policy copy in `arm/runtime.NewPipeline()`.
## 1.4.0-beta.1 (2023-02-02)
### Features Added
* Added support for ARM cross-tenant authentication. Set the `AuxiliaryTenants` field of `arm.ClientOptions` to enable.
* Added `Claims` and `TenantID` fields to `policy.TokenRequestOptions`.
* ARM bearer token policy handles CAE challenges.
## 1.3.1 (2023-02-02)
### Other Changes
* Update dependencies to latest versions.
## 1.3.0 (2023-01-06)
### Features Added

View file

@ -11,8 +11,6 @@ import (
"io"
"net/http"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
)
type nopCloser struct {
@ -43,24 +41,6 @@ func HasStatusCode(resp *http.Response, statusCodes ...int) bool {
return false
}
// Payload reads and returns the response body or an error.
// On a successful read, the response body is cached.
// Subsequent reads will access the cached value.
// Exported as runtime.Payload().
func Payload(resp *http.Response) ([]byte, error) {
// r.Body won't be a nopClosingBytesReader if downloading was skipped
if buf, ok := resp.Body.(*shared.NopClosingBytesReader); ok {
return buf.Bytes(), nil
}
bytesBody, err := io.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
return nil, err
}
resp.Body = shared.NewNopClosingBytesReader(bytesBody)
return bytesBody, nil
}
// AccessToken represents an Azure service bearer access token with expiry information.
// Exported as azcore.AccessToken.
type AccessToken struct {
@ -73,6 +53,10 @@ type AccessToken struct {
type TokenRequestOptions struct {
// Scopes contains the list of permission scopes required for the token.
Scopes []string
// TenantID identifies the tenant from which to request the token. azidentity credentials authenticate in
// their configured default tenants when this field isn't set.
TenantID string
}
// TokenCredential represents a credential capable of providing an OAuth token.

View file

@ -103,6 +103,7 @@ func (req *Request) OperationValue(value interface{}) bool {
// SetBody sets the specified ReadSeekCloser as the HTTP request body, and sets Content-Type and Content-Length
// accordingly. If the ReadSeekCloser is nil or empty, Content-Length won't be set. If contentType is "",
// Content-Type won't be set.
// Use streaming.NopCloser to turn an io.ReadSeeker into an io.ReadSeekCloser.
func (req *Request) SetBody(body io.ReadSeekCloser, contentType string) error {
var err error
var size int64
@ -168,3 +169,14 @@ func (req *Request) Clone(ctx context.Context) *Request {
r2.req = req.req.Clone(ctx)
return &r2
}
// not exported but dependent on Request
// PolicyFunc is a type that implements the Policy interface.
// Use this type when implementing a stateless policy as a first-class function.
type PolicyFunc func(*Request) (*http.Response, error)
// Do implements the Policy interface on policyFunc.
func (pf PolicyFunc) Do(req *Request) (*http.Response, error) {
return pf(req)
}
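// Illustrative usage sketch: a stateless policy expressed as a first-class function.
// The header name below is hypothetical; Raw() and Next() are existing Request methods.
//
//	headerPolicy := PolicyFunc(func(req *Request) (*http.Response, error) {
//		req.Raw().Header.Set("x-example-header", "value")
//		return req.Next()
//	})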

View file

@ -12,6 +12,8 @@ import (
"fmt"
"net/http"
"regexp"
"github.com/Azure/azure-sdk-for-go/sdk/internal/exported"
)
// NewResponseError creates a new *ResponseError from the provided HTTP response.
@ -29,7 +31,7 @@ func NewResponseError(resp *http.Response) error {
}
// if we didn't get x-ms-error-code, check in the response body
body, err := Payload(resp)
body, err := exported.Payload(resp, nil)
if err != nil {
return err
}
@ -121,7 +123,7 @@ func (e *ResponseError) Error() string {
fmt.Fprintln(msg, "ERROR CODE UNAVAILABLE")
}
fmt.Fprintln(msg, "--------------------------------------------------------------------------------")
body, err := Payload(e.RawResponse)
body, err := exported.Payload(e.RawResponse, nil)
if err != nil {
// this really shouldn't fail at this point as the response
// body is already cached (it was read in NewResponseError)

View file

@ -16,6 +16,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/internal/poller"
)
// see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/async-api-reference.md
@ -68,15 +69,15 @@ func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.Fi
if asyncURL == "" {
return nil, errors.New("response is missing Azure-AsyncOperation header")
}
if !pollers.IsValidURL(asyncURL) {
if !poller.IsValidURL(asyncURL) {
return nil, fmt.Errorf("invalid polling URL %s", asyncURL)
}
// check for provisioning state. if the operation is a RELO
// and terminates synchronously this will prevent extra polling.
// it's ok if there's no provisioning state.
state, _ := pollers.GetProvisioningState(resp)
state, _ := poller.GetProvisioningState(resp)
if state == "" {
state = pollers.StatusInProgress
state = poller.StatusInProgress
}
p := &Poller[T]{
pl: pl,
@ -93,17 +94,17 @@ func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.Fi
// Done returns true if the LRO is in a terminal state.
func (p *Poller[T]) Done() bool {
return pollers.IsTerminalState(p.CurState)
return poller.IsTerminalState(p.CurState)
}
// Poll retrieves the current state of the LRO.
func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
err := pollers.PollHelper(ctx, p.AsyncURL, p.pl, func(resp *http.Response) (string, error) {
if !pollers.StatusCodeValid(resp) {
if !poller.StatusCodeValid(resp) {
p.resp = resp
return "", exported.NewResponseError(resp)
}
state, err := pollers.GetStatus(resp)
state, err := poller.GetStatus(resp)
if err != nil {
return "", err
} else if state == "" {
@ -122,7 +123,7 @@ func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
func (p *Poller[T]) Result(ctx context.Context, out *T) error {
if p.resp.StatusCode == http.StatusNoContent {
return nil
} else if pollers.Failed(p.CurState) {
} else if poller.Failed(p.CurState) {
return exported.NewResponseError(p.resp)
}
var req *exported.Request
@ -154,5 +155,5 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error {
p.resp = resp
}
return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out)
return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out)
}

View file

@ -14,6 +14,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
"github.com/Azure/azure-sdk-for-go/sdk/internal/poller"
)
// Kind is the identifier of this type in a resume token.
@ -72,9 +73,9 @@ func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) {
}
// default initial state to InProgress. depending on the HTTP
// status code and provisioning state, we might change the value.
curState := pollers.StatusInProgress
provState, err := pollers.GetProvisioningState(resp)
if err != nil && !errors.Is(err, pollers.ErrNoBody) {
curState := poller.StatusInProgress
provState, err := poller.GetProvisioningState(resp)
if err != nil && !errors.Is(err, poller.ErrNoBody) {
return nil, err
}
if resp.StatusCode == http.StatusCreated && provState != "" {
@ -85,37 +86,37 @@ func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) {
curState = provState
} else if provState == "" {
// for a 200, absence of provisioning state indicates success
curState = pollers.StatusSucceeded
curState = poller.StatusSucceeded
}
} else if resp.StatusCode == http.StatusNoContent {
curState = pollers.StatusSucceeded
curState = poller.StatusSucceeded
}
p.CurState = curState
return p, nil
}
func (p *Poller[T]) Done() bool {
return pollers.IsTerminalState(p.CurState)
return poller.IsTerminalState(p.CurState)
}
func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) {
if !pollers.StatusCodeValid(resp) {
if !poller.StatusCodeValid(resp) {
p.resp = resp
return "", exported.NewResponseError(resp)
}
if resp.StatusCode == http.StatusNoContent {
p.resp = resp
p.CurState = pollers.StatusSucceeded
p.CurState = poller.StatusSucceeded
return p.CurState, nil
}
state, err := pollers.GetProvisioningState(resp)
if errors.Is(err, pollers.ErrNoBody) {
state, err := poller.GetProvisioningState(resp)
if errors.Is(err, poller.ErrNoBody) {
// a missing response body in non-204 case is an error
return "", err
} else if state == "" {
// a response body without provisioning state is considered terminal success
state = pollers.StatusSucceeded
state = poller.StatusSucceeded
} else if err != nil {
return "", err
}
@ -130,5 +131,5 @@ func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
}
func (p *Poller[T]) Result(ctx context.Context, out *T) error {
return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out)
return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out)
}

View file

@ -16,6 +16,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/internal/poller"
)
// Kind is the identifier of this type in a resume token.
@ -61,15 +62,15 @@ func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) {
if locURL == "" {
return nil, errors.New("response is missing Location header")
}
if !pollers.IsValidURL(locURL) {
if !poller.IsValidURL(locURL) {
return nil, fmt.Errorf("invalid polling URL %s", locURL)
}
// check for provisioning state. if the operation is a RELO
// and terminates synchronously this will prevent extra polling.
// it's ok if there's no provisioning state.
state, _ := pollers.GetProvisioningState(resp)
state, _ := poller.GetProvisioningState(resp)
if state == "" {
state = pollers.StatusInProgress
state = poller.StatusInProgress
}
return &Poller[T]{
pl: pl,
@ -81,7 +82,7 @@ func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) {
}
func (p *Poller[T]) Done() bool {
return pollers.IsTerminalState(p.CurState)
return poller.IsTerminalState(p.CurState)
}
func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
@ -93,17 +94,17 @@ func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
// if provisioning state is available, use that. this is only
// for some ARM LRO scenarios (e.g. DELETE with a Location header)
// so if it's missing then use HTTP status code.
provState, _ := pollers.GetProvisioningState(resp)
provState, _ := poller.GetProvisioningState(resp)
p.resp = resp
if provState != "" {
p.CurState = provState
} else if resp.StatusCode == http.StatusAccepted {
p.CurState = pollers.StatusInProgress
p.CurState = poller.StatusInProgress
} else if resp.StatusCode > 199 && resp.StatusCode < 300 {
// any 2xx other than a 202 indicates success
p.CurState = pollers.StatusSucceeded
p.CurState = poller.StatusSucceeded
} else {
p.CurState = pollers.StatusFailed
p.CurState = poller.StatusFailed
}
return p.CurState, nil
})
@ -114,5 +115,5 @@ func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
}
func (p *Poller[T]) Result(ctx context.Context, out *T) error {
return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out)
return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out)
}

View file

@ -16,6 +16,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/internal/poller"
)
// Applicable returns true if the LRO is using Operation-Location.
@ -54,19 +55,19 @@ func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.Fi
if opURL == "" {
return nil, errors.New("response is missing Operation-Location header")
}
if !pollers.IsValidURL(opURL) {
if !poller.IsValidURL(opURL) {
return nil, fmt.Errorf("invalid Operation-Location URL %s", opURL)
}
locURL := resp.Header.Get(shared.HeaderLocation)
// Location header is optional
if locURL != "" && !pollers.IsValidURL(locURL) {
if locURL != "" && !poller.IsValidURL(locURL) {
return nil, fmt.Errorf("invalid Location URL %s", locURL)
}
// default initial state to InProgress. if the
// service sent us a status then use that instead.
curState := pollers.StatusInProgress
status, err := pollers.GetStatus(resp)
if err != nil && !errors.Is(err, pollers.ErrNoBody) {
curState := poller.StatusInProgress
status, err := poller.GetStatus(resp)
if err != nil && !errors.Is(err, poller.ErrNoBody) {
return nil, err
}
if status != "" {
@ -86,16 +87,16 @@ func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.Fi
}
func (p *Poller[T]) Done() bool {
return pollers.IsTerminalState(p.CurState)
return poller.IsTerminalState(p.CurState)
}
func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
err := pollers.PollHelper(ctx, p.OpLocURL, p.pl, func(resp *http.Response) (string, error) {
if !pollers.StatusCodeValid(resp) {
if !poller.StatusCodeValid(resp) {
p.resp = resp
return "", exported.NewResponseError(resp)
}
state, err := pollers.GetStatus(resp)
state, err := poller.GetStatus(resp)
if err != nil {
return "", err
} else if state == "" {
@ -118,7 +119,7 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error {
req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL)
} else if p.FinalState == pollers.FinalStateViaOpLocation && p.Method == http.MethodPost {
// no final GET required, terminal response should have it
} else if rl, rlErr := pollers.GetResourceLocation(p.resp); rlErr != nil && !errors.Is(rlErr, pollers.ErrNoBody) {
} else if rl, rlErr := poller.GetResourceLocation(p.resp); rlErr != nil && !errors.Is(rlErr, poller.ErrNoBody) {
return rlErr
} else if rl != "" {
req, err = exported.NewRequest(ctx, http.MethodGet, rl)
@ -140,5 +141,5 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error {
p.resp = resp
}
return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out)
return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), out)
}

View file

@ -12,49 +12,15 @@ import (
"errors"
"fmt"
"net/http"
"net/url"
"reflect"
"strings"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
azexported "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/internal/poller"
)
// the well-known set of LRO status/provisioning state values.
const (
StatusSucceeded = "Succeeded"
StatusCanceled = "Canceled"
StatusFailed = "Failed"
StatusInProgress = "InProgress"
)
// IsTerminalState returns true if the LRO's state is terminal.
func IsTerminalState(s string) bool {
return strings.EqualFold(s, StatusSucceeded) || strings.EqualFold(s, StatusFailed) || strings.EqualFold(s, StatusCanceled)
}
// Failed returns true if the LRO's state is terminal failure.
func Failed(s string) bool {
return strings.EqualFold(s, StatusFailed) || strings.EqualFold(s, StatusCanceled)
}
// Succeeded returns true if the LRO's state is terminal success.
func Succeeded(s string) bool {
return strings.EqualFold(s, StatusSucceeded)
}
// returns true if the LRO response contains a valid HTTP status code
func StatusCodeValid(resp *http.Response) bool {
return exported.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusCreated, http.StatusNoContent)
}
// IsValidURL verifies that the URL is valid and absolute.
func IsValidURL(s string) bool {
u, err := url.Parse(s)
return err == nil && u.IsAbs()
}
// getTokenTypeName creates a type name from the type parameter T.
func getTokenTypeName[T any]() (string, error) {
tt := shared.TypeOfT[T]()
@ -130,102 +96,6 @@ func IsTokenValid[T any](token string) error {
return nil
}
// ErrNoBody is returned if the response didn't contain a body.
var ErrNoBody = errors.New("the response did not contain a body")
// GetJSON reads the response body into a raw JSON object.
// It returns ErrNoBody if there was no content.
func GetJSON(resp *http.Response) (map[string]interface{}, error) {
body, err := exported.Payload(resp)
if err != nil {
return nil, err
}
if len(body) == 0 {
return nil, ErrNoBody
}
// unmarshal the body to get the value
var jsonBody map[string]interface{}
if err = json.Unmarshal(body, &jsonBody); err != nil {
return nil, err
}
return jsonBody, nil
}
// provisioningState returns the provisioning state from the response or the empty string.
func provisioningState(jsonBody map[string]interface{}) string {
jsonProps, ok := jsonBody["properties"]
if !ok {
return ""
}
props, ok := jsonProps.(map[string]interface{})
if !ok {
return ""
}
rawPs, ok := props["provisioningState"]
if !ok {
return ""
}
ps, ok := rawPs.(string)
if !ok {
return ""
}
return ps
}
// status returns the status from the response or the empty string.
func status(jsonBody map[string]interface{}) string {
rawStatus, ok := jsonBody["status"]
if !ok {
return ""
}
status, ok := rawStatus.(string)
if !ok {
return ""
}
return status
}
// GetStatus returns the LRO's status from the response body.
// Typically used for Azure-AsyncOperation flows.
// If there is no status in the response body the empty string is returned.
func GetStatus(resp *http.Response) (string, error) {
jsonBody, err := GetJSON(resp)
if err != nil {
return "", err
}
return status(jsonBody), nil
}
// GetProvisioningState returns the LRO's state from the response body.
// If there is no state in the response body the empty string is returned.
func GetProvisioningState(resp *http.Response) (string, error) {
jsonBody, err := GetJSON(resp)
if err != nil {
return "", err
}
return provisioningState(jsonBody), nil
}
// GetResourceLocation returns the LRO's resourceLocation value from the response body.
// Typically used for Operation-Location flows.
// If there is no resourceLocation in the response body the empty string is returned.
func GetResourceLocation(resp *http.Response) (string, error) {
jsonBody, err := GetJSON(resp)
if err != nil {
return "", err
}
v, ok := jsonBody["resourceLocation"]
if !ok {
// it might be ok if the field doesn't exist, the caller must make that determination
return "", nil
}
vv, ok := v.(string)
if !ok {
return "", fmt.Errorf("the resourceLocation value %v was not in string format", v)
}
return vv, nil
}
// used if the operation synchronously completed
type NopPoller[T any] struct {
resp *http.Response
@ -239,7 +109,7 @@ func NewNopPoller[T any](resp *http.Response) (*NopPoller[T], error) {
if resp.StatusCode == http.StatusNoContent {
return np, nil
}
payload, err := exported.Payload(resp)
payload, err := exported.Payload(resp, nil)
if err != nil {
return nil, err
}
@ -269,8 +139,8 @@ func (p *NopPoller[T]) Result(ctx context.Context, out *T) error {
// If the request fails, the update func is not called.
// The update func returns the state of the operation for logging purposes or an error
// if it fails to extract the required state from the response.
func PollHelper(ctx context.Context, endpoint string, pl exported.Pipeline, update func(resp *http.Response) (string, error)) error {
req, err := exported.NewRequest(ctx, http.MethodGet, endpoint)
func PollHelper(ctx context.Context, endpoint string, pl azexported.Pipeline, update func(resp *http.Response) (string, error)) error {
req, err := azexported.NewRequest(ctx, http.MethodGet, endpoint)
if err != nil {
return err
}
@ -296,13 +166,13 @@ func ResultHelper[T any](resp *http.Response, failed bool, out *T) error {
}
defer resp.Body.Close()
if !StatusCodeValid(resp) || failed {
if !poller.StatusCodeValid(resp) || failed {
// the LRO failed. unmarshal the error and update state
return exported.NewResponseError(resp)
return azexported.NewResponseError(resp)
}
// success case
payload, err := exported.Payload(resp)
payload, err := exported.Payload(resp, nil)
if err != nil {
return err
}

View file

@ -21,6 +21,8 @@ const (
HeaderOperationLocation = "Operation-Location"
HeaderRetryAfter = "Retry-After"
HeaderUserAgent = "User-Agent"
HeaderWWWAuthenticate = "WWW-Authenticate"
HeaderXMSClientRequestID = "x-ms-client-request-id"
)
const BearerTokenPrefix = "Bearer "
@ -30,5 +32,5 @@ const (
Module = "azcore"
// Version is the semantic version (see http://semver.org) of this module.
Version = "v1.3.0"
Version = "v1.6.0"
)

View file

@ -8,9 +8,7 @@ package shared
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"reflect"
"regexp"
@ -64,71 +62,6 @@ func TypeOfT[T any]() reflect.Type {
return reflect.TypeOf((*T)(nil)).Elem()
}
// BytesSetter abstracts replacing a byte slice on some type.
type BytesSetter interface {
Set(b []byte)
}
// NewNopClosingBytesReader creates a new *NopClosingBytesReader for the specified slice.
func NewNopClosingBytesReader(data []byte) *NopClosingBytesReader {
return &NopClosingBytesReader{s: data}
}
// NopClosingBytesReader is an io.ReadSeekCloser around a byte slice.
// It also provides direct access to the byte slice to avoid rereading.
type NopClosingBytesReader struct {
s []byte
i int64
}
// Bytes returns the underlying byte slice.
func (r *NopClosingBytesReader) Bytes() []byte {
return r.s
}
// Close implements the io.Closer interface.
func (*NopClosingBytesReader) Close() error {
return nil
}
// Read implements the io.Reader interface.
func (r *NopClosingBytesReader) Read(b []byte) (n int, err error) {
if r.i >= int64(len(r.s)) {
return 0, io.EOF
}
n = copy(b, r.s[r.i:])
r.i += int64(n)
return
}
// Set replaces the existing byte slice with the specified byte slice and resets the reader.
func (r *NopClosingBytesReader) Set(b []byte) {
r.s = b
r.i = 0
}
// Seek implements the io.Seeker interface.
func (r *NopClosingBytesReader) Seek(offset int64, whence int) (int64, error) {
var i int64
switch whence {
case io.SeekStart:
i = offset
case io.SeekCurrent:
i = r.i + offset
case io.SeekEnd:
i = int64(len(r.s)) + offset
default:
return 0, errors.New("nopClosingBytesReader: invalid whence")
}
if i < 0 {
return 0, errors.New("nopClosingBytesReader: negative position")
}
r.i = i
return i, nil
}
var _ BytesSetter = (*NopClosingBytesReader)(nil)
// TransportFunc is a helper to use a first-class func to satisfy the Transporter interface.
type TransportFunc func(*http.Request) (*http.Response, error)

View file

@ -99,7 +99,7 @@ type RetryOptions struct {
// MaxRetryDelay specifies the maximum delay allowed before retrying an operation.
// Typically the value is greater than or equal to the value specified in RetryDelay.
// The default Value is 120 seconds. A value less than zero means there is no cap.
// The default Value is 60 seconds. A value less than zero means there is no cap.
MaxRetryDelay time.Duration
// StatusCodes specifies the HTTP status codes that indicate the operation should be retried.
@ -113,6 +113,15 @@ type RetryOptions struct {
// Specifying values will replace the default values.
// Specifying an empty slice will disable retries for HTTP status codes.
StatusCodes []int
// ShouldRetry evaluates if the retry policy should retry the request.
// When specified, the function overrides comparison against the list of
// HTTP status codes and error checking within the retry policy. Context
// and NonRetriable errors remain evaluated before calling ShouldRetry.
// The *http.Response and error parameters are mutually exclusive, i.e.
// if one is nil, the other is not nil.
// A return value of true means the retry policy should retry.
ShouldRetry func(*http.Response, error) bool
}
// TelemetryOptions configures the telemetry policy's behavior.

View file

@ -7,8 +7,6 @@
package runtime
import (
"net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)
@ -46,7 +44,7 @@ func NewPipeline(module, version string, plOpts PipelineOptions, options *policy
}
// we put the includeResponsePolicy at the very beginning so that the raw response
// is populated with the final response (some policies might mutate the response)
policies := []policy.Policy{policyFunc(includeResponsePolicy)}
policies := []policy.Policy{exported.PolicyFunc(includeResponsePolicy)}
if cp.APIVersion != "" {
policies = append(policies, newAPIVersionPolicy(cp.APIVersion, &plOpts.APIVersion))
}
@ -59,19 +57,10 @@ func NewPipeline(module, version string, plOpts PipelineOptions, options *policy
policies = append(policies, plOpts.PerRetry...)
policies = append(policies, cp.PerRetryPolicies...)
policies = append(policies, NewLogPolicy(&cp.Logging))
policies = append(policies, policyFunc(httpHeaderPolicy), policyFunc(bodyDownloadPolicy))
policies = append(policies, exported.PolicyFunc(httpHeaderPolicy), exported.PolicyFunc(bodyDownloadPolicy))
transport := cp.Transport
if transport == nil {
transport = defaultHTTPClient
}
return exported.NewPipeline(transport, policies...)
}
// policyFunc is a type that implements the Policy interface.
// Use this type when implementing a stateless policy as a first-class function.
type policyFunc func(*policy.Request) (*http.Response, error)
// Do implements the Policy interface on policyFunc.
func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) {
return pf(req)
}

View file

@ -11,7 +11,6 @@ import (
"net/http"
"strings"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo"
)
@ -29,7 +28,7 @@ func bodyDownloadPolicy(req *policy.Request) (*http.Response, error) {
}
// Either bodyDownloadPolicyOpValues was not specified (so skip is false)
// or it was specified and skip is false: don't skip downloading the body
_, err = exported.Payload(resp)
_, err = Payload(resp)
if err != nil {
return resp, newBodyDownloadError(err, req)
}

View file

@ -11,6 +11,7 @@ import (
"fmt"
"io"
"net/http"
"net/url"
"sort"
"strings"
"time"
@ -66,12 +67,7 @@ func NewLogPolicy(o *policy.LogOptions) policy.Policy {
allowedHeaders[strings.ToLower(ah)] = struct{}{}
}
// now do the same thing for query params
allowedQP := map[string]struct{}{
"api-version": {},
}
for _, qp := range o.AllowedQueryParams {
allowedQP[strings.ToLower(qp)] = struct{}{}
}
allowedQP := getAllowedQueryParams(o.AllowedQueryParams)
return &logPolicy{
includeBody: o.IncludeBody,
allowedHeaders: allowedHeaders,
@ -79,6 +75,18 @@ func NewLogPolicy(o *policy.LogOptions) policy.Policy {
}
}
// getAllowedQueryParams merges the default set of allowed query parameters
// with a custom set (usually comes from client options).
func getAllowedQueryParams(customAllowedQP []string) map[string]struct{} {
allowedQP := map[string]struct{}{
"api-version": {},
}
for _, qp := range customAllowedQP {
allowedQP[strings.ToLower(qp)] = struct{}{}
}
return allowedQP
}
// logPolicyOpValues is the struct containing the per-operation values
type logPolicyOpValues struct {
try int32
@ -140,20 +148,24 @@ func (p *logPolicy) Do(req *policy.Request) (*http.Response, error) {
const redactedValue = "REDACTED"
// writeRequestWithResponse appends a formatted HTTP request into a Buffer. If request and/or err are
// not nil, then these are also written into the Buffer.
func (p *logPolicy) writeRequestWithResponse(b *bytes.Buffer, req *policy.Request, resp *http.Response, err error) {
// getSanitizedURL returns a sanitized string for the provided url.URL
func getSanitizedURL(u url.URL, allowedQueryParams map[string]struct{}) string {
// redact applicable query params
cpURL := *req.Raw().URL
qp := cpURL.Query()
qp := u.Query()
for k := range qp {
if _, ok := p.allowedQP[strings.ToLower(k)]; !ok {
if _, ok := allowedQueryParams[strings.ToLower(k)]; !ok {
qp.Set(k, redactedValue)
}
}
cpURL.RawQuery = qp.Encode()
u.RawQuery = qp.Encode()
return u.String()
}
// writeRequestWithResponse appends a formatted HTTP request into a Buffer. If request and/or err are
// not nil, then these are also written into the Buffer.
func (p *logPolicy) writeRequestWithResponse(b *bytes.Buffer, req *policy.Request, resp *http.Response, err error) {
// Write the request into the buffer.
fmt.Fprint(b, " "+req.Raw().Method+" "+cpURL.String()+"\n")
fmt.Fprint(b, " "+req.Raw().Method+" "+getSanitizedURL(*req.Raw().URL, p.allowedQP)+"\n")
p.writeHeader(b, req.Raw().Header)
if resp != nil {
fmt.Fprintln(b, " --------------------------------------------------------------------------------")

View file

@ -9,6 +9,7 @@ package runtime
import (
"net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
)
@ -21,13 +22,12 @@ func NewRequestIDPolicy() policy.Policy {
}
func (r *requestIDPolicy) Do(req *policy.Request) (*http.Response, error) {
const requestIdHeader = "x-ms-client-request-id"
if req.Raw().Header.Get(requestIdHeader) == "" {
if req.Raw().Header.Get(shared.HeaderXMSClientRequestID) == "" {
id, err := uuid.New()
if err != nil {
return nil, err
}
req.Raw().Header.Set(requestIdHeader, id.String())
req.Raw().Header.Set(shared.HeaderXMSClientRequestID, id.String())
}
return req.Next()

View file

@ -19,6 +19,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo"
"github.com/Azure/azure-sdk-for-go/sdk/internal/exported"
)
const (
@ -133,7 +134,7 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
// if the body was already downloaded or there was an error it's safe to cancel the context now
if err != nil {
tryCancel()
} else if _, ok := resp.Body.(*shared.NopClosingBytesReader); ok {
} else if exported.PayloadDownloaded(resp) {
tryCancel()
} else {
// must cancel the context after the body has been read and closed
@ -146,11 +147,7 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
log.Writef(log.EventRetryPolicy, "error %v", err)
}
if err == nil && !HasStatusCode(resp, options.StatusCodes...) {
// if there is no error and the response code isn't in the list of retry codes then we're done.
log.Write(log.EventRetryPolicy, "exit due to non-retriable status code")
return
} else if ctxErr := req.Raw().Context().Err(); ctxErr != nil {
if ctxErr := req.Raw().Context().Err(); ctxErr != nil {
// don't retry if the parent context has been cancelled or its deadline exceeded
err = ctxErr
log.Writef(log.EventRetryPolicy, "abort due to %v", err)
@ -165,6 +162,19 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
return
}
if options.ShouldRetry != nil {
// a non-nil ShouldRetry overrides our HTTP status code check
if !options.ShouldRetry(resp, err) {
// predicate says we shouldn't retry
log.Write(log.EventRetryPolicy, "exit due to ShouldRetry")
return
}
} else if err == nil && !HasStatusCode(resp, options.StatusCodes...) {
// if there is no error and the response code isn't in the list of retry codes then we're done.
log.Write(log.EventRetryPolicy, "exit due to non-retriable status code")
return
}
if try == options.MaxRetries+1 {
// max number of tries has been reached, don't sleep again
log.Writef(log.EventRetryPolicy, "MaxRetries %d exceeded", options.MaxRetries)

View file

@ -23,6 +23,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/internal/poller"
)
// FinalStateVia is the enumerated type for the possible final-state-via values.
@ -75,7 +76,7 @@ func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPol
defer resp.Body.Close()
// this is a back-stop in case the swagger is incorrect (i.e. missing one or more status codes for success).
// ideally the codegen should return an error if the initial response failed and not even create a poller.
if !pollers.StatusCodeValid(resp) {
if !poller.StatusCodeValid(resp) {
return nil, errors.New("the operation failed or was cancelled")
}
@ -146,7 +147,9 @@ func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options
opr := options.Handler
// now rehydrate the poller based on the encoded poller type
if async.CanResume(asJSON) {
if opr != nil {
log.Writef(log.EventLRO, "Resuming custom poller %T.", opr)
} else if async.CanResume(asJSON) {
opr, _ = async.New[T](pl, nil, "")
} else if body.CanResume(asJSON) {
opr, _ = body.New[T](pl, nil)
@ -154,8 +157,6 @@ func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options
opr, _ = loc.New[T](pl, nil)
} else if op.CanResume(asJSON) {
opr, _ = op.New[T](pl, nil, "")
} else if opr != nil {
log.Writef(log.EventLRO, "Resuming custom poller %T.", opr)
} else {
return nil, fmt.Errorf("unhandled poller token %s", string(raw))
}

View file

@ -15,15 +15,14 @@ import (
"io"
"net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/internal/exported"
)
// Payload reads and returns the response body or an error.
// On a successful read, the response body is cached.
// Subsequent reads will access the cached value.
func Payload(resp *http.Response) ([]byte, error) {
return exported.Payload(resp)
return exported.Payload(resp, nil)
}
// HasStatusCode returns true if the Response's status code is one of the specified values.
@ -92,15 +91,15 @@ func Drain(resp *http.Response) {
// removeBOM removes any byte-order mark prefix from the payload if present.
func removeBOM(resp *http.Response) error {
payload, err := Payload(resp)
_, err := exported.Payload(resp, &exported.PayloadOptions{
BytesModifier: func(b []byte) []byte {
// UTF8
return bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
},
})
if err != nil {
return err
}
// UTF8
trimmed := bytes.TrimPrefix(payload, []byte("\xef\xbb\xbf"))
if len(trimmed) < len(payload) {
resp.Body.(shared.BytesSetter).Set(trimmed)
}
return nil
}

View file

@ -20,6 +20,9 @@ type progress struct {
}
// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker.
// In addition to adding a Close method to an io.ReadSeeker, this can also be used to wrap an
// io.ReadSeekCloser with a no-op Close method to allow explicit control of when the io.ReadSeekCloser
// has its underlying stream closed.
func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {
return exported.NopCloser(rs)
}
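// Illustrative usage sketch (assumes a *policy.Request named req and a JSON payload):
//
//	body := bytes.NewReader([]byte(`{"name":"example"}`)) // hypothetical payload
//	err := req.SetBody(streaming.NopCloser(body), "application/json")
//	// TODO: handle error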

View file

@ -0,0 +1,124 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package exported
import (
"errors"
"io"
"net/http"
)
// HasStatusCode returns true if the Response's status code is one of the specified values.
// Exported as runtime.HasStatusCode().
func HasStatusCode(resp *http.Response, statusCodes ...int) bool {
if resp == nil {
return false
}
for _, sc := range statusCodes {
if resp.StatusCode == sc {
return true
}
}
return false
}
// PayloadOptions contains the optional values for the Payload func.
// NOT exported but used by azcore.
type PayloadOptions struct {
// BytesModifier receives the downloaded byte slice and returns an updated byte slice.
// Use this to modify the downloaded bytes in a payload (e.g. removing a BOM).
BytesModifier func([]byte) []byte
}
// Payload reads and returns the response body or an error.
// On a successful read, the response body is cached.
// Subsequent reads will access the cached value.
// Exported as runtime.Payload() WITHOUT the opts parameter.
func Payload(resp *http.Response, opts *PayloadOptions) ([]byte, error) {
modifyBytes := func(b []byte) []byte { return b }
if opts != nil && opts.BytesModifier != nil {
modifyBytes = opts.BytesModifier
}
// r.Body won't be a nopClosingBytesReader if downloading was skipped
if buf, ok := resp.Body.(*nopClosingBytesReader); ok {
bytesBody := modifyBytes(buf.Bytes())
buf.Set(bytesBody)
return bytesBody, nil
}
bytesBody, err := io.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
return nil, err
}
bytesBody = modifyBytes(bytesBody)
resp.Body = &nopClosingBytesReader{s: bytesBody}
return bytesBody, nil
}
// PayloadDownloaded returns true if the response body has already been downloaded.
// This implies that the Payload() func above has been previously called.
// NOT exported but used by azcore.
func PayloadDownloaded(resp *http.Response) bool {
_, ok := resp.Body.(*nopClosingBytesReader)
return ok
}
// nopClosingBytesReader is an io.ReadSeekCloser around a byte slice.
// It also provides direct access to the byte slice to avoid rereading.
type nopClosingBytesReader struct {
s []byte
i int64
}
// Bytes returns the underlying byte slice.
func (r *nopClosingBytesReader) Bytes() []byte {
return r.s
}
// Close implements the io.Closer interface.
func (*nopClosingBytesReader) Close() error {
return nil
}
// Read implements the io.Reader interface.
func (r *nopClosingBytesReader) Read(b []byte) (n int, err error) {
if r.i >= int64(len(r.s)) {
return 0, io.EOF
}
n = copy(b, r.s[r.i:])
r.i += int64(n)
return
}
// Set replaces the existing byte slice with the specified byte slice and resets the reader.
func (r *nopClosingBytesReader) Set(b []byte) {
r.s = b
r.i = 0
}
// Seek implements the io.Seeker interface.
func (r *nopClosingBytesReader) Seek(offset int64, whence int) (int64, error) {
var i int64
switch whence {
case io.SeekStart:
i = offset
case io.SeekCurrent:
i = r.i + offset
case io.SeekEnd:
i = int64(len(r.s)) + offset
default:
return 0, errors.New("nopClosingBytesReader: invalid whence")
}
if i < 0 {
return 0, errors.New("nopClosingBytesReader: negative position")
}
r.i = i
return i, nil
}

View file

@ -0,0 +1,155 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package poller
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"github.com/Azure/azure-sdk-for-go/sdk/internal/exported"
)
// the well-known set of LRO status/provisioning state values.
const (
StatusSucceeded = "Succeeded"
StatusCanceled = "Canceled"
StatusFailed = "Failed"
StatusInProgress = "InProgress"
)
// these are non-conformant states that we've seen in the wild.
// we support them for back-compat.
const (
StatusCancelled = "Cancelled"
StatusCompleted = "Completed"
)
// IsTerminalState returns true if the LRO's state is terminal.
func IsTerminalState(s string) bool {
return Failed(s) || Succeeded(s)
}
// Failed returns true if the LRO's state is terminal failure.
func Failed(s string) bool {
return strings.EqualFold(s, StatusFailed) || strings.EqualFold(s, StatusCanceled) || strings.EqualFold(s, StatusCancelled)
}
// Succeeded returns true if the LRO's state is terminal success.
func Succeeded(s string) bool {
return strings.EqualFold(s, StatusSucceeded) || strings.EqualFold(s, StatusCompleted)
}
// returns true if the LRO response contains a valid HTTP status code
func StatusCodeValid(resp *http.Response) bool {
return exported.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusCreated, http.StatusNoContent)
}
// IsValidURL verifies that the URL is valid and absolute.
func IsValidURL(s string) bool {
u, err := url.Parse(s)
return err == nil && u.IsAbs()
}
// ErrNoBody is returned if the response didn't contain a body.
var ErrNoBody = errors.New("the response did not contain a body")
// GetJSON reads the response body into a raw JSON object.
// It returns ErrNoBody if there was no content.
func GetJSON(resp *http.Response) (map[string]any, error) {
body, err := exported.Payload(resp, nil)
if err != nil {
return nil, err
}
if len(body) == 0 {
return nil, ErrNoBody
}
// unmarshal the body to get the value
var jsonBody map[string]any
if err = json.Unmarshal(body, &jsonBody); err != nil {
return nil, err
}
return jsonBody, nil
}
// provisioningState returns the provisioning state from the response or the empty string.
func provisioningState(jsonBody map[string]any) string {
jsonProps, ok := jsonBody["properties"]
if !ok {
return ""
}
props, ok := jsonProps.(map[string]any)
if !ok {
return ""
}
rawPs, ok := props["provisioningState"]
if !ok {
return ""
}
ps, ok := rawPs.(string)
if !ok {
return ""
}
return ps
}
// status returns the status from the response or the empty string.
func status(jsonBody map[string]any) string {
rawStatus, ok := jsonBody["status"]
if !ok {
return ""
}
status, ok := rawStatus.(string)
if !ok {
return ""
}
return status
}
// GetStatus returns the LRO's status from the response body.
// Typically used for Azure-AsyncOperation flows.
// If there is no status in the response body the empty string is returned.
func GetStatus(resp *http.Response) (string, error) {
jsonBody, err := GetJSON(resp)
if err != nil {
return "", err
}
return status(jsonBody), nil
}
// GetProvisioningState returns the LRO's state from the response body.
// If there is no state in the response body the empty string is returned.
func GetProvisioningState(resp *http.Response) (string, error) {
jsonBody, err := GetJSON(resp)
if err != nil {
return "", err
}
return provisioningState(jsonBody), nil
}
// GetResourceLocation returns the LRO's resourceLocation value from the response body.
// Typically used for Operation-Location flows.
// If there is no resourceLocation in the response body the empty string is returned.
func GetResourceLocation(resp *http.Response) (string, error) {
jsonBody, err := GetJSON(resp)
if err != nil {
return "", err
}
v, ok := jsonBody["resourceLocation"]
if !ok {
// it might be ok if the field doesn't exist, the caller must make that determination
return "", nil
}
vv, ok := v.(string)
if !ok {
return "", fmt.Errorf("the resourceLocation value %v was not in string format", v)
}
return vv, nil
}

View file

@ -1,5 +1,44 @@
# Release History
## 1.1.0 (2023-07-13)
### Features Added
* Added [Blob Batch API](https://learn.microsoft.com/rest/api/storageservices/blob-batch).
* Added support for bearer challenge for identity based managed disks.
* Added support for GetAccountInfo to container and blob level clients.
* Added [UploadBlobFromURL API](https://learn.microsoft.com/rest/api/storageservices/put-blob-from-url).
* Added support for CopySourceAuthorization to appendblob.AppendBlockFromURL
* Added support for tag permission in Container SAS.
### Bugs Fixed
* Fixed time formatting for the conditional request headers. Fixes [#20475](https://github.com/Azure/azure-sdk-for-go/issues/20475).
* Fixed an issue where passing a blob tags map of length 0 would result in the x-ms-tags header to be sent to the service with an empty string as value.
* Fixed block size and number of blocks calculation in `UploadBuffer` and `UploadFile`. Fixes [#20735](https://github.com/Azure/azure-sdk-for-go/issues/20735).
### Other Changes
* Add `dragonfly` to the list of build constraints for `blockblob`.
* Updating version of azcore to 1.6.0 and azidentity to 1.3.0
## 1.1.0-beta.1 (2023-05-09)
### Features Added
* Added [Blob Batch API](https://learn.microsoft.com/rest/api/storageservices/blob-batch).
* Added support for bearer challenge for identity based managed disks.
* Added support for GetAccountInfo to container and blob level clients.
* Added [UploadBlobFromURL API](https://learn.microsoft.com/rest/api/storageservices/put-blob-from-url).
* Added support for CopySourceAuthorization to appendblob.AppendBlockFromURL
* Added support for tag permission in Container SAS.
### Bugs Fixed
* Fixed time formatting for the conditional request headers. Fixes [#20475](https://github.com/Azure/azure-sdk-for-go/issues/20475).
* Fixed an issue where passing a blob tags map of length 0 would result in the x-ms-tags header to be sent to the service with an empty string as value.
## 1.0.0 (2023-02-07)
### Features Added

View file

@ -1,47 +1,51 @@
# Azure Blob Storage SDK for Go
# Azure Blob Storage module for Go
> Server Version: 2020-10-02
> Service Version: 2020-10-02
Azure Blob storage is Microsoft's object storage solution for the cloud. Blob
storage is optimized for storing massive amounts of unstructured data.
Unstructured data is data that does not adhere to a particular data model or
definition, such as text or binary data.
Azure Blob Storage is Microsoft's object storage solution for the cloud. Blob
Storage is optimized for storing massive amounts of unstructured data - data that does not adhere to a particular data model or
definition, such as text or binary data. For more information, see [Introduction to Azure Blob Storage](https://learn.microsoft.com/azure/storage/blobs/storage-blobs-introduction).
[Source code][source] | [API reference documentation][docs] | [REST API documentation][rest_docs] | [Product documentation][product_docs]
Use the Azure Blob Storage client module `github.com/Azure/azure-sdk-for-go/sdk/storage/azblob` to:
* Authenticate clients with Azure Blob Storage
* Manipulate containers and blobs in an Azure storage account
Key links:
[Source code][source] | [API reference documentation][docs] | [REST API documentation][rest_docs] | [Product documentation][product_docs] | [Samples][go_samples]
## Getting started
### Install the package
Install the Azure Blob Storage SDK for Go with [go get][goget]:
```Powershell
go get github.com/Azure/azure-sdk-for-go/sdk/storage/azblob
```
If you're going to authenticate with Azure Active Directory (recommended), install the [azidentity][azidentity] module.
```Powershell
go get github.com/Azure/azure-sdk-for-go/sdk/azidentity
```
### Prerequisites
A supported [Go][godevdl] version (the Azure SDK supports the two most recent Go releases).
You need an [Azure subscription][azure_sub] and a
[Storage Account][storage_account_docs] to use this package.
To create a new Storage Account, you can use the [Azure Portal][storage_account_create_portal],
- Go, version 1.18 or higher - [Install Go](https://go.dev/doc/install)
- Azure subscription - [Create a free account](https://azure.microsoft.com/free/)
- Azure storage account - To create a storage account, use tools including the [Azure portal][storage_account_create_portal],
[Azure PowerShell][storage_account_create_ps], or the [Azure CLI][storage_account_create_cli].
Here's an example using the Azure CLI:
```Powershell
```bash
az storage account create --name MyStorageAccount --resource-group MyResourceGroup --location westus --sku Standard_LRS
```
### Install the package
Install the Azure Blob Storage client module for Go with [go get][goget]:
```bash
go get github.com/Azure/azure-sdk-for-go/sdk/storage/azblob
```
If you plan to authenticate with Azure Active Directory (recommended), also install the [azidentity][azidentity] module.
```bash
go get github.com/Azure/azure-sdk-for-go/sdk/azidentity
```
### Authenticate the client
In order to interact with the Azure Blob Storage service, you'll need to create an instance of the `azblob.Client` type. The [azidentity][azidentity] module makes it easy to add Azure Active Directory support for authenticating Azure SDK clients with their corresponding Azure services.
To interact with the Azure Blob Storage service, you'll need to create an instance of the `azblob.Client` type. The [azidentity][azidentity] module makes it easy to add Azure Active Directory support for authenticating Azure SDK clients with their corresponding Azure services.
```go
// create a credential for authenticating with Azure Active Directory
@ -53,11 +57,17 @@ client, err := azblob.NewClient("https://MYSTORAGEACCOUNT.blob.core.windows.net/
// TODO: handle err
```
Learn more about enabling Azure Active Directory for authentication with Azure Storage in [our documentation][storage_ad] and [our samples](#next-steps).
Learn more about enabling Azure Active Directory for authentication with Azure Storage:
* [Authorize access to blobs using Azure Active Directory][storage_ad]
Other options for authentication include connection strings, shared key, shared access signatures (SAS), and anonymous public access. Use the appropriate client constructor function for the authentication mechanism you wish to use. For examples, see:
* [Blob samples][samples]
## Key concepts
Blob storage is designed for:
Blob Storage is designed for:
- Serving images or documents directly to a browser.
- Storing files for distributed access.
@ -66,23 +76,41 @@ Blob storage is designed for:
- Storing data for backup and restore, disaster recovery, and archiving.
- Storing data for analysis by an on-premises or Azure-hosted service.
Blob storage offers three types of resources:
Blob Storage offers three types of resources:
- The _storage account_
- One or more _containers_ in a storage account
- One ore more _blobs_ in a container
- One or more _blobs_ in a container
Instances of the `azblob.Client` type provide methods for manipulating containers and blobs within a storage account.
The storage account is specified when the `azblob.Client` is constructed.
Use the appropriate client constructor function for the authentication mechanism you wish to use.
Learn more about options for authentication _(including Connection Strings, Shared Key, Shared Access Signatures (SAS), Azure Active Directory (AAD), and anonymous public access)_ [in our examples.](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/storage/azblob/examples_test.go)
### Specialized clients
The Azure Blob Storage client module for Go also provides specialized clients in various subpackages. Use these clients when you need to interact with a specific kind of blob. Learn more about [block blobs, append blobs, and page blobs](https://learn.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs).
- [appendblob][append_blob]
- [blockblob][block_blob]
- [pageblob][page_blob]
The [blob][blob] package contains APIs common to all blob types. This includes APIs for deleting and undeleting a blob, setting metadata, and more.
The [lease][lease] package contains clients for managing leases on blobs and containers. See the [REST API reference](https://learn.microsoft.com/rest/api/storageservices/lease-blob#remarks) for general information on leases.
The [container][container] package contains APIs specific to containers. This includes APIs for setting access policies or properties, and more.
The [service][service] package contains APIs specific to the Blob service. This includes APIs for manipulating containers, retrieving account information, and more.
The [sas][sas] package contains utilities to aid in the creation and manipulation of shared access signature (SAS) tokens.
See the package's documentation for more information.
### Goroutine safety
We guarantee that all client instance methods are goroutine-safe and independent of each other ([guideline](https://azure.github.io/azure-sdk/golang_introduction.html#thread-safety)). This ensures that the recommendation of reusing client instances is always safe, even across goroutines.
### About blob metadata
Blob metadata name/value pairs are valid HTTP headers and should adhere to all restrictions governing HTTP headers. Metadata names must be valid HTTP header names, may contain only ASCII characters, and should be treated as case-insensitive. Base64-encode or URL-encode metadata values containing non-ASCII characters.
We guarantee that all client instance methods are goroutine-safe and independent of each other (see [guideline](https://azure.github.io/azure-sdk/golang_introduction.html#thread-safety)). This ensures that the recommendation to reuse client instances is always safe, even across goroutines.
### Blob metadata
Blob metadata name-value pairs are valid HTTP headers and should adhere to all restrictions governing HTTP headers. Metadata names must be valid HTTP header names, may contain only ASCII characters, and should be treated as case-insensitive. Base64-encode or URL-encode metadata values containing non-ASCII characters.
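For example, a hedged sketch of setting metadata with a URL-encoded value (assumes an existing `blob.Client` named `blobClient`; the exact `SetMetadata` signature may differ across module versions):

```go
// URL-encode the non-ASCII metadata value before sending it.
value := url.QueryEscape("café-menu")
_, err = blobClient.SetMetadata(context.TODO(), map[string]*string{
	"category": &value,
}, nil)
// TODO: handle error
```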
### Additional concepts
<!-- CLIENT COMMON BAR -->
@ -94,7 +122,7 @@ Blob metadata name/value pairs are valid HTTP headers and should adhere to all r
## Examples
### Uploading a blob
### Upload a blob
```go
const (
@ -122,7 +150,7 @@ _, err = client.UploadFile(context.TODO(), containerName, blobName, file, nil)
// TODO: handle error
```
### Downloading a blob
### Download a blob
```go
// this example accesses a public blob via anonymous access, so no credentials are required
@ -139,7 +167,7 @@ _, err = client.DownloadFile(context.TODO(), "samples", "cloud.jpg", file, nil)
// TODO: handle error
```
### Enumerating blobs
### Enumerate blobs
```go
const (
@ -177,7 +205,7 @@ All Blob service operations will return an
[*azcore.ResponseError][azcore_response_error] on failure with a
populated `ErrorCode` field. Many of these errors are recoverable.
The [bloberror][blob_error] package provides the possible Storage error codes
along with various helper facilities for error handling.
along with helper facilities for error handling.
```go
const (
@ -201,28 +229,7 @@ if bloberror.HasCode(err, bloberror.ContainerBeingDeleted, bloberror.ContainerNo
## Next steps
Get started with our [Blob samples][samples]. They contain complete examples of the above snippets and more.
### Specialized clients
The Azure Blob Storage SDK for Go also provides specialized clients in various subpackages.
Use these clients when you need to interact with a specific kind of blob.
Learn more about the various types of blobs from the following links.
- [appendblob][append_blob] - [REST docs](https://docs.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs#about-append-blobs)
- [blockblob][block_blob] - [REST docs](https://docs.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs#about-block-blobs)
- [pageblob][page_blob] - [REST docs](https://docs.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs#about-page-blobs)
The [blob][blob] package contains APIs common to all blob types. This includes APIs for deleting and undeleting a blob, setting metadata, and more.
The [lease][lease] package contains clients for managing leases on blobs and containers. Please see the [reference docs](https://docs.microsoft.com/rest/api/storageservices/lease-blob#remarks) for general information on leases.
The [container][container] package contains APIs specific to containers. This includes APIs setting access policies or properties, and more.
The [service][service] package contains APIs specific to blob service. This includes APIs for manipulating containers, retrieving account information, and more.
The [sas][sas] package contains utilities to aid in the creation and manipulation of Shared Access Signature tokens.
See the package's documentation for more information.
Get started with our [Blob samples][samples]. They contain complete examples of the above snippets and more.
## Contributing
@ -243,19 +250,20 @@ additional questions or comments.
<!-- LINKS -->
[source]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob
[docs]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob
[rest_docs]: https://docs.microsoft.com/rest/api/storageservices/blob-service-rest-api
[product_docs]: https://docs.microsoft.com/azure/storage/blobs/storage-blobs-overview
[docs]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob#section_documentation
[rest_docs]: https://learn.microsoft.com/rest/api/storageservices/blob-service-rest-api
[product_docs]: https://learn.microsoft.com/azure/storage/blobs/storage-blobs-overview
[godevdl]: https://go.dev/dl/
[goget]: https://pkg.go.dev/cmd/go#hdr-Add_dependencies_to_current_module_and_install_them
[storage_account_docs]: https://docs.microsoft.com/azure/storage/common/storage-account-overview
[storage_account_create_ps]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-powershell
[storage_account_create_cli]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-cli
[storage_account_create_portal]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-portal
[azure_cli]: https://docs.microsoft.com/cli/azure
[go_samples]: https://github.com/Azure-Samples/azure-sdk-for-go-samples/tree/main
[storage_account_docs]: https://learn.microsoft.com/azure/storage/common/storage-account-overview
[storage_account_create_ps]: https://learn.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-powershell
[storage_account_create_cli]: https://learn.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-cli
[storage_account_create_portal]: https://learn.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-portal
[azure_cli]: https://learn.microsoft.com/cli/azure
[azure_sub]: https://azure.microsoft.com/free/
[azidentity]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity
[storage_ad]: https://docs.microsoft.com/azure/storage/common/storage-auth-aad
[storage_ad]: https://learn.microsoft.com/azure/storage/common/storage-auth-aad
[azcore_response_error]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore#ResponseError
[samples]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/storage/azblob/examples_test.go
[append_blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/appendblob/client.go

View file

@ -8,6 +8,7 @@ package appendblob
import (
"context"
"errors"
"io"
"os"
"time"
@ -22,9 +23,7 @@ import (
)
// ClientOptions contains the optional parameters when creating a Client.
type ClientOptions struct {
azcore.ClientOptions
}
type ClientOptions base.ClientOptions
// Client represents a client to an Azure Storage append blob;
type Client base.CompositeClient[generated.BlobClient, generated.AppendBlobClient]
@ -34,7 +33,7 @@ type Client base.CompositeClient[generated.BlobClient, generated.AppendBlobClien
// - cred - an Azure AD credential, typically obtained via the azidentity module
// - options - client options; pass nil to accept the default values
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil)
authPolicy := shared.NewStorageChallengePolicy(cred)
conOptions := shared.GetClientOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
pl := runtime.NewPipeline(exported.ModuleName,
@ -255,14 +254,10 @@ func (ab *Client) SetLegalHold(ctx context.Context, legalHold bool, options *blo
return ab.BlobClient().SetLegalHold(ctx, legalHold, options)
}
// SetTier operation sets the tier on a blob. The operation is allowed on a page
// blob in a premium storage account and on a block blob in a blob storage account (locally
// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and
// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation
// does not update the blob's ETag.
// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
// SetTier
// Deprecated: SetTier only works for page blob in premium storage account and block blob in blob storage account.
func (ab *Client) SetTier(ctx context.Context, tier blob.AccessTier, o *blob.SetTierOptions) (blob.SetTierResponse, error) {
return ab.BlobClient().SetTier(ctx, tier, o)
return blob.SetTierResponse{}, errors.New("operation will not work on this blob type. SetTier only works for page blob in premium storage account and block blob in blob storage account")
}
// SetExpiry operation sets an expiry time on an existing blob. This operation is only allowed on Hierarchical Namespace enabled accounts.
@ -282,6 +277,12 @@ func (ab *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOption
return ab.BlobClient().GetProperties(ctx, o)
}
// GetAccountInfo provides account level information
// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures.
func (ab *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOptions) (blob.GetAccountInfoResponse, error) {
return ab.BlobClient().GetAccountInfo(ctx, o)
}
// SetHTTPHeaders changes a blob's HTTP headers.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
func (ab *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) {
@ -326,10 +327,10 @@ func (ab *Client) GetTags(ctx context.Context, o *blob.GetTagsOptions) (blob.Get
return ab.BlobClient().GetTags(ctx, o)
}
// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url.
// CopyFromURL
// Deprecated: CopyFromURL works only with block blob
func (ab *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.CopyFromURLOptions) (blob.CopyFromURLResponse, error) {
return ab.BlobClient().CopyFromURL(ctx, copySource, o)
return blob.CopyFromURLResponse{}, errors.New("operation will not work on this blob type. CopyFromURL works only with block blob")
}
// Concurrent Download Functions -----------------------------------------------------------------------------------------

View file

@ -100,6 +100,9 @@ func (o *AppendBlockOptions) format() (*generated.AppendBlobClientAppendBlockOpt
// AppendBlockFromURLOptions contains the optional parameters for the Client.AppendBlockFromURL method.
type AppendBlockFromURLOptions struct {
// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
CopySourceAuthorization *string
// SourceContentValidation contains the validation mechanism used on the range of bytes read from the source.
SourceContentValidation blob.SourceContentValidationType
@ -125,7 +128,8 @@ func (o *AppendBlockFromURLOptions) format() (*generated.AppendBlobClientAppendB
}
options := &generated.AppendBlobClientAppendBlockFromURLOptions{
SourceRange: exported.FormatHTTPRange(o.Range),
SourceRange: exported.FormatHTTPRange(o.Range),
CopySourceAuthorization: o.CopySourceAuthorization,
}
if o.SourceContentValidation != nil {

View file

@ -2,5 +2,5 @@
"AssetsRepo": "Azure/azure-sdk-assets",
"AssetsRepoPrefixPath": "go",
"TagPrefix": "go/storage/azblob",
"Tag": "go/storage/azblob_46e572d43a"
"Tag": "go/storage/azblob_a772b9c866"
}

View file

@ -25,9 +25,7 @@ import (
)
// ClientOptions contains the optional parameters when creating a Client.
type ClientOptions struct {
azcore.ClientOptions
}
type ClientOptions base.ClientOptions
// Client represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob.
type Client base.Client[generated.BlobClient]
@ -37,12 +35,12 @@ type Client base.Client[generated.BlobClient]
// - cred - an Azure AD credential, typically obtained via the azidentity module
// - options - client options; pass nil to accept the default values
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil)
authPolicy := shared.NewStorageChallengePolicy(cred)
conOptions := shared.GetClientOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
return (*Client)(base.NewBlobClient(blobURL, pl, nil)), nil
return (*Client)(base.NewBlobClient(blobURL, pl, &cred)), nil
}
// NewClientWithNoCredential creates an instance of Client with the specified values.
@ -100,6 +98,10 @@ func (b *Client) sharedKey() *SharedKeyCredential {
return base.SharedKey((*base.Client[generated.BlobClient])(b))
}
func (b *Client) credential() any {
return base.Credential((*base.Client[generated.BlobClient])(b))
}
// URL returns the URL endpoint used by the Client object.
func (b *Client) URL() string {
return b.generated().Endpoint()
@ -114,7 +116,7 @@ func (b *Client) WithSnapshot(snapshot string) (*Client, error) {
}
p.Snapshot = snapshot
return (*Client)(base.NewBlobClient(p.String(), b.generated().Pipeline(), b.sharedKey())), nil
return (*Client)(base.NewBlobClient(p.String(), b.generated().Pipeline(), b.credential())), nil
}
// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id.
@ -126,7 +128,7 @@ func (b *Client) WithVersionID(versionID string) (*Client, error) {
}
p.VersionID = versionID
return (*Client)(base.NewBlobClient(p.String(), b.generated().Pipeline(), b.sharedKey())), nil
return (*Client)(base.NewBlobClient(p.String(), b.generated().Pipeline(), b.credential())), nil
}
// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
@ -264,6 +266,14 @@ func (b *Client) CopyFromURL(ctx context.Context, copySource string, options *Co
return resp, err
}
// GetAccountInfo provides account level information
// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures.
func (b *Client) GetAccountInfo(ctx context.Context, o *GetAccountInfoOptions) (GetAccountInfoResponse, error) {
getAccountInfoOptions := o.format()
resp, err := b.generated().GetAccountInfo(ctx, getAccountInfoOptions)
return resp, err
}
// GetSASURL is a convenience method for generating a SAS token for the currently pointed at blob.
// It can only be used if the credential supplied during creation was a SharedKeyCredential.
func (b *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *GetSASURLOptions) (string, error) {
@ -313,12 +323,11 @@ func (b *Client) download(ctx context.Context, writer io.WriterAt, o downloadOpt
count := o.Range.Count
if count == CountToEnd { // If size not specified, calculate it
// If we don't have the length at all, get it
downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{}, nil)
dr, err := b.DownloadStream(ctx, downloadBlobOptions)
gr, err := b.GetProperties(ctx, o.getBlobPropertiesOptions())
if err != nil {
return 0, err
}
count = *dr.ContentLength - o.Range.Offset
count = *gr.ContentLength - o.Range.Offset
}
if count <= 0 {

View file

@ -565,3 +565,14 @@ func (o *CopyFromURLOptions) format() (*generated.BlobClientCopyFromURLOptions,
leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.BlobAccessConditions)
return options, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions
}
// ---------------------------------------------------------------------------------------------------------------------
// GetAccountInfoOptions provides set of options for Client.GetAccountInfo
type GetAccountInfoOptions struct {
// placeholder for future options
}
func (o *GetAccountInfoOptions) format() *generated.BlobClientGetAccountInfoOptions {
return nil
}

View file

@ -100,6 +100,9 @@ type SetLegalHoldResponse = generated.BlobClientSetLegalHoldResponse
// CopyFromURLResponse contains the response from method BlobClient.CopyFromURL.
type CopyFromURLResponse = generated.BlobClientCopyFromURLResponse
// GetAccountInfoResponse contains the response from method BlobClient.GetAccountInfo.
type GetAccountInfoResponse = generated.BlobClientGetAccountInfoResponse
// AcquireLeaseResponse contains the response from method BlobClient.AcquireLease.
type AcquireLeaseResponse = generated.BlobClientAcquireLeaseResponse

View file

@ -12,6 +12,7 @@ import (
"encoding/base64"
"errors"
"io"
"math"
"os"
"sync"
"time"
@ -30,9 +31,7 @@ import (
)
// ClientOptions contains the optional parameters when creating a Client.
type ClientOptions struct {
azcore.ClientOptions
}
type ClientOptions base.ClientOptions
// Client defines a set of operations applicable to block blobs.
type Client base.CompositeClient[generated.BlobClient, generated.BlockBlobClient]
@ -42,7 +41,7 @@ type Client base.CompositeClient[generated.BlobClient, generated.BlockBlobClient
// - cred - an Azure AD credential, typically obtained via the azidentity module
// - options - client options; pass nil to accept the default values
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil)
authPolicy := shared.NewStorageChallengePolicy(cred)
conOptions := shared.GetClientOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
@ -165,6 +164,19 @@ func (bb *Client) Upload(ctx context.Context, body io.ReadSeekCloser, options *U
return resp, err
}
// UploadBlobFromURL - The Put Blob from URL operation creates a new Block Blob where the contents of the blob are read from
// a given URL. Partial updates are not supported with Put Blob from URL; the content of an existing blob is overwritten
// with the content of the new blob. To perform partial updates to a block blobs contents using a source URL, use the Put
// Block from URL API in conjunction with Put Block List.
// For more information, see https://learn.microsoft.com/rest/api/storageservices/put-blob-from-url
func (bb *Client) UploadBlobFromURL(ctx context.Context, copySource string, options *UploadBlobFromURLOptions) (UploadBlobFromURLResponse, error) {
opts, httpHeaders, leaseAccessConditions, cpkInfo, cpkSourceInfo, modifiedAccessConditions, sourceModifiedConditions := options.format()
resp, err := bb.generated().PutBlobFromURL(ctx, int64(0), copySource, opts, httpHeaders, leaseAccessConditions, cpkInfo, cpkSourceInfo, modifiedAccessConditions, sourceModifiedConditions)
return resp, err
}
// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block.
@ -316,6 +328,12 @@ func (bb *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOption
return bb.BlobClient().GetProperties(ctx, o)
}
// GetAccountInfo provides account level information
// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures.
func (bb *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOptions) (blob.GetAccountInfoResponse, error) {
return bb.BlobClient().GetAccountInfo(ctx, o)
}
// SetHTTPHeaders changes a blob's HTTP headers.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
func (bb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) {
@ -370,31 +388,26 @@ func (bb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.Co
// uploadFromReader uploads a buffer in blocks to a block blob.
func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, actualSize int64, o *uploadFromReaderOptions) (uploadFromReaderResponse, error) {
readerSize := actualSize
if o.BlockSize == 0 {
// If bufferSize > (MaxStageBlockBytes * MaxBlocks), then error
if readerSize > MaxStageBlockBytes*MaxBlocks {
if actualSize > MaxStageBlockBytes*MaxBlocks {
return uploadFromReaderResponse{}, errors.New("buffer is too large to upload to a block blob")
}
// If bufferSize <= MaxUploadBlobBytes, then Upload should be used with just 1 I/O request
if readerSize <= MaxUploadBlobBytes {
if actualSize <= MaxUploadBlobBytes {
o.BlockSize = MaxUploadBlobBytes // Default if unspecified
} else {
if remainder := readerSize % MaxBlocks; remainder > 0 {
// ensure readerSize is a multiple of MaxBlocks
readerSize += (MaxBlocks - remainder)
}
o.BlockSize = readerSize / MaxBlocks // buffer / max blocks = block size to use all 50,000 blocks
if o.BlockSize < blob.DefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB
o.BlockSize = int64(math.Ceil(float64(actualSize) / MaxBlocks)) // ceil(buffer / max blocks) = block size to use all 50,000 blocks
if o.BlockSize < blob.DefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB
o.BlockSize = blob.DefaultDownloadBlockSize
}
// StageBlock will be called with blockSize blocks and a Concurrency of (BufferSize / BlockSize).
}
}
if readerSize <= MaxUploadBlobBytes {
if actualSize <= MaxUploadBlobBytes {
// If the size can fit in 1 Upload call, do it this way
var body io.ReadSeeker = io.NewSectionReader(reader, 0, readerSize)
var body io.ReadSeeker = io.NewSectionReader(reader, 0, actualSize)
if o.Progress != nil {
body = streaming.NewRequestProgress(shared.NopCloser(body), o.Progress)
}
@ -405,7 +418,7 @@ func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, actu
return toUploadReaderAtResponseFromUploadResponse(resp), err
}
var numBlocks = uint16(((readerSize - 1) / o.BlockSize) + 1)
var numBlocks = uint16(((actualSize - 1) / o.BlockSize) + 1)
if numBlocks > MaxBlocks {
// prevent any math bugs from attempting to upload too many blocks which will always fail
return uploadFromReaderResponse{}, errors.New("block limit exceeded")
@ -425,7 +438,7 @@ func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, actu
err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{
OperationName: "uploadFromReader",
TransferSize: readerSize,
TransferSize: actualSize,
ChunkSize: o.BlockSize,
Concurrency: o.Concurrency,
Operation: func(ctx context.Context, offset int64, chunkSize int64) error {

View file

@ -1,6 +1,6 @@
//go:build go1.18 && (linux || darwin || freebsd || openbsd || netbsd || solaris)
//go:build go1.18 && (linux || darwin || dragonfly || freebsd || openbsd || netbsd || solaris || aix)
// +build go1.18
// +build linux darwin freebsd openbsd netbsd solaris
// +build linux darwin dragonfly freebsd openbsd netbsd solaris aix
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.

View file

@ -26,7 +26,9 @@ func newMMB(size int64) (mmb, error) {
if err != nil {
return nil, os.NewSyscallError("CreateFileMapping", err)
}
defer syscall.CloseHandle(hMMF)
defer func() {
_ = syscall.CloseHandle(hMMF)
}()
addr, err := syscall.MapViewOfFile(hMMF, access, 0, 0, uintptr(size))
if err != nil {

View file

@ -70,6 +70,56 @@ func (o *UploadOptions) format() (*generated.BlockBlobClientUploadOptions, *gene
// ---------------------------------------------------------------------------------------------------------------------
// UploadBlobFromURLOptions contains the optional parameters for the Client.UploadBlobFromURL method.
type UploadBlobFromURLOptions struct {
// Optional. Used to set blob tags in various blob operations.
Tags map[string]string
// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
CopySourceAuthorization *string
// Optional, default is true. Indicates if properties from the source blob should be copied.
CopySourceBlobProperties *bool
// Optional. Specifies a user-defined name-value pair associated with the blob.
Metadata map[string]*string
// Optional. Specifies the md5 calculated for the range of bytes that must be read from the copy source.
SourceContentMD5 []byte
// Optional. Indicates the tier to be set on the blob.
Tier *blob.AccessTier
// Additional optional headers
HTTPHeaders *blob.HTTPHeaders
AccessConditions *blob.AccessConditions
CPKInfo *blob.CPKInfo
CPKScopeInfo *blob.CPKScopeInfo
SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions
}
func (o *UploadBlobFromURLOptions) format() (*generated.BlockBlobClientPutBlobFromURLOptions, *generated.BlobHTTPHeaders,
*generated.LeaseAccessConditions, *generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions,
*generated.SourceModifiedAccessConditions) {
if o == nil {
return nil, nil, nil, nil, nil, nil, nil
}
options := generated.BlockBlobClientPutBlobFromURLOptions{
BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags),
CopySourceAuthorization: o.CopySourceAuthorization,
CopySourceBlobProperties: o.CopySourceBlobProperties,
Metadata: o.Metadata,
SourceContentMD5: o.SourceContentMD5,
Tier: o.Tier,
}
leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
return &options, o.HTTPHeaders, leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions, o.SourceModifiedAccessConditions
}
// ---------------------------------------------------------------------------------------------------------------------
// StageBlockOptions contains the optional parameters for the Client.StageBlock method.
type StageBlockOptions struct {
CPKInfo *blob.CPKInfo

View file

@ -16,6 +16,9 @@ import (
// UploadResponse contains the response from method Client.Upload.
type UploadResponse = generated.BlockBlobClientUploadResponse
// UploadBlobFromURLResponse contains the response from the method Client.UploadBlobFromURL
type UploadBlobFromURLResponse = generated.BlockBlobClientPutBlobFromURLResponse
// StageBlockResponse contains the response from method Client.StageBlock.
type StageBlockResponse = generated.BlockBlobClientStageBlockResponse

View file

@ -26,3 +26,8 @@ stages:
parameters:
ServiceDirectory: 'storage/azblob'
RunLiveTests: true
EnvVars:
AZURE_CLIENT_ID: $(AZBLOB_CLIENT_ID)
AZURE_TENANT_ID: $(AZBLOB_TENANT_ID)
AZURE_CLIENT_SECRET: $(AZBLOB_CLIENT_SECRET)
AZURE_SUBSCRIPTION_ID: $(AZBLOB_SUBSCRIPTION_ID)

View file

@ -13,14 +13,13 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
)
// ClientOptions contains the optional parameters when creating a Client.
type ClientOptions struct {
azcore.ClientOptions
}
type ClientOptions base.ClientOptions
// Client represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob.
type Client struct {

View file

@ -0,0 +1,94 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package container
import (
"context"
"fmt"
"net/url"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
)
// BatchBuilder is used for creating the batch operations list. It contains the list of either delete or set tier sub-requests.
// NOTE: All sub-requests in the batch must be of the same type, either delete or set tier.
type BatchBuilder struct {
endpoint string
authPolicy policy.Policy
subRequests []*policy.Request
operationType *exported.BlobBatchOperationType
}
func (bb *BatchBuilder) checkOperationType(operationType exported.BlobBatchOperationType) error {
if bb.operationType == nil {
bb.operationType = &operationType
return nil
}
if *bb.operationType != operationType {
return fmt.Errorf("BlobBatch only supports one operation type per batch and is already being used for %s operations", *bb.operationType)
}
return nil
}
// Delete operation is used to add delete sub-request to the batch builder.
func (bb *BatchBuilder) Delete(blobName string, options *BatchDeleteOptions) error {
err := bb.checkOperationType(exported.BatchDeleteOperationType)
if err != nil {
return err
}
blobName = url.PathEscape(blobName)
blobURL := runtime.JoinPaths(bb.endpoint, blobName)
blobClient, err := blob.NewClientWithNoCredential(blobURL, nil)
if err != nil {
return err
}
deleteOptions, leaseInfo, accessConditions := options.format()
req, err := getGeneratedBlobClient(blobClient).DeleteCreateRequest(context.TODO(), deleteOptions, leaseInfo, accessConditions)
if err != nil {
return err
}
// remove x-ms-version header
exported.UpdateSubRequestHeaders(req)
bb.subRequests = append(bb.subRequests, req)
return nil
}
// SetTier operation is used to add set tier sub-request to the batch builder.
func (bb *BatchBuilder) SetTier(blobName string, accessTier blob.AccessTier, options *BatchSetTierOptions) error {
err := bb.checkOperationType(exported.BatchSetTierOperationType)
if err != nil {
return err
}
blobName = url.PathEscape(blobName)
blobURL := runtime.JoinPaths(bb.endpoint, blobName)
blobClient, err := blob.NewClientWithNoCredential(blobURL, nil)
if err != nil {
return err
}
setTierOptions, leaseInfo, accessConditions := options.format()
req, err := getGeneratedBlobClient(blobClient).SetTierCreateRequest(context.TODO(), accessTier, setTierOptions, leaseInfo, accessConditions)
if err != nil {
return err
}
// remove x-ms-version header
exported.UpdateSubRequestHeaders(req)
bb.subRequests = append(bb.subRequests, req)
return nil
}

View file

@ -7,7 +7,11 @@
package container
import (
"bytes"
"context"
"errors"
"fmt"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
"net/http"
"net/url"
@ -28,9 +32,7 @@ import (
)
// ClientOptions contains the optional parameters when creating a Client.
type ClientOptions struct {
azcore.ClientOptions
}
type ClientOptions base.ClientOptions
// Client represents a URL to the Azure Storage container allowing you to manipulate its blobs.
type Client base.Client[generated.ContainerClient]
@ -40,12 +42,12 @@ type Client base.Client[generated.ContainerClient]
// - cred - an Azure AD credential, typically obtained via the azidentity module
// - options - client options; pass nil to accept the default values
func NewClient(containerURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil)
authPolicy := shared.NewStorageChallengePolicy(cred)
conOptions := shared.GetClientOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
return (*Client)(base.NewContainerClient(containerURL, pl, nil)), nil
return (*Client)(base.NewContainerClient(containerURL, pl, &cred)), nil
}
// NewClientWithNoCredential creates an instance of Client with the specified values.
@ -102,6 +104,15 @@ func (c *Client) sharedKey() *SharedKeyCredential {
return base.SharedKey((*base.Client[generated.ContainerClient])(c))
}
func (c *Client) credential() any {
return base.Credential((*base.Client[generated.ContainerClient])(c))
}
// helper method to return the generated.BlobClient which is used for creating the sub-requests
func getGeneratedBlobClient(b *blob.Client) *generated.BlobClient {
return base.InnerClient((*base.Client[generated.BlobClient])(b))
}
// URL returns the URL endpoint used by the Client object.
func (c *Client) URL() string {
return c.generated().Endpoint()
@ -113,7 +124,7 @@ func (c *Client) URL() string {
func (c *Client) NewBlobClient(blobName string) *blob.Client {
blobName = url.PathEscape(blobName)
blobURL := runtime.JoinPaths(c.URL(), blobName)
return (*blob.Client)(base.NewBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey()))
return (*blob.Client)(base.NewBlobClient(blobURL, c.generated().Pipeline(), c.credential()))
}
// NewAppendBlobClient creates a new appendblob.Client object by concatenating blobName to the end of
@ -190,7 +201,7 @@ func (c *Client) Restore(ctx context.Context, deletedContainerVersion string, op
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata.
func (c *Client) GetProperties(ctx context.Context, o *GetPropertiesOptions) (GetPropertiesResponse, error) {
// NOTE: GetMetadata actually calls GetProperties internally because GetProperties returns the metadata AND the properties.
// This allows us to not expose a GetProperties method at all simplifying the API.
// This allows us to not expose a GetMetadata method at all simplifying the API.
// The optionals are nil, like they were in track 1.5
opts, leaseAccessConditions := o.format()
@ -226,6 +237,14 @@ func (c *Client) SetAccessPolicy(ctx context.Context, o *SetAccessPolicyOptions)
return resp, err
}
// GetAccountInfo provides account level information
// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures.
func (c *Client) GetAccountInfo(ctx context.Context, o *GetAccountInfoOptions) (GetAccountInfoResponse, error) {
getAccountInfoOptions := o.format()
resp, err := c.generated().GetAccountInfo(ctx, getAccountInfoOptions)
return resp, err
}
// NewListBlobsFlatPager returns a pager for blobs starting from the specified Marker. Use an empty
// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
@ -329,3 +348,67 @@ func (c *Client) GetSASURL(permissions sas.ContainerPermissions, expiry time.Tim
return endpoint, nil
}
// NewBatchBuilder creates an instance of BatchBuilder using the same auth policy as the client.
// BatchBuilder is used to build the batch consisting of either delete or set tier sub-requests.
// All sub-requests in the batch must be of the same type, either delete or set tier.
func (c *Client) NewBatchBuilder() (*BatchBuilder, error) {
var authPolicy policy.Policy
switch cred := c.credential().(type) {
case *azcore.TokenCredential:
authPolicy = shared.NewStorageChallengePolicy(*cred)
case *SharedKeyCredential:
authPolicy = exported.NewSharedKeyCredPolicy(cred)
case nil:
// for authentication using SAS
authPolicy = nil
default:
return nil, fmt.Errorf("unrecognised authentication type %T", cred)
}
return &BatchBuilder{
endpoint: c.URL(),
authPolicy: authPolicy,
}, nil
}
// SubmitBatch operation allows multiple API calls to be embedded into a single HTTP request.
// It builds the request body using the BatchBuilder object passed.
// BatchBuilder contains the list of operations to be submitted. It supports up to 256 sub-requests in a single batch.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/blob-batch.
func (c *Client) SubmitBatch(ctx context.Context, bb *BatchBuilder, options *SubmitBatchOptions) (SubmitBatchResponse, error) {
if bb == nil || len(bb.subRequests) == 0 {
return SubmitBatchResponse{}, errors.New("batch builder is empty")
}
// create the request body
batchReq, batchID, err := exported.CreateBatchRequest(&exported.BlobBatchBuilder{
AuthPolicy: bb.authPolicy,
SubRequests: bb.subRequests,
})
if err != nil {
return SubmitBatchResponse{}, err
}
reader := bytes.NewReader(batchReq)
rsc := streaming.NopCloser(reader)
multipartContentType := "multipart/mixed; boundary=" + batchID
resp, err := c.generated().SubmitBatch(ctx, int64(len(batchReq)), multipartContentType, rsc, options.format())
if err != nil {
return SubmitBatchResponse{}, err
}
batchResponses, err := exported.ParseBlobBatchResponse(resp.Body, resp.ContentType, bb.subRequests)
if err != nil {
return SubmitBatchResponse{}, err
}
return SubmitBatchResponse{
Responses: batchResponses,
ContentType: resp.ContentType,
RequestID: resp.RequestID,
Version: resp.Version,
}, nil
}
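// Example (sketch): assuming a *container.Client named containerClient, delete sub-requests
// can be batched and submitted as follows. All sub-requests in a single batch must use the
// same operation type (delete or set tier).
//
//	bb, err := containerClient.NewBatchBuilder()
//	if err != nil {
//		// TODO: handle error
//	}
//	if err := bb.Delete("blob1.txt", nil); err != nil {
//		// TODO: handle error
//	}
//	if err := bb.Delete("blob2.txt", nil); err != nil {
//		// TODO: handle error
//	}
//	resp, err := containerClient.SubmitBatch(context.TODO(), bb, nil)
//	if err != nil {
//		// TODO: handle error
//	}
//	for _, subResp := range resp.Responses {
//		if subResp.Error != nil {
//			// this sub-request failed; inspect subResp.BlobName and subResp.Error
//		}
//	}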

View file

@ -7,6 +7,7 @@
package container
import (
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"reflect"
"time"
@ -329,3 +330,70 @@ func formatTime(c *SignedIdentifier) error {
return nil
}
// ---------------------------------------------------------------------------------------------------------------------
// GetAccountInfoOptions provides set of options for Client.GetAccountInfo
type GetAccountInfoOptions struct {
// placeholder for future options
}
func (o *GetAccountInfoOptions) format() *generated.ContainerClientGetAccountInfoOptions {
return nil
}
// ---------------------------------------------------------------------------------------------------------------------
// BatchDeleteOptions contains the optional parameters for the BatchBuilder.Delete method.
type BatchDeleteOptions struct {
blob.DeleteOptions
VersionID *string
Snapshot *string
}
func (o *BatchDeleteOptions) format() (*generated.BlobClientDeleteOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) {
if o == nil {
return nil, nil, nil
}
basics := generated.BlobClientDeleteOptions{
DeleteSnapshots: o.DeleteSnapshots,
DeleteType: o.BlobDeleteType, // None by default
Snapshot: o.Snapshot,
VersionID: o.VersionID,
}
leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
return &basics, leaseAccessConditions, modifiedAccessConditions
}
// BatchSetTierOptions contains the optional parameters for the BatchBuilder.SetTier method.
type BatchSetTierOptions struct {
blob.SetTierOptions
VersionID *string
Snapshot *string
}
func (o *BatchSetTierOptions) format() (*generated.BlobClientSetTierOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) {
if o == nil {
return nil, nil, nil
}
basics := generated.BlobClientSetTierOptions{
RehydratePriority: o.RehydratePriority,
Snapshot: o.Snapshot,
VersionID: o.VersionID,
}
leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
return &basics, leaseAccessConditions, modifiedAccessConditions
}
// SubmitBatchOptions contains the optional parameters for the Client.SubmitBatch method.
type SubmitBatchOptions struct {
// placeholder for future options
}
func (o *SubmitBatchOptions) format() *generated.ContainerClientSubmitBatchOptions {
return nil
}

View file

@ -7,6 +7,7 @@
package container
import (
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)
@ -42,3 +43,24 @@ type GetAccessPolicyResponse = generated.ContainerClientGetAccessPolicyResponse
// SetAccessPolicyResponse contains the response from method Client.SetAccessPolicy.
type SetAccessPolicyResponse = generated.ContainerClientSetAccessPolicyResponse
// GetAccountInfoResponse contains the response from method Client.GetAccountInfo.
type GetAccountInfoResponse = generated.ContainerClientGetAccountInfoResponse
// SubmitBatchResponse contains the response from method Client.SubmitBatch.
type SubmitBatchResponse struct {
// Responses contains the responses of the sub-requests in the batch
Responses []*BatchResponseItem
// ContentType contains the information returned from the Content-Type header response.
ContentType *string
// RequestID contains the information returned from the x-ms-request-id header response.
RequestID *string
// Version contains the information returned from the x-ms-version header response.
Version *string
}
// BatchResponseItem contains the response for the individual sub-requests.
type BatchResponseItem = exported.BatchResponseItem

View file

@ -51,7 +51,7 @@ Use the key as the credential parameter to authenticate the client:
cred, err := azblob.NewSharedKeyCredential(accountName, accountKey)
handle(err)
serviceClient, err := azblob.NewServiceClientWithSharedKey(serviceURL, cred, nil)
serviceClient, err := azblob.NewClientWithSharedKeyCredential(serviceURL, cred, nil)
handle(err)
fmt.Println(serviceClient.URL())
@ -59,11 +59,12 @@ Use the key as the credential parameter to authenticate the client:
Using a Connection String
Depending on your use case and authorization method, you may prefer to initialize a client instance with a connection string instead of providing the account URL and credential separately.
To do this, pass the connection string to the service client's `NewServiceClientFromConnectionString` method.
To do this, pass the connection string to the service client's `NewClientFromConnectionString` method.
The connection string can be found in your storage account in the Azure Portal under the "Access Keys" section.
connStr := "DefaultEndpointsProtocol=https;AccountName=<my_account_name>;AccountKey=<my_account_key>;EndpointSuffix=core.windows.net"
serviceClient, err := azblob.NewServiceClientFromConnectionString(connStr, nil)
serviceClient, err := azblob.NewClientFromConnectionString(connStr, nil)
handle(err)
Using a Shared Access Signature (SAS) Token
@ -82,20 +83,20 @@ You can generate a SAS token from the Azure Portal under Shared Access Signature
cred, err := azblob.NewSharedKeyCredential(accountName, accountKey)
handle(err)
serviceClient, err := azblob.NewServiceClientWithSharedKey(serviceURL, cred, nil)
serviceClient, err := azblob.NewClientWithSharedKeyCredential(serviceURL, cred, nil)
handle(err)
fmt.Println(serviceClient.URL())
// Alternatively, you can create SAS on the fly
resources := azblob.AccountSASResourceTypes{Service: true}
permission := azblob.AccountSASPermissions{Read: true}
resources := sas.AccountResourceTypes{Service: true}
permission := sas.AccountPermissions{Read: true}
start := time.Now()
expiry := start.AddDate(0, 0, 1)
serviceURLWithSAS, err := serviceClient.GetSASURL(resources, permission, start, expiry)
serviceURLWithSAS, err := serviceClient.ServiceClient().GetSASURL(resources, permission, expiry, &service.GetSASURLOptions{StartTime: &start})
handle(err)
serviceClientWithSAS, err := azblob.NewServiceClientWithNoCredential(serviceURLWithSAS, nil)
serviceClientWithSAS, err := azblob.NewClientWithNoCredential(serviceURLWithSAS, nil)
handle(err)
fmt.Println(serviceClientWithSAS.URL())
@ -135,13 +136,13 @@ Examples
handle(err)
// The service URL for blob endpoints is usually in the form: http(s)://<account>.blob.core.windows.net/
serviceClient, err := azblob.NewServiceClientWithSharedKey(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil)
serviceClient, err := azblob.NewClientWithSharedKeyCredential(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil)
handle(err)
// ===== 1. Create a container =====
// First, create a container client, and use the Create method to create a new container in your account
containerClient, err := serviceClient.NewContainerClient("testcontainer")
containerClient := serviceClient.ServiceClient().NewContainerClient("testcontainer")
handle(err)
// All APIs have an options' bag struct as a parameter.
@ -154,13 +155,13 @@ Examples
uploadData := "Hello world!"
// Create a new blockBlobClient from the containerClient
blockBlobClient, err := containerClient.NewBlockBlobClient("HelloWorld.txt")
blockBlobClient := containerClient.NewBlockBlobClient("HelloWorld.txt")
handle(err)
// Upload data to the block blob
blockBlobUploadOptions := azblob.BlockBlobUploadOptions{
Metadata: map[string]string{"Foo": "Bar"},
TagsMap: map[string]string{"Year": "2022"},
blockBlobUploadOptions := blockblob.UploadOptions{
Metadata: map[string]*string{"Foo": to.Ptr("Bar")},
Tags: map[string]string{"Year": "2022"},
}
_, err = blockBlobClient.Upload(context.TODO(), streaming.NopCloser(strings.NewReader(uploadData)), &blockBlobUploadOptions)
handle(err)
@ -175,10 +176,9 @@ Examples
downloadData, err := io.ReadAll(reader)
handle(err)
if string(downloadData) != uploadData {
handle(errors.New("Uploaded data should be same as downloaded data"))
handle(errors.New("uploaded data should be same as downloaded data"))
}
if err = reader.Close(); err != nil {
handle(err)
return
@ -189,18 +189,15 @@ Examples
// To iterate over a page use the NextPage(context.Context) to fetch the next page of results.
// PageResponse() can be used to iterate over the results of the specific page.
// Always check the Err() method after paging to see if an error was returned by the pager. A pager will return either an error or the page of results.
pager := containerClient.ListBlobsFlat(nil)
for pager.NextPage(context.TODO()) {
resp := pager.PageResponse()
pager := containerClient.NewListBlobsFlatPager(nil)
for pager.More() {
resp, err := pager.NextPage(context.TODO())
handle(err)
for _, v := range resp.Segment.BlobItems {
fmt.Println(*v.Name)
}
}
if err = pager.Err(); err != nil {
handle(err)
}
// Delete the blob.
_, err = blockBlobClient.Delete(context.TODO(), nil)
handle(err)

View file

@ -7,14 +7,20 @@
package base
import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)
// ClientOptions contains the optional parameters when creating a Client.
type ClientOptions struct {
azcore.ClientOptions
}
type Client[T any] struct {
inner *T
sharedKey *exported.SharedKeyCredential
inner *T
credential any
}
func InnerClient[T any](client *Client[T]) *T {
@ -22,31 +28,40 @@ func InnerClient[T any](client *Client[T]) *T {
}
func SharedKey[T any](client *Client[T]) *exported.SharedKeyCredential {
return client.sharedKey
switch cred := client.credential.(type) {
case *exported.SharedKeyCredential:
return cred
default:
return nil
}
}
func Credential[T any](client *Client[T]) any {
return client.credential
}
func NewClient[T any](inner *T) *Client[T] {
return &Client[T]{inner: inner}
}
func NewServiceClient(containerURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.ServiceClient] {
func NewServiceClient(containerURL string, pipeline runtime.Pipeline, credential any) *Client[generated.ServiceClient] {
return &Client[generated.ServiceClient]{
inner: generated.NewServiceClient(containerURL, pipeline),
sharedKey: sharedKey,
inner: generated.NewServiceClient(containerURL, pipeline),
credential: credential,
}
}
func NewContainerClient(containerURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.ContainerClient] {
func NewContainerClient(containerURL string, pipeline runtime.Pipeline, credential any) *Client[generated.ContainerClient] {
return &Client[generated.ContainerClient]{
inner: generated.NewContainerClient(containerURL, pipeline),
sharedKey: sharedKey,
inner: generated.NewContainerClient(containerURL, pipeline),
credential: credential,
}
}
func NewBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.BlobClient] {
func NewBlobClient(blobURL string, pipeline runtime.Pipeline, credential any) *Client[generated.BlobClient] {
return &Client[generated.BlobClient]{
inner: generated.NewBlobClient(blobURL, pipeline),
sharedKey: sharedKey,
inner: generated.NewBlobClient(blobURL, pipeline),
credential: credential,
}
}

View file

@ -0,0 +1,279 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package exported
import (
"bufio"
"bytes"
"errors"
"fmt"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
"io"
"mime"
"mime/multipart"
"net/http"
"net/textproto"
"strconv"
"strings"
)
const (
batchIdPrefix = "batch_"
httpVersion = "HTTP/1.1"
httpNewline = "\r\n"
)
// createBatchID is used for creating a new batch id which is used as batch boundary in the request body
func createBatchID() (string, error) {
batchID, err := uuid.New()
if err != nil {
return "", err
}
return batchIdPrefix + batchID.String(), nil
}
// buildSubRequest is used for building the sub-request. Example:
// DELETE /container0/blob0 HTTP/1.1
// x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT
// Authorization: SharedKey account:G4jjBXA7LI/RnWKIOQ8i9xH4p76pAQ+4Fs4R1VxasaE=
// Content-Length: 0
func buildSubRequest(req *policy.Request) []byte {
var batchSubRequest strings.Builder
blobPath := req.Raw().URL.Path
if len(req.Raw().URL.RawQuery) > 0 {
blobPath += "?" + req.Raw().URL.RawQuery
}
batchSubRequest.WriteString(fmt.Sprintf("%s %s %s%s", req.Raw().Method, blobPath, httpVersion, httpNewline))
for k, v := range req.Raw().Header {
if strings.EqualFold(k, shared.HeaderXmsVersion) {
continue
}
if len(v) > 0 {
batchSubRequest.WriteString(fmt.Sprintf("%v: %v%v", k, v[0], httpNewline))
}
}
batchSubRequest.WriteString(httpNewline)
return []byte(batchSubRequest.String())
}
// CreateBatchRequest creates a new batch request using the sub-requests present in the BlobBatchBuilder.
//
// Example of a sub-request in the batch request body:
//
// --batch_357de4f7-6d0b-4e02-8cd2-6361411a9525
// Content-Type: application/http
// Content-Transfer-Encoding: binary
// Content-ID: 0
//
// DELETE /container0/blob0 HTTP/1.1
// x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT
// Authorization: SharedKey account:G4jjBXA7LI/RnWKIOQ8i9xH4p76pAQ+4Fs4R1VxasaE=
// Content-Length: 0
func CreateBatchRequest(bb *BlobBatchBuilder) ([]byte, string, error) {
batchID, err := createBatchID()
if err != nil {
return nil, "", err
}
// Create a new multipart buffer
reqBody := &bytes.Buffer{}
writer := multipart.NewWriter(reqBody)
// Set the boundary
err = writer.SetBoundary(batchID)
if err != nil {
return nil, "", err
}
partHeaders := make(textproto.MIMEHeader)
partHeaders["Content-Type"] = []string{"application/http"}
partHeaders["Content-Transfer-Encoding"] = []string{"binary"}
var partWriter io.Writer
for i, req := range bb.SubRequests {
if bb.AuthPolicy != nil {
_, err := bb.AuthPolicy.Do(req)
if err != nil && !strings.EqualFold(err.Error(), "no more policies") {
if log.Should(EventSubmitBatch) {
log.Writef(EventSubmitBatch, "failed to authorize sub-request for %v.\nError: %v", req.Raw().URL.Path, err.Error())
}
return nil, "", err
}
}
partHeaders["Content-ID"] = []string{fmt.Sprintf("%v", i)}
partWriter, err = writer.CreatePart(partHeaders)
if err != nil {
return nil, "", err
}
_, err = partWriter.Write(buildSubRequest(req))
if err != nil {
return nil, "", err
}
}
// Close the multipart writer
err = writer.Close()
if err != nil {
return nil, "", err
}
return reqBody.Bytes(), batchID, nil
}
// UpdateSubRequestHeaders updates the sub-request headers.
// Removes x-ms-version header.
func UpdateSubRequestHeaders(req *policy.Request) {
// remove x-ms-version header from the request header
for k := range req.Raw().Header {
if strings.EqualFold(k, shared.HeaderXmsVersion) {
delete(req.Raw().Header, k)
}
}
}
// BatchResponseItem contains the response for the individual sub-requests.
type BatchResponseItem struct {
ContentID *int
ContainerName *string
BlobName *string
RequestID *string
Version *string
Error error // nil error indicates that the batch sub-request operation is successful
}
func getResponseBoundary(contentType *string) (string, error) {
if contentType == nil {
return "", fmt.Errorf("Content-Type returned in SubmitBatch response is nil")
}
_, params, err := mime.ParseMediaType(*contentType)
if err != nil {
return "", err
}
if val, ok := params["boundary"]; ok {
return val, nil
} else {
return "", fmt.Errorf("batch boundary not present in Content-Type header of the SubmitBatch response.\nContent-Type: %v", *contentType)
}
}
func getContentID(part *multipart.Part) (*int, error) {
contentID := part.Header.Get("Content-ID")
if contentID == "" {
return nil, nil
}
val, err := strconv.Atoi(strings.TrimSpace(contentID))
if err != nil {
return nil, err
}
return &val, nil
}
func getResponseHeader(key string, resp *http.Response) *string {
val := resp.Header.Get(key)
if val == "" {
return nil
}
return &val
}
// ParseBlobBatchResponse is used for parsing the batch response body into individual sub-responses for each item in the batch.
func ParseBlobBatchResponse(respBody io.ReadCloser, contentType *string, subRequests []*policy.Request) ([]*BatchResponseItem, error) {
boundary, err := getResponseBoundary(contentType)
if err != nil {
return nil, err
}
respReader := multipart.NewReader(respBody, boundary)
var responses []*BatchResponseItem
for {
part, err := respReader.NextPart()
if errors.Is(err, io.EOF) {
break
} else if err != nil {
return nil, err
}
batchSubResponse := &BatchResponseItem{}
batchSubResponse.ContentID, err = getContentID(part)
if err != nil {
return nil, err
}
if batchSubResponse.ContentID != nil {
path := strings.Trim(subRequests[*batchSubResponse.ContentID].Raw().URL.Path, "/")
p := strings.Split(path, "/")
batchSubResponse.ContainerName = to.Ptr(p[0])
batchSubResponse.BlobName = to.Ptr(strings.Join(p[1:], "/"))
}
respBytes, err := io.ReadAll(part)
if err != nil {
return nil, err
}
respBytes = append(respBytes, byte('\n'))
buf := bytes.NewBuffer(respBytes)
resp, err := http.ReadResponse(bufio.NewReader(buf), nil)
// sub-response parsing error
if err != nil {
return nil, err
}
batchSubResponse.RequestID = getResponseHeader(shared.HeaderXmsRequestID, resp)
batchSubResponse.Version = getResponseHeader(shared.HeaderXmsVersion, resp)
// sub-response failure
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
if len(responses) == 0 && batchSubResponse.ContentID == nil {
// this case can happen when the parent request fails.
// For example, batch request having more than 256 sub-requests.
return nil, fmt.Errorf("%v", string(respBytes))
}
resp.Request = subRequests[*batchSubResponse.ContentID].Raw()
batchSubResponse.Error = runtime.NewResponseError(resp)
}
responses = append(responses, batchSubResponse)
}
if len(responses) != len(subRequests) {
return nil, fmt.Errorf("expected %v responses, got %v for the batch ID: %v", len(subRequests), len(responses), boundary)
}
return responses, nil
}
// not exported but used for batch request creation
// BlobBatchBuilder is used for creating the blob batch request
type BlobBatchBuilder struct {
AuthPolicy policy.Policy
SubRequests []*policy.Request
}
// BlobBatchOperationType defines the operation of the blob batch sub-requests.
type BlobBatchOperationType string
const (
BatchDeleteOperationType BlobBatchOperationType = "delete"
BatchSetTierOperationType BlobBatchOperationType = "set tier"
)

View file

@ -1,5 +1,8 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Licensed under the MIT License. See License.txt in the project root for license information.
package exported
@ -11,4 +14,7 @@ import (
const (
// EventUpload is used when we compute number of blocks to upload and size of each block.
EventUpload log.Event = "azblob.Upload"
// EventSubmitBatch is used for logging events related to submit blob batch operation.
EventSubmitBatch log.Event = "azblob.SubmitBatch"
)

View file

@ -8,5 +8,5 @@ package exported
const (
ModuleName = "azblob"
ModuleVersion = "v1.0.0"
ModuleVersion = "v1.1.0"
)

View file

@ -30,7 +30,7 @@ directive:
where: $
transform: >-
return $.
replace(/func \(client \*ContainerClient\) NewListBlobFlatSegmentPager\(.+\/\/ listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request/s, `// listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request`).
replace(/func \(client \*ContainerClient\) NewListBlobFlatSegmentPager\(.+\/\/ listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request/s, `//\n// listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request`).
replace(/\(client \*ContainerClient\) listBlobFlatSegmentCreateRequest\(/, `(client *ContainerClient) ListBlobFlatSegmentCreateRequest(`).
replace(/\(client \*ContainerClient\) listBlobFlatSegmentHandleResponse\(/, `(client *ContainerClient) ListBlobFlatSegmentHandleResponse(`);
```
@ -43,7 +43,7 @@ directive:
where: $
transform: >-
return $.
replace(/func \(client \*ServiceClient\) NewListContainersSegmentPager\(.+\/\/ listContainersSegmentCreateRequest creates the ListContainersSegment request/s, `// listContainersSegmentCreateRequest creates the ListContainersSegment request`).
replace(/func \(client \*ServiceClient\) NewListContainersSegmentPager\(.+\/\/ listContainersSegmentCreateRequest creates the ListContainersSegment request/s, `//\n// listContainersSegmentCreateRequest creates the ListContainersSegment request`).
replace(/\(client \*ServiceClient\) listContainersSegmentCreateRequest\(/, `(client *ServiceClient) ListContainersSegmentCreateRequest(`).
replace(/\(client \*ServiceClient\) listContainersSegmentHandleResponse\(/, `(client *ServiceClient) ListContainersSegmentHandleResponse(`);
```
@ -384,4 +384,54 @@ directive:
transform: >-
return $.
replace(/xml:"CORS>CORSRule"/g, "xml:\"Cors>CorsRule\"");
```
```
### Fix Content-Type header in submit batch request
``` yaml
directive:
- from:
- zz_container_client.go
- zz_service_client.go
where: $
transform: >-
return $.
replace (/req.SetBody\(body\,\s+\"application\/xml\"\)/g, `req.SetBody(body, multipartContentType)`);
```
### Fix response status code check in submit batch request
``` yaml
directive:
- from: zz_service_client.go
where: $
transform: >-
return $.
replace(/if\s+!runtime\.HasStatusCode\(resp,\s+http\.StatusOK\)\s+\{\s*\n\t\treturn\s+ServiceClientSubmitBatchResponse\{\}\,\s+runtime\.NewResponseError\(resp\)\s*\n\t\}/g,
`if !runtime.HasStatusCode(resp, http.StatusAccepted) {\n\t\treturn ServiceClientSubmitBatchResponse{}, runtime.NewResponseError(resp)\n\t}`);
```
### Convert time to GMT for If-Modified-Since and If-Unmodified-Since request headers
``` yaml
directive:
- from:
- zz_container_client.go
- zz_blob_client.go
- zz_appendblob_client.go
- zz_blockblob_client.go
- zz_pageblob_client.go
where: $
transform: >-
return $.
replace (/req\.Raw\(\)\.Header\[\"If-Modified-Since\"\]\s+=\s+\[\]string\{modifiedAccessConditions\.IfModifiedSince\.Format\(time\.RFC1123\)\}/g,
`req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}`).
replace (/req\.Raw\(\)\.Header\[\"If-Unmodified-Since\"\]\s+=\s+\[\]string\{modifiedAccessConditions\.IfUnmodifiedSince\.Format\(time\.RFC1123\)\}/g,
`req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}`).
replace (/req\.Raw\(\)\.Header\[\"x-ms-source-if-modified-since\"\]\s+=\s+\[\]string\{sourceModifiedAccessConditions\.SourceIfModifiedSince\.Format\(time\.RFC1123\)\}/g,
`req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}`).
replace (/req\.Raw\(\)\.Header\[\"x-ms-source-if-unmodified-since\"\]\s+=\s+\[\]string\{sourceModifiedAccessConditions\.SourceIfUnmodifiedSince\.Format\(time\.RFC1123\)\}/g,
`req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}`).
replace (/req\.Raw\(\)\.Header\[\"x-ms-immutability-policy-until-date\"\]\s+=\s+\[\]string\{options\.ImmutabilityPolicyExpiry\.Format\(time\.RFC1123\)\}/g,
`req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}`);

View file

@ -6,7 +6,15 @@
package generated
import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
import (
"context"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"time"
)
// used to convert times from UTC to GMT before sending across the wire
var gmt = time.FixedZone("GMT", 0)
func (client *BlobClient) Endpoint() string {
return client.endpoint
@ -15,3 +23,11 @@ func (client *BlobClient) Endpoint() string {
func (client *BlobClient) Pipeline() runtime.Pipeline {
return client.pl
}
func (client *BlobClient) DeleteCreateRequest(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
return client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions)
}
func (client *BlobClient) SetTierCreateRequest(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
return client.setTierCreateRequest(ctx, tier, options, leaseAccessConditions, modifiedAccessConditions)
}

View file

@ -110,10 +110,10 @@ func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, co
req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -283,10 +283,10 @@ func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Cont
req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -298,10 +298,10 @@ func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Cont
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)}
@ -467,10 +467,10 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content
req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -489,7 +489,7 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content
req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
}
if options != nil && options.ImmutabilityPolicyExpiry != nil {
req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)}
req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}
}
if options != nil && options.ImmutabilityPolicyMode != nil {
req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}
@ -601,10 +601,10 @@ func (client *AppendBlobClient) sealCreateRequest(ctx context.Context, options *
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}

View file

@ -152,10 +152,10 @@ func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, duratio
req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -247,10 +247,10 @@ func (client *BlobClient) breakLeaseCreateRequest(ctx context.Context, options *
req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -350,10 +350,10 @@ func (client *BlobClient) changeLeaseCreateRequest(ctx context.Context, leaseID
req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -458,10 +458,10 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour
req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
}
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)}
@ -470,10 +470,10 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour
req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -499,7 +499,7 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour
req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
}
if options != nil && options.ImmutabilityPolicyExpiry != nil {
req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)}
req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}
}
if options != nil && options.ImmutabilityPolicyMode != nil {
req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}
@ -625,10 +625,10 @@ func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, optio
req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -754,10 +754,10 @@ func (client *BlobClient) deleteCreateRequest(ctx context.Context, options *Blob
req.Raw().Header["x-ms-delete-snapshots"] = []string{string(*options.DeleteSnapshots)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -925,10 +925,10 @@ func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *Bl
req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -1277,10 +1277,10 @@ func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, option
req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -1670,10 +1670,10 @@ func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobC
req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -1883,10 +1883,10 @@ func (client *BlobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID
req.Raw().Header["x-ms-lease-action"] = []string{"release"}
req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -1974,10 +1974,10 @@ func (client *BlobClient) renewLeaseCreateRequest(ctx context.Context, leaseID s
req.Raw().Header["x-ms-lease-action"] = []string{"renew"}
req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -2162,10 +2162,10 @@ func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, optio
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -2265,10 +2265,10 @@ func (client *BlobClient) setImmutabilityPolicyCreateRequest(ctx context.Context
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if options != nil && options.ImmutabilityPolicyExpiry != nil {
req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)}
req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}
}
if options != nil && options.ImmutabilityPolicyMode != nil {
req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}
@ -2440,10 +2440,10 @@ func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options
req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -2719,10 +2719,10 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop
req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)}
}
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)}
@ -2734,10 +2734,10 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop
req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -2763,7 +2763,7 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop
req.Raw().Header["x-ms-seal-blob"] = []string{strconv.FormatBool(*options.SealBlob)}
}
if options != nil && options.ImmutabilityPolicyExpiry != nil {
req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)}
req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}
}
if options != nil && options.ImmutabilityPolicyMode != nil {
req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}

View file

@ -134,10 +134,10 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context,
req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -156,7 +156,7 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context,
req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
}
if options != nil && options.ImmutabilityPolicyExpiry != nil {
req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)}
req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}
}
if options != nil && options.ImmutabilityPolicyMode != nil {
req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}
@ -424,10 +424,10 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context,
req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -439,10 +439,10 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context,
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)}
@ -721,10 +721,10 @@ func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Contex
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)}
@ -882,10 +882,10 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL
req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -904,7 +904,7 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL
req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
}
if options != nil && options.ImmutabilityPolicyExpiry != nil {
req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)}
req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}
}
if options != nil && options.ImmutabilityPolicyMode != nil {
req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}

View file

@ -86,10 +86,10 @@ func (client *ContainerClient) acquireLeaseCreateRequest(ctx context.Context, du
req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
if options != nil && options.RequestID != nil {
@ -174,10 +174,10 @@ func (client *ContainerClient) breakLeaseCreateRequest(ctx context.Context, opti
req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
if options != nil && options.RequestID != nil {
@ -270,10 +270,10 @@ func (client *ContainerClient) changeLeaseCreateRequest(ctx context.Context, lea
req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
if options != nil && options.RequestID != nil {
@ -447,10 +447,10 @@ func (client *ContainerClient) deleteCreateRequest(ctx context.Context, options
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
if options != nil && options.RequestID != nil {
@ -963,10 +963,10 @@ func (client *ContainerClient) releaseLeaseCreateRequest(ctx context.Context, le
req.Raw().Header["x-ms-lease-action"] = []string{"release"}
req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
if options != nil && options.RequestID != nil {
@ -1115,10 +1115,10 @@ func (client *ContainerClient) renewLeaseCreateRequest(ctx context.Context, leas
req.Raw().Header["x-ms-lease-action"] = []string{"renew"}
req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
if options != nil && options.RequestID != nil {
@ -1277,10 +1277,10 @@ func (client *ContainerClient) setAccessPolicyCreateRequest(ctx context.Context,
req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
if options != nil && options.RequestID != nil {
@ -1372,7 +1372,7 @@ func (client *ContainerClient) setMetadataCreateRequest(ctx context.Context, opt
}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
if options != nil && options.RequestID != nil {
@ -1459,7 +1459,7 @@ func (client *ContainerClient) submitBatchCreateRequest(ctx context.Context, con
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["Accept"] = []string{"application/xml"}
return req, req.SetBody(body, "application/xml")
return req, req.SetBody(body, multipartContentType)
}
// submitBatchHandleResponse handles the SubmitBatch response.

View file

@ -108,10 +108,10 @@ func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, conte
req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -225,10 +225,10 @@ func (client *PageBlobClient) copyIncrementalCreateRequest(ctx context.Context,
}
req.Raw().URL.RawQuery = reqQP.Encode()
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -371,10 +371,10 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe
req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -397,7 +397,7 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe
req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
}
if options != nil && options.ImmutabilityPolicyExpiry != nil {
req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)}
req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}
}
if options != nil && options.ImmutabilityPolicyMode != nil {
req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}
@ -528,10 +528,10 @@ func (client *PageBlobClient) GetPageRangesCreateRequest(ctx context.Context, op
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -662,10 +662,10 @@ func (client *PageBlobClient) GetPageRangesDiffCreateRequest(ctx context.Context
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -780,10 +780,10 @@ func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobConte
req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -883,10 +883,10 @@ func (client *PageBlobClient) updateSequenceNumberCreateRequest(ctx context.Cont
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -1024,10 +1024,10 @@ func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, cont
req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -1196,10 +1196,10 @@ func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex
req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
@ -1211,10 +1211,10 @@ func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)}
req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)}
req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)}

View file

@ -513,7 +513,7 @@ func (client *ServiceClient) SubmitBatch(ctx context.Context, contentLength int6
if err != nil {
return ServiceClientSubmitBatchResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
if !runtime.HasStatusCode(resp, http.StatusAccepted) {
return ServiceClientSubmitBatchResponse{}, runtime.NewResponseError(resp)
}
return client.submitBatchHandleResponse(resp)
@ -539,7 +539,7 @@ func (client *ServiceClient) submitBatchCreateRequest(ctx context.Context, conte
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["Accept"] = []string{"application/xml"}
return req, req.SetBody(body, "application/xml")
return req, req.SetBody(body, multipartContentType)
}
// submitBatchHandleResponse handles the SubmitBatch response.

View file

@ -0,0 +1,113 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package shared
import (
"errors"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"net/http"
"strings"
)
type storageAuthorizer struct {
scopes []string
tenantID string
}
func NewStorageChallengePolicy(cred azcore.TokenCredential) policy.Policy {
s := storageAuthorizer{scopes: []string{TokenScope}}
return runtime.NewBearerTokenPolicy(cred, []string{TokenScope}, &policy.BearerTokenOptions{
AuthorizationHandler: policy.AuthorizationHandler{
OnRequest: s.onRequest,
OnChallenge: s.onChallenge,
},
})
}
func (s *storageAuthorizer) onRequest(req *policy.Request, authNZ func(policy.TokenRequestOptions) error) error {
return authNZ(policy.TokenRequestOptions{Scopes: s.scopes})
}
func (s *storageAuthorizer) onChallenge(req *policy.Request, resp *http.Response, authNZ func(policy.TokenRequestOptions) error) error {
// parse the challenge
err := s.parseChallenge(resp)
if err != nil {
return err
}
// TODO: Set tenantID when policy.TokenRequestOptions supports it. https://github.com/Azure/azure-sdk-for-go/issues/19841
return authNZ(policy.TokenRequestOptions{Scopes: s.scopes})
}
type challengePolicyError struct {
err error
}
func (c *challengePolicyError) Error() string {
return c.err.Error()
}
func (*challengePolicyError) NonRetriable() {
// marker method
}
func (c *challengePolicyError) Unwrap() error {
return c.err
}
// parses Tenant ID from auth challenge
// https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000/oauth2/authorize
func parseTenant(url string) string {
if url == "" {
return ""
}
parts := strings.Split(url, "/")
// index 3 is the first path segment after the host, i.e. the tenant ID
if len(parts) >= 4 {
tenant := parts[3]
tenant = strings.ReplaceAll(tenant, ",", "")
return tenant
} else {
return ""
}
}
func (s *storageAuthorizer) parseChallenge(resp *http.Response) error {
authHeader := resp.Header.Get("WWW-Authenticate")
if authHeader == "" {
return &challengePolicyError{err: errors.New("response has no WWW-Authenticate header for challenge authentication")}
}
// Strip down to auth and resource
// Format is "Bearer authorization_uri=\"<site>\" resource_id=\"<site>\""
authHeader = strings.ReplaceAll(authHeader, "Bearer ", "")
parts := strings.Split(authHeader, " ")
vals := map[string]string{}
for _, part := range parts {
subParts := strings.Split(part, "=")
if len(subParts) == 2 {
stripped := strings.ReplaceAll(subParts[1], "\"", "")
stripped = strings.TrimSuffix(stripped, ",")
vals[subParts[0]] = stripped
}
}
s.tenantID = parseTenant(vals["authorization_uri"])
scope := vals["resource_id"]
if scope == "" {
return &challengePolicyError{err: errors.New("could not find a valid resource in the WWW-Authenticate header")}
}
if !strings.HasSuffix(scope, "/.default") {
scope += "/.default"
}
s.scopes = []string{scope}
return nil
}
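For reference, this is roughly what the policy extracts from a storage bearer challenge. The snippet below is a standalone sketch of the same parsing, not the vendored implementation; the header value is a synthetic example following the format described in the comments above, with the tenant shown as the all-zeros placeholder:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Synthetic challenge, modeled on the documented format:
	// Bearer authorization_uri="<site>" resource_id="<site>"
	h := `Bearer authorization_uri="https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000/oauth2/authorize" resource_id="https://storage.azure.com"`
	h = strings.ReplaceAll(h, "Bearer ", "")
	vals := map[string]string{}
	for _, part := range strings.Split(h, " ") {
		kv := strings.Split(part, "=")
		if len(kv) == 2 {
			vals[kv[0]] = strings.TrimSuffix(strings.ReplaceAll(kv[1], `"`, ""), ",")
		}
	}
	// The tenant is the first path segment after the host of authorization_uri.
	tenant := ""
	if parts := strings.Split(vals["authorization_uri"], "/"); len(parts) >= 4 {
		tenant = parts[3]
	}
	// The resource becomes the token scope, suffixed with /.default.
	scope := vals["resource_id"]
	if !strings.HasSuffix(scope, "/.default") {
		scope += "/.default"
	}
	fmt.Println(tenant) // 00000000-0000-0000-0000-000000000000
	fmt.Println(scope)  // https://storage.azure.com/.default
}
```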

View file

@ -38,6 +38,8 @@ const (
HeaderIfNoneMatch = "If-None-Match"
HeaderIfUnmodifiedSince = "If-Unmodified-Since"
HeaderRange = "Range"
HeaderXmsVersion = "x-ms-version"
HeaderXmsRequestID = "x-ms-request-id"
)
const crc64Polynomial uint64 = 0x9A6C9329AC4BC9B5
@ -85,22 +87,6 @@ func ParseConnectionString(connectionString string) (ParsedConnectionString, err
connStrMap[parts[0]] = parts[1]
}
accountName, ok := connStrMap["AccountName"]
if !ok {
return ParsedConnectionString{}, errors.New("connection string missing AccountName")
}
accountKey, ok := connStrMap["AccountKey"]
if !ok {
sharedAccessSignature, ok := connStrMap["SharedAccessSignature"]
if !ok {
return ParsedConnectionString{}, errors.New("connection string missing AccountKey and SharedAccessSignature")
}
return ParsedConnectionString{
ServiceURL: fmt.Sprintf("%v://%v.blob.%v/?%v", defaultScheme, accountName, defaultSuffix, sharedAccessSignature),
}, nil
}
protocol, ok := connStrMap["DefaultEndpointsProtocol"]
if !ok {
protocol = defaultScheme
@ -111,24 +97,45 @@ func ParseConnectionString(connectionString string) (ParsedConnectionString, err
suffix = defaultSuffix
}
if blobEndpoint, ok := connStrMap["BlobEndpoint"]; ok {
blobEndpoint, has_blobEndpoint := connStrMap["BlobEndpoint"]
accountName, has_accountName := connStrMap["AccountName"]
var serviceURL string
if has_blobEndpoint {
serviceURL = blobEndpoint
} else if has_accountName {
serviceURL = fmt.Sprintf("%v://%v.blob.%v", protocol, accountName, suffix)
} else {
return ParsedConnectionString{}, errors.New("connection string needs either AccountName or BlobEndpoint")
}
if !strings.HasSuffix(serviceURL, "/") {
// add a trailing slash to be consistent with the portal
serviceURL += "/"
}
accountKey, has_accountKey := connStrMap["AccountKey"]
sharedAccessSignature, has_sharedAccessSignature := connStrMap["SharedAccessSignature"]
if has_accountName && has_accountKey {
return ParsedConnectionString{
ServiceURL: blobEndpoint,
ServiceURL: serviceURL,
AccountName: accountName,
AccountKey: accountKey,
}, nil
} else if has_sharedAccessSignature {
return ParsedConnectionString{
ServiceURL: fmt.Sprintf("%v?%v", serviceURL, sharedAccessSignature),
}, nil
} else {
return ParsedConnectionString{}, errors.New("connection string needs either AccountKey or SharedAccessSignature")
}
return ParsedConnectionString{
ServiceURL: fmt.Sprintf("%v://%v.blob.%v", protocol, accountName, suffix),
AccountName: accountName,
AccountKey: accountKey,
}, nil
}
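// Illustrative sketch (not part of the vendored change): with the reworked logic,
// a hypothetical connection string resolves as follows.
//
//	cs := "DefaultEndpointsProtocol=https;AccountName=myaccount;AccountKey=<key>;EndpointSuffix=core.windows.net"
//	parsed, err := ParseConnectionString(cs)
//	// parsed.ServiceURL  == "https://myaccount.blob.core.windows.net/" (trailing slash added)
//	// parsed.AccountName == "myaccount", parsed.AccountKey == "<key>"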
// SerializeBlobTags converts tags to generated.BlobTags
func SerializeBlobTags(tagsMap map[string]string) *generated.BlobTags {
if tagsMap == nil {
if len(tagsMap) == 0 {
return nil
}
blobTagSet := make([]*generated.BlobTag, 0)
@ -140,7 +147,7 @@ func SerializeBlobTags(tagsMap map[string]string) *generated.BlobTags {
}
func SerializeBlobTagsToStrPtr(tagsMap map[string]string) *string {
if tagsMap == nil {
if len(tagsMap) == 0 {
return nil
}
tags := make([]string, 0)

View file

@ -3,9 +3,14 @@
package azblob
import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
import (
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
)
const (
// EventUpload is used for logging events related to upload operation.
EventUpload = exported.EventUpload
// EventSubmitBatch is used for logging events related to submit blob batch operation.
EventSubmitBatch = exported.EventSubmitBatch
)

View file

@ -25,9 +25,7 @@ import (
)
// ClientOptions contains the optional parameters when creating a Client.
type ClientOptions struct {
azcore.ClientOptions
}
type ClientOptions base.ClientOptions
// Client represents a client to an Azure Storage page blob.
type Client base.CompositeClient[generated.BlobClient, generated.PageBlobClient]
@ -37,7 +35,7 @@ type Client base.CompositeClient[generated.BlobClient, generated.PageBlobClient]
// - cred - an Azure AD credential, typically obtained via the azidentity module
// - options - client options; pass nil to accept the default values
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil)
authPolicy := shared.NewStorageChallengePolicy(cred)
conOptions := shared.GetClientOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
@ -363,6 +361,12 @@ func (pb *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOption
return pb.BlobClient().GetProperties(ctx, o)
}
// GetAccountInfo provides account level information
// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures.
func (pb *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOptions) (blob.GetAccountInfoResponse, error) {
return pb.BlobClient().GetAccountInfo(ctx, o)
}
// SetHTTPHeaders changes a blob's HTTP headers.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
func (pb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) {

View file

@ -29,9 +29,9 @@ type AccountSignatureValues struct {
Protocol Protocol `param:"spr"` // See the SASProtocol* constants
StartTime time.Time `param:"st"` // Not specified if IsZero
ExpiryTime time.Time `param:"se"` // Not specified if IsZero
Permissions string `param:"sp"` // Create by initializing a AccountSASPermissions and then call String()
Permissions string `param:"sp"` // Create by initializing AccountPermissions and then call String()
IPRange IPRange `param:"sip"`
ResourceTypes string `param:"srt"` // Create by initializing AccountSASResourceTypes and then call String()
ResourceTypes string `param:"srt"` // Create by initializing AccountResourceTypes and then call String()
}
// SignWithSharedKey uses an account's shared key credential to sign this signature values to produce
@ -50,6 +50,12 @@ func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKey
}
v.Permissions = perms.String()
resources, err := parseAccountResourceTypes(v.ResourceTypes)
if err != nil {
return QueryParameters{}, err
}
v.ResourceTypes = resources.String()
startTime, expiryTime, _ := formatTimesForSigning(v.StartTime, v.ExpiryTime, time.Time{})
stringToSign := strings.Join([]string{
@ -90,13 +96,13 @@ func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKey
}
// AccountPermissions type simplifies creating the permissions string for an Azure Storage Account SAS.
// Initialize an instance of this type and then call Client.GetSASURL with it or use the String method to set AccountSASSignatureValues Permissions field.
// Initialize an instance of this type and then call its String method to set AccountSignatureValues' Permissions field.
type AccountPermissions struct {
Read, Write, Delete, DeletePreviousVersion, PermanentDelete, List, Add, Create, Update, Process, FilterByTags, Tag, SetImmutabilityPolicy bool
}
// String produces the SAS permissions string for an Azure Storage account.
// Call this method to set AccountSASSignatureValues' Permissions field.
// Call this method to set AccountSignatureValues' Permissions field.
func (p *AccountPermissions) String() string {
var buffer bytes.Buffer
if p.Read {
@ -141,7 +147,7 @@ func (p *AccountPermissions) String() string {
return buffer.String()
}
// Parse initializes the AccountSASPermissions' fields from a string.
// Parse initializes the AccountPermissions' fields from a string.
func parseAccountPermissions(s string) (AccountPermissions, error) {
p := AccountPermissions{} // Clear out the flags
for _, r := range s {
@ -180,13 +186,13 @@ func parseAccountPermissions(s string) (AccountPermissions, error) {
}
// AccountResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS.
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues' ResourceTypes field.
// Initialize an instance of this type and then call its String method to set AccountSignatureValues' ResourceTypes field.
type AccountResourceTypes struct {
Service, Container, Object bool
}
// String produces the SAS resource types string for an Azure Storage account.
// Call this method to set AccountSASSignatureValues' ResourceTypes field.
// Call this method to set AccountSignatureValues' ResourceTypes field.
func (rt *AccountResourceTypes) String() string {
var buffer bytes.Buffer
if rt.Service {
@ -200,3 +206,21 @@ func (rt *AccountResourceTypes) String() string {
}
return buffer.String()
}
// parseAccountResourceTypes initializes the AccountResourceTypes' fields from a string.
func parseAccountResourceTypes(s string) (AccountResourceTypes, error) {
rt := AccountResourceTypes{}
for _, r := range s {
switch r {
case 's':
rt.Service = true
case 'c':
rt.Container = true
case 'o':
rt.Object = true
default:
return AccountResourceTypes{}, fmt.Errorf("invalid resource type character: '%v'", r)
}
}
return rt, nil
}
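// Illustrative sketch (not part of the vendored change): building the permission
// and resource-type strings that AccountSignatureValues now validates and
// normalizes before signing; cred is a hypothetical *SharedKeyCredential.
//
//	perms := AccountPermissions{Read: true, List: true}
//	resTypes := AccountResourceTypes{Service: true, Container: true, Object: true}
//	values := AccountSignatureValues{
//		ExpiryTime:    time.Now().Add(time.Hour),
//		Permissions:   perms.String(),    // "rl"
//		ResourceTypes: resTypes.String(), // "sco"
//	}
//	qp, err := values.SignWithSharedKey(cred)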

View file

@ -8,6 +8,7 @@ package sas
import (
"bytes"
"errors"
"fmt"
"strings"
"time"
@ -24,7 +25,7 @@ type BlobSignatureValues struct {
StartTime time.Time `param:"st"` // Not specified if IsZero
ExpiryTime time.Time `param:"se"` // Not specified if IsZero
SnapshotTime time.Time
Permissions string `param:"sp"` // Create by initializing a ContainerSASPermissions or BlobSASPermissions and then call String()
Permissions string `param:"sp"` // Create by initializing ContainerPermissions or BlobPermissions and then call String()
IPRange IPRange `param:"sip"`
Identifier string `param:"si"`
ContainerName string
@ -50,8 +51,8 @@ func getDirectoryDepth(path string) string {
// SignWithSharedKey uses an account's SharedKeyCredential to sign this signature values to produce the proper SAS query parameters.
func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) {
if sharedKeyCredential == nil {
return QueryParameters{}, fmt.Errorf("cannot sign SAS query without Shared Key Credential")
if v.ExpiryTime.IsZero() || v.Permissions == "" {
return QueryParameters{}, errors.New("service SAS is missing at least one of these: ExpiryTime or Permissions")
}
//Make sure the permission characters are in the correct order
@ -141,6 +142,10 @@ func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *Us
return QueryParameters{}, fmt.Errorf("cannot sign SAS query without User Delegation Key")
}
if v.ExpiryTime.IsZero() || v.Permissions == "" {
return QueryParameters{}, errors.New("user delegation SAS is missing at least one of these: ExpiryTime or Permissions")
}
// Parse the resource
resource := "c"
if !v.SnapshotTime.IsZero() {
@ -261,15 +266,15 @@ func getCanonicalName(account string, containerName string, blobName string, dir
}
// ContainerPermissions type simplifies creating the permissions string for an Azure Storage container SAS.
// Initialize an instance of this type and then call Client.GetSASURL with it or use the String method to set BlobSASSignatureValues Permissions field.
// Initialize an instance of this type and then call its String method to set BlobSignatureValues' Permissions field.
// All permissions descriptions can be found here: https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas#permissions-for-a-directory-container-or-blob
type ContainerPermissions struct {
Read, Add, Create, Write, Delete, DeletePreviousVersion, List, FilterByTags, Move, SetImmutabilityPolicy bool
Execute, ModifyOwnership, ModifyPermissions bool // Meant for hierarchical namespace accounts
Read, Add, Create, Write, Delete, DeletePreviousVersion, List, Tag, FilterByTags, Move, SetImmutabilityPolicy bool
Execute, ModifyOwnership, ModifyPermissions bool // Meant for hierarchical namespace accounts
}
// String produces the SAS permissions string for an Azure Storage container.
// Call this method to set BlobSASSignatureValues' Permissions field.
// Call this method to set BlobSignatureValues' Permissions field.
func (p *ContainerPermissions) String() string {
var b bytes.Buffer
if p.Read {
@ -293,6 +298,9 @@ func (p *ContainerPermissions) String() string {
if p.List {
b.WriteRune('l')
}
if p.Tag {
b.WriteRune('t')
}
if p.FilterByTags {
b.WriteRune('f')
}
@ -333,6 +341,8 @@ func parseContainerPermissions(s string) (ContainerPermissions, error) {
p.DeletePreviousVersion = true
case 'l':
p.List = true
case 't':
p.Tag = true
case 'f':
p.FilterByTags = true
case 'm':
@ -353,13 +363,13 @@ func parseContainerPermissions(s string) (ContainerPermissions, error) {
}
// BlobPermissions type simplifies creating the permissions string for an Azure Storage blob SAS.
// Initialize an instance of this type and then call Client.GetSASURL with it or use the String method to set BlobSASSignatureValues Permissions field.
// Initialize an instance of this type and then call its String method to set BlobSignatureValues' Permissions field.
type BlobPermissions struct {
Read, Add, Create, Write, Delete, DeletePreviousVersion, PermanentDelete, List, Tag, Move, Execute, Ownership, Permissions, SetImmutabilityPolicy bool
}
// String produces the SAS permissions string for an Azure Storage blob.
// Call this method to set BlobSignatureValue's Permissions field.
// Call this method to set BlobSignatureValues' Permissions field.
func (p *BlobPermissions) String() string {
var b bytes.Buffer
if p.Read {

View file

@ -0,0 +1,94 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package service
import (
"context"
"fmt"
"net/url"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
)
// BatchBuilder is used for creating the batch operations list. It contains the list of either delete or set tier sub-requests.
// NOTE: All sub-requests in the batch must be of the same type, either delete or set tier.
type BatchBuilder struct {
endpoint string
authPolicy policy.Policy
subRequests []*policy.Request
operationType *exported.BlobBatchOperationType
}
func (bb *BatchBuilder) checkOperationType(operationType exported.BlobBatchOperationType) error {
if bb.operationType == nil {
bb.operationType = &operationType
return nil
}
if *bb.operationType != operationType {
return fmt.Errorf("BlobBatch only supports one operation type per batch and is already being used for %s operations", *bb.operationType)
}
return nil
}
// Delete operation is used to add a delete sub-request to the batch builder.
func (bb *BatchBuilder) Delete(containerName string, blobName string, options *BatchDeleteOptions) error {
err := bb.checkOperationType(exported.BatchDeleteOperationType)
if err != nil {
return err
}
blobName = url.PathEscape(blobName)
blobURL := runtime.JoinPaths(bb.endpoint, containerName, blobName)
blobClient, err := blob.NewClientWithNoCredential(blobURL, nil)
if err != nil {
return err
}
deleteOptions, leaseInfo, accessConditions := options.format()
req, err := getGeneratedBlobClient(blobClient).DeleteCreateRequest(context.TODO(), deleteOptions, leaseInfo, accessConditions)
if err != nil {
return err
}
// remove x-ms-version header
exported.UpdateSubRequestHeaders(req)
bb.subRequests = append(bb.subRequests, req)
return nil
}
// SetTier operation is used to add a set tier sub-request to the batch builder.
func (bb *BatchBuilder) SetTier(containerName string, blobName string, accessTier blob.AccessTier, options *BatchSetTierOptions) error {
err := bb.checkOperationType(exported.BatchSetTierOperationType)
if err != nil {
return err
}
blobName = url.PathEscape(blobName)
blobURL := runtime.JoinPaths(bb.endpoint, containerName, blobName)
blobClient, err := blob.NewClientWithNoCredential(blobURL, nil)
if err != nil {
return err
}
setTierOptions, leaseInfo, accessConditions := options.format()
req, err := getGeneratedBlobClient(blobClient).SetTierCreateRequest(context.TODO(), accessTier, setTierOptions, leaseInfo, accessConditions)
if err != nil {
return err
}
// remove x-ms-version header
exported.UpdateSubRequestHeaders(req)
bb.subRequests = append(bb.subRequests, req)
return nil
}

View file

@ -7,8 +7,13 @@
package service
import (
"bytes"
"context"
"errors"
"fmt"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base"
"net/http"
"strings"
"time"
@ -18,7 +23,6 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
@ -26,9 +30,7 @@ import (
)
// ClientOptions contains the optional parameters when creating a Client.
type ClientOptions struct {
azcore.ClientOptions
}
type ClientOptions base.ClientOptions
// Client represents a URL to the Azure Blob Storage service allowing you to manipulate blob containers.
type Client base.Client[generated.ServiceClient]
@ -38,12 +40,12 @@ type Client base.Client[generated.ServiceClient]
// - cred - an Azure AD credential, typically obtained via the azidentity module
// - options - client options; pass nil to accept the default values
func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil)
authPolicy := shared.NewStorageChallengePolicy(cred)
conOptions := shared.GetClientOptions(options)
conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
return (*Client)(base.NewServiceClient(serviceURL, pl, nil)), nil
return (*Client)(base.NewServiceClient(serviceURL, pl, &cred)), nil
}
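// Illustrative usage sketch (not part of the vendored change): constructing a
// service client with an azidentity credential; the account URL is hypothetical.
//
//	cred, _ := azidentity.NewDefaultAzureCredential(nil)
//	client, err := service.NewClient("https://<account>.blob.core.windows.net/", cred, nil)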
// NewClientWithNoCredential creates an instance of Client with the specified values.
@ -115,6 +117,15 @@ func (s *Client) sharedKey() *SharedKeyCredential {
return base.SharedKey((*base.Client[generated.ServiceClient])(s))
}
func (s *Client) credential() any {
return base.Credential((*base.Client[generated.ServiceClient])(s))
}
// helper method to return the generated.BlobClient which is used for creating the sub-requests
func getGeneratedBlobClient(b *blob.Client) *generated.BlobClient {
return base.InnerClient((*base.Client[generated.BlobClient])(b))
}
// URL returns the URL endpoint used by the Client object.
func (s *Client) URL() string {
return s.generated().Endpoint()
@ -124,7 +135,7 @@ func (s *Client) URL() string {
// this Client's URL. The new container.Client uses the same request policy pipeline as the Client.
func (s *Client) NewContainerClient(containerName string) *container.Client {
containerURL := runtime.JoinPaths(s.generated().Endpoint(), containerName)
return (*container.Client)(base.NewContainerClient(containerURL, s.generated().Pipeline(), s.sharedKey()))
return (*container.Client)(base.NewContainerClient(containerURL, s.generated().Pipeline(), s.credential()))
}
// CreateContainer is a lifecycle method to creates a new container under the specified account.
@ -154,6 +165,7 @@ func (s *Client) RestoreContainer(ctx context.Context, deletedContainerName stri
}
// GetAccountInfo provides account level information
// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures.
func (s *Client) GetAccountInfo(ctx context.Context, o *GetAccountInfoOptions) (GetAccountInfoResponse, error) {
getAccountInfoOptions := o.format()
resp, err := s.generated().GetAccountInfo(ctx, getAccountInfoOptions)
@ -280,3 +292,68 @@ func (s *Client) FilterBlobs(ctx context.Context, where string, o *FilterBlobsOp
resp, err := s.generated().FilterBlobs(ctx, where, serviceFilterBlobsOptions)
return resp, err
}
// NewBatchBuilder creates an instance of BatchBuilder using the same auth policy as the client.
// BatchBuilder is used to build the batch consisting of either delete or set tier sub-requests.
// All sub-requests in the batch must be of the same type, either delete or set tier.
// NOTE: Service level Blob Batch operation is supported only when the Client was created using SharedKeyCredential or Account SAS.
func (s *Client) NewBatchBuilder() (*BatchBuilder, error) {
var authPolicy policy.Policy
switch cred := s.credential().(type) {
case *azcore.TokenCredential:
authPolicy = shared.NewStorageChallengePolicy(*cred)
case *SharedKeyCredential:
authPolicy = exported.NewSharedKeyCredPolicy(cred)
case nil:
// for authentication using SAS
authPolicy = nil
default:
return nil, fmt.Errorf("unrecognised authentication type %T", cred)
}
return &BatchBuilder{
endpoint: s.URL(),
authPolicy: authPolicy,
}, nil
}
// SubmitBatch operation allows multiple API calls to be embedded into a single HTTP request.
// It builds the request body using the BatchBuilder object passed.
// BatchBuilder contains the list of operations to be submitted. It supports up to 256 sub-requests in a single batch.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/blob-batch.
func (s *Client) SubmitBatch(ctx context.Context, bb *BatchBuilder, options *SubmitBatchOptions) (SubmitBatchResponse, error) {
if bb == nil || len(bb.subRequests) == 0 {
return SubmitBatchResponse{}, errors.New("batch builder is empty")
}
// create the request body
batchReq, batchID, err := exported.CreateBatchRequest(&exported.BlobBatchBuilder{
AuthPolicy: bb.authPolicy,
SubRequests: bb.subRequests,
})
if err != nil {
return SubmitBatchResponse{}, err
}
reader := bytes.NewReader(batchReq)
rsc := streaming.NopCloser(reader)
multipartContentType := "multipart/mixed; boundary=" + batchID
resp, err := s.generated().SubmitBatch(ctx, int64(len(batchReq)), multipartContentType, rsc, options.format())
if err != nil {
return SubmitBatchResponse{}, err
}
batchResponses, err := exported.ParseBlobBatchResponse(resp.Body, resp.ContentType, bb.subRequests)
if err != nil {
return SubmitBatchResponse{}, err
}
return SubmitBatchResponse{
Responses: batchResponses,
ContentType: resp.ContentType,
RequestID: resp.RequestID,
Version: resp.Version,
}, nil
}
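// Illustrative usage sketch (not part of the vendored change); the container and
// blob names are hypothetical.
//
//	bb, err := client.NewBatchBuilder()
//	if err != nil {
//		// handle error
//	}
//	_ = bb.Delete("testcontainer", "blob1", nil)
//	_ = bb.Delete("testcontainer", "blob2", nil)
//	resp, err := client.SubmitBatch(context.TODO(), bb, nil)
//	// resp.Responses contains one BatchResponseItem per sub-request.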

View file

@ -8,6 +8,7 @@ package service
import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
@ -299,3 +300,59 @@ func (o *FilterBlobsOptions) format() *generated.ServiceClientFilterBlobsOptions
Maxresults: o.MaxResults,
}
}
// ---------------------------------------------------------------------------------------------------------------------
// BatchDeleteOptions contains the optional parameters for the BatchBuilder.Delete method.
type BatchDeleteOptions struct {
blob.DeleteOptions
VersionID *string
Snapshot *string
}
func (o *BatchDeleteOptions) format() (*generated.BlobClientDeleteOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) {
if o == nil {
return nil, nil, nil
}
basics := generated.BlobClientDeleteOptions{
DeleteSnapshots: o.DeleteSnapshots,
DeleteType: o.BlobDeleteType, // None by default
Snapshot: o.Snapshot,
VersionID: o.VersionID,
}
leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
return &basics, leaseAccessConditions, modifiedAccessConditions
}
// BatchSetTierOptions contains the optional parameters for the BatchBuilder.SetTier method.
type BatchSetTierOptions struct {
blob.SetTierOptions
VersionID *string
Snapshot *string
}
func (o *BatchSetTierOptions) format() (*generated.BlobClientSetTierOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) {
if o == nil {
return nil, nil, nil
}
basics := generated.BlobClientSetTierOptions{
RehydratePriority: o.RehydratePriority,
Snapshot: o.Snapshot,
VersionID: o.VersionID,
}
leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
return &basics, leaseAccessConditions, modifiedAccessConditions
}
// SubmitBatchOptions contains the optional parameters for the Client.SubmitBatch method.
type SubmitBatchOptions struct {
// placeholder for future options
}
func (o *SubmitBatchOptions) format() *generated.ServiceClientSubmitBatchOptions {
return nil
}

View file

@ -7,6 +7,7 @@
package service
import (
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)
@ -42,3 +43,21 @@ type FilterBlobsResponse = generated.ServiceClientFilterBlobsResponse
// GetUserDelegationKeyResponse contains the response from method ServiceClient.GetUserDelegationKey.
type GetUserDelegationKeyResponse = generated.ServiceClientGetUserDelegationKeyResponse
// SubmitBatchResponse contains the response from method Client.SubmitBatch.
type SubmitBatchResponse struct {
// Responses contains the responses of the sub-requests in the batch
Responses []*BatchResponseItem
// ContentType contains the information returned from the Content-Type header response.
ContentType *string
// RequestID contains the information returned from the x-ms-request-id header response.
RequestID *string
// Version contains the information returned from the x-ms-version header response.
Version *string
}
// BatchResponseItem contains the response for the individual sub-requests.
type BatchResponseItem = exported.BatchResponseItem

View file

@ -1,3 +1,5 @@
# NOTE: This module will go out of support by March 31, 2023. For authenticating with Azure AD, use module [azidentity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity) instead. For help migrating from `adal` to `azidentity`, please consult the [migration guide](https://aka.ms/azsdk/go/identity/migration). General information about the retirement of this and other legacy modules can be found [here](https://azure.microsoft.com/updates/support-for-azure-sdk-libraries-that-do-not-conform-to-our-current-azure-sdk-guidelines-will-be-retired-as-of-31-march-2023/).
# Azure Active Directory authentication for Go
This is a standalone package for authenticating with Azure Active
@ -18,7 +20,7 @@ go get -u github.com/Azure/go-autorest/autorest/adal
## Usage
An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli).
An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli).
### Register an Azure AD Application with secret
@ -88,7 +90,7 @@ An Active Directory application is required in order to use this library. An app
### Grant the necessary permissions
Azure relies on a Role-Based Access Control (RBAC) model to manage the access to resources at a fine-grained
level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles)
level. There is a set of [pre-defined roles](https://docs.microsoft.com/azure/active-directory/role-based-access-built-in-roles)
which can be assigned to a service principal of an Azure AD application depending on your needs.
```
@ -104,7 +106,7 @@ It is also possible to define custom role definitions.
az role definition create --role-definition role-definition.json
```
* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of `role-definition.json` file.
* Check [custom roles](https://docs.microsoft.com/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of `role-definition.json` file.
### Acquire Access Token

View file

@ -177,7 +177,7 @@ func (t Token) WillExpireIn(d time.Duration) bool {
return !t.Expires().After(time.Now().Add(d))
}
//OAuthToken return the current access token
// OAuthToken returns the current access token
func (t *Token) OAuthToken() string {
return t.AccessToken
}
@ -365,6 +365,25 @@ func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, err
})
}
// ServicePrincipalFederatedSecret implements ServicePrincipalSecret for Federated JWTs.
type ServicePrincipalFederatedSecret struct {
jwt string
}
// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
// It will populate the form submitted during OAuth Token Acquisition using a JWT signed by an OIDC issuer.
func (secret *ServicePrincipalFederatedSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
v.Set("client_assertion", secret.jwt)
v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer")
return nil
}
// MarshalJSON implements the json.Marshaler interface.
func (secret ServicePrincipalFederatedSecret) MarshalJSON() ([]byte, error) {
return nil, errors.New("marshalling ServicePrincipalFederatedSecret is not supported")
}
// ServicePrincipalToken encapsulates a Token created for a Service Principal.
type ServicePrincipalToken struct {
inner servicePrincipalToken
@ -419,6 +438,8 @@ func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error {
spt.inner.Secret = &ServicePrincipalUsernamePasswordSecret{}
case "ServicePrincipalAuthorizationCodeSecret":
spt.inner.Secret = &ServicePrincipalAuthorizationCodeSecret{}
case "ServicePrincipalFederatedSecret":
return errors.New("unmarshalling ServicePrincipalFederatedSecret is not supported")
default:
return fmt.Errorf("unrecognized token type '%s'", secret["type"])
}
@ -665,6 +686,31 @@ func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clie
)
}
// NewServicePrincipalTokenFromFederatedToken creates a ServicePrincipalToken from the supplied federated OIDC JWT.
func NewServicePrincipalTokenFromFederatedToken(oauthConfig OAuthConfig, clientID string, jwt string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
if err := validateOAuthConfig(oauthConfig); err != nil {
return nil, err
}
if err := validateStringParam(clientID, "clientID"); err != nil {
return nil, err
}
if err := validateStringParam(resource, "resource"); err != nil {
return nil, err
}
if jwt == "" {
return nil, fmt.Errorf("parameter 'jwt' cannot be empty")
}
return NewServicePrincipalTokenWithSecret(
oauthConfig,
clientID,
resource,
&ServicePrincipalFederatedSecret{
jwt: jwt,
},
callbacks...,
)
}
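// Illustrative usage sketch (not part of the vendored change); the token file path,
// tenant ID, and client ID are hypothetical.
//
//	jwt, _ := os.ReadFile("/var/run/secrets/azure/tokens/azure-identity-token")
//	oauthCfg, _ := adal.NewOAuthConfig("https://login.microsoftonline.com/", "<tenant-id>")
//	spt, err := adal.NewServicePrincipalTokenFromFederatedToken(
//		*oauthCfg, "<client-id>", string(jwt), "https://management.azure.com/")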
type msiType int
const (
@ -1058,8 +1104,8 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource
// AAD returns expires_in as a string, ADFS returns it as an int
ExpiresIn json.Number `json:"expires_in"`
// expires_on can be in two formats, a UTC time stamp or the number of seconds.
ExpiresOn string `json:"expires_on"`
// expires_on can be in three formats, a UTC time stamp, or the number of seconds as a string *or* int.
ExpiresOn interface{} `json:"expires_on"`
NotBefore json.Number `json:"not_before"`
Resource string `json:"resource"`
@ -1072,7 +1118,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource
}
expiresOn := json.Number("")
// ADFS doesn't include the expires_on field
if token.ExpiresOn != "" {
if token.ExpiresOn != nil {
if expiresOn, err = parseExpiresOn(token.ExpiresOn); err != nil {
return newTokenRefreshError(fmt.Sprintf("adal: failed to parse expires_on: %v value '%s'", err, token.ExpiresOn), resp)
}
@ -1089,18 +1135,27 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource
}
// converts expires_on to the number of seconds
func parseExpiresOn(s string) (json.Number, error) {
// convert the expiration date to the number of seconds from now
timeToDuration := func(t time.Time) json.Number {
dur := t.Sub(time.Now().UTC())
return json.Number(strconv.FormatInt(int64(dur.Round(time.Second).Seconds()), 10))
func parseExpiresOn(s interface{}) (json.Number, error) {
// the JSON unmarshaler treats JSON numbers unmarshaled into an interface{} as float64
asFloat64, ok := s.(float64)
if ok {
// this is the number of seconds as int case
return json.Number(strconv.FormatInt(int64(asFloat64), 10)), nil
}
if _, err := strconv.ParseInt(s, 10, 64); err == nil {
asStr, ok := s.(string)
if !ok {
return "", fmt.Errorf("unexpected expires_on type %T", s)
}
// convert the expiration date to the number of seconds from the unix epoch
timeToDuration := func(t time.Time) json.Number {
return json.Number(strconv.FormatInt(t.UTC().Unix(), 10))
}
if _, err := json.Number(asStr).Int64(); err == nil {
// this is the number of seconds case, no conversion required
return json.Number(s), nil
} else if eo, err := time.Parse(expiresOnDateFormatPM, s); err == nil {
return json.Number(asStr), nil
} else if eo, err := time.Parse(expiresOnDateFormatPM, asStr); err == nil {
return timeToDuration(eo), nil
} else if eo, err := time.Parse(expiresOnDateFormat, s); err == nil {
} else if eo, err := time.Parse(expiresOnDateFormat, asStr); err == nil {
return timeToDuration(eo), nil
} else {
// unknown format
@ -1317,12 +1372,25 @@ func NewMultiTenantServicePrincipalTokenFromCertificate(multiTenantCfg MultiTena
// MSIAvailable returns true if the MSI endpoint is available for authentication.
func MSIAvailable(ctx context.Context, s Sender) bool {
msiType, _, err := getMSIType()
if err != nil {
return false
}
if msiType != msiTypeIMDS {
return true
}
if s == nil {
s = sender()
}
resp, err := getMSIEndpoint(ctx, s)
if err == nil {
resp.Body.Close()
}
return err == nil
}

View file

@ -6,33 +6,33 @@ generated Go code.
The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending,
and Responding. A typical pattern is:
req, err := Prepare(&http.Request{},
token.WithAuthorization())
req, err := Prepare(&http.Request{},
token.WithAuthorization())
resp, err := Send(req,
WithLogging(logger),
DoErrorIfStatusCode(http.StatusInternalServerError),
DoCloseIfError(),
DoRetryForAttempts(5, time.Second))
resp, err := Send(req,
WithLogging(logger),
DoErrorIfStatusCode(http.StatusInternalServerError),
DoCloseIfError(),
DoRetryForAttempts(5, time.Second))
err = Respond(resp,
ByDiscardingBody(),
ByClosing())
err = Respond(resp,
ByDiscardingBody(),
ByClosing())
Each phase relies on decorators to modify and / or manage processing. Decorators may first modify
and then pass the data along, pass the data first and then modify the result, or wrap themselves
around passing the data (such as a logger might do). Decorators run in the order provided. For
example, the following:
req, err := Prepare(&http.Request{},
WithBaseURL("https://microsoft.com/"),
WithPath("a"),
WithPath("b"),
WithPath("c"))
req, err := Prepare(&http.Request{},
WithBaseURL("https://microsoft.com/"),
WithPath("a"),
WithPath("b"),
WithPath("c"))
will set the URL to:
https://microsoft.com/a/b/c
https://microsoft.com/a/b/c
Preparers and Responders may be shared and re-used (assuming the underlying decorators support
sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders

View file

@ -214,7 +214,7 @@ func (r Resource) String() string {
// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/templates/template-functions-resource?tabs=json#resourceid.
func ParseResourceID(resourceID string) (Resource, error) {
const resourceIDPatternText = `(?i)subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)`
const resourceIDPatternText = `(?i)^/subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)$`
resourceIDPattern := regexp.MustCompile(resourceIDPatternText)
match := resourceIDPattern.FindStringSubmatch(resourceID)
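// Illustrative note (not part of the vendored change): with the anchored pattern a
// hypothetical ID still parses as before,
//
//	r, _ := azure.ParseResourceID("/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Storage/storageAccounts/<name>")
//	// r.SubscriptionID == "<sub>", r.ResourceGroup == "<rg>", r.Provider == "Microsoft.Storage"
//
// while input with leading or trailing garbage around the ID is now rejected.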

View file

@ -60,9 +60,9 @@ func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder {
// is especially useful if there is a chance the data will fail to decode.
// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v
// is the decoding destination.
func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) {
b := bytes.Buffer{}
return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v)
func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (b bytes.Buffer, err error) {
err = NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v)
return
}
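// Illustrative usage sketch (not part of the vendored change): decode JSON while
// keeping the raw payload around for diagnostics; the resp variable is hypothetical.
//
//	var v map[string]interface{}
//	buf, err := autorest.CopyAndDecode(autorest.EncodedAsJSON, resp.Body, &v)
//	if err != nil {
//		log.Printf("decode failed, raw body: %s", buf.String())
//	}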
// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc.