images: update dependency

Update the images dependency to get the new version gates for Fedora.
Simon de Vlieger 2024-03-04 08:47:59 +01:00 committed by Tomáš Hozza
parent b4dcfc4480
commit 073e304978
21 changed files with 825 additions and 719 deletions


@@ -1,5 +1,12 @@
# Release History
## 1.3.1 (2024-02-28)
### Bugs Fixed
* Re-enabled `SharedKeyCredential` authentication mode for non TLS protected endpoints.
* Use random write in `DownloadFile` method. Fixes [#22426](https://github.com/Azure/azure-sdk-for-go/issues/22426).
## 1.3.0 (2024-02-12)
### Bugs Fixed
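
The `DownloadFile` fix in 1.3.1 above replaces serial writes through an `io.Writer` with offset-based writes through an `io.WriterAt`. As a minimal sketch (not part of this diff, standard library only, temp-file name arbitrary), `*os.File` satisfies `io.WriterAt`, so independently downloaded chunks can land at their own offsets in any order:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	f, err := os.CreateTemp("", "writerat-demo-*")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	// Write the second chunk before the first; the offsets keep the file ordered.
	if _, err := f.WriteAt([]byte("world"), 5); err != nil {
		panic(err)
	}
	if _, err := f.WriteAt([]byte("hello"), 0); err != nil {
		panic(err)
	}

	buf := make([]byte, 10)
	if _, err := f.ReadAt(buf, 0); err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // prints "helloworld"
}
```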


@@ -2,5 +2,5 @@
"AssetsRepo": "Azure/azure-sdk-assets",
"AssetsRepoPrefixPath": "go",
"TagPrefix": "go/storage/azblob",
"Tag": "go/storage/azblob_9f40a5a13d"
"Tag": "go/storage/azblob_71b0a04c12"
}


@@ -9,7 +9,6 @@ package blob
import (
"context"
"io"
"math"
"os"
"sync"
"time"
@@ -359,7 +358,7 @@ func (b *Client) downloadBuffer(ctx context.Context, writer io.WriterAt, o downl
OperationName: "downloadBlobToWriterAt",
TransferSize: count,
ChunkSize: o.BlockSize,
NumChunks: uint16(((count - 1) / o.BlockSize) + 1),
NumChunks: uint64(((count - 1) / o.BlockSize) + 1),
Concurrency: o.Concurrency,
Operation: func(ctx context.Context, chunkStart int64, count int64) error {
downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{
@@ -398,165 +397,6 @@ func (b *Client) downloadBuffer(ctx context.Context, writer io.WriterAt, o downl
return count, nil
}
// downloadFile downloads an Azure blob to a Writer. The blocks are downloaded parallely,
// but written to file serially
func (b *Client) downloadFile(ctx context.Context, writer io.Writer, o downloadOptions) (int64, error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if o.BlockSize == 0 {
o.BlockSize = DefaultDownloadBlockSize
}
if o.Concurrency == 0 {
o.Concurrency = DefaultConcurrency
}
count := o.Range.Count
if count == CountToEnd { //Calculate size if not specified
gr, err := b.GetProperties(ctx, o.getBlobPropertiesOptions())
if err != nil {
return 0, err
}
count = *gr.ContentLength - o.Range.Offset
}
if count <= 0 {
// The file is empty, there is nothing to download.
return 0, nil
}
progress := int64(0)
progressLock := &sync.Mutex{}
// helper routine to get body
getBodyForRange := func(ctx context.Context, chunkStart, size int64) (io.ReadCloser, error) {
downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{
Offset: chunkStart + o.Range.Offset,
Count: size,
}, nil)
dr, err := b.DownloadStream(ctx, downloadBlobOptions)
if err != nil {
return nil, err
}
var body io.ReadCloser = dr.NewRetryReader(ctx, &o.RetryReaderOptionsPerBlock)
if o.Progress != nil {
rangeProgress := int64(0)
body = streaming.NewResponseProgress(
body,
func(bytesTransferred int64) {
diff := bytesTransferred - rangeProgress
rangeProgress = bytesTransferred
progressLock.Lock()
progress += diff
o.Progress(progress)
progressLock.Unlock()
})
}
return body, nil
}
// if file fits in a single buffer, we'll download here.
if count <= o.BlockSize {
body, err := getBodyForRange(ctx, int64(0), count)
if err != nil {
return 0, err
}
defer body.Close()
return io.Copy(writer, body)
}
buffers := shared.NewMMBPool(int(o.Concurrency), o.BlockSize)
defer buffers.Free()
numChunks := uint16((count-1)/o.BlockSize + 1)
for bufferCounter := float64(0); bufferCounter < math.Min(float64(numChunks), float64(o.Concurrency)); bufferCounter++ {
if _, err := buffers.Grow(); err != nil {
return 0, err
}
}
acquireBuffer := func() ([]byte, error) {
return <-buffers.Acquire(), nil
}
blocks := make([]chan []byte, numChunks)
for b := range blocks {
blocks[b] = make(chan []byte)
}
/*
* We have created as many channels as the number of chunks we have.
* Each downloaded block will be sent to the channel matching its
* sequence number, i.e. 0th block is sent to 0th channel, 1st block
* to 1st channel and likewise. The blocks are then read and written
* to the file serially by below goroutine. Do note that the blocks
* are still downloaded parallelly from n/w, only serialized
* and written to file here.
*/
writerError := make(chan error)
writeSize := int64(0)
go func(ch chan error) {
for _, block := range blocks {
select {
case <-ctx.Done():
return
case block := <-block:
n, err := writer.Write(block)
writeSize += int64(n)
buffers.Release(block[:cap(block)])
if err != nil {
ch <- err
return
}
}
}
ch <- nil
}(writerError)
// Prepare and do parallel download.
err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{
OperationName: "downloadBlobToWriterAt",
TransferSize: count,
ChunkSize: o.BlockSize,
NumChunks: numChunks,
Concurrency: o.Concurrency,
Operation: func(ctx context.Context, chunkStart int64, count int64) error {
buff, err := acquireBuffer()
if err != nil {
return err
}
body, err := getBodyForRange(ctx, chunkStart, count)
if err != nil {
buffers.Release(buff)
return nil
}
_, err = io.ReadFull(body, buff[:count])
body.Close()
if err != nil {
return err
}
blockIndex := chunkStart / o.BlockSize
blocks[blockIndex] <- buff[:count]
return nil
},
})
if err != nil {
return 0, err
}
// error from writer thread.
if err = <-writerError; err != nil {
return 0, err
}
return writeSize, nil
}
// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
func (b *Client) DownloadStream(ctx context.Context, o *DownloadStreamOptions) (DownloadStreamResponse, error) {
@@ -596,11 +436,6 @@ func (b *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFil
}
do := (*downloadOptions)(o)
filePointer, err := file.Seek(0, io.SeekCurrent)
if err != nil {
return 0, err
}
// 1. Calculate the size of the destination file
var size int64
@@ -629,15 +464,7 @@ func (b *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFil
}
if size > 0 {
writeSize, err := b.downloadFile(ctx, file, *do)
if err != nil {
return 0, err
}
_, err = file.Seek(filePointer, io.SeekStart)
if err != nil {
return 0, err
}
return writeSize, nil
return b.downloadBuffer(ctx, file, *do)
} else { // if the blob's size is 0, there is no need in downloading it
return 0, nil
}
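
With the `Seek` bookkeeping removed, `DownloadFile` now passes the `*os.File` straight to `downloadBuffer`, which writes each chunk with `WriteAt`. For context, a hedged usage sketch of the public API affected by issue [#22426](https://github.com/Azure/azure-sdk-for-go/issues/22426); the URL, file name, block size, and concurrency below are placeholders, not values from this commit:

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func main() {
	// Placeholder SAS URL; any authenticated blob.Client works the same way.
	client, err := blob.NewClientWithNoCredential(
		"https://<account>.blob.core.windows.net/<container>/<blob>?<sas>", nil)
	if err != nil {
		log.Fatal(err)
	}

	f, err := os.Create("blob.bin")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// As of v1.3.1, chunks are written at their own offsets via WriteAt
	// instead of being funneled through a single serial writer.
	n, err := client.DownloadFile(context.TODO(), f, &blob.DownloadFileOptions{
		BlockSize:   4 * 1024 * 1024, // 4 MiB chunks
		Concurrency: 8,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("downloaded %d bytes", n)
}
```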


@@ -474,7 +474,7 @@ func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, actu
OperationName: "uploadFromReader",
TransferSize: actualSize,
ChunkSize: o.BlockSize,
NumChunks: uint16(((actualSize - 1) / o.BlockSize) + 1),
NumChunks: uint64(((actualSize - 1) / o.BlockSize) + 1),
Concurrency: o.Concurrency,
Operation: func(ctx context.Context, offset int64, chunkSize int64) error {
// This function is called once per block.


@@ -11,9 +11,7 @@ import (
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"errors"
"fmt"
"github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo"
"net/http"
"net/url"
"sort"
@@ -204,10 +202,6 @@ func (s *SharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) {
return req.Next()
}
if err := checkHTTPSForAuth(req); err != nil {
return nil, err
}
if d := getHeader(shared.HeaderXmsDate, req.Raw().Header); d == "" {
req.Raw().Header.Set(shared.HeaderXmsDate, time.Now().UTC().Format(http.TimeFormat))
}
@@ -229,10 +223,3 @@ func (s *SharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) {
}
return response, err
}
func checkHTTPSForAuth(req *policy.Request) error {
if strings.ToLower(req.Raw().URL.Scheme) != "https" {
return errorinfo.NonRetriableError(errors.New("authenticated requests are not permitted for non TLS protected (https) endpoints"))
}
return nil
}
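
Dropping `checkHTTPSForAuth` is what the changelog entry about `SharedKeyCredential` on non TLS protected endpoints refers to: shared-key requests to plain-HTTP endpoints, such as a local Azurite emulator, are no longer rejected. A hedged sketch under that assumption, using Azurite's publicly documented development account and default blob port (not values from this diff):

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

func main() {
	// Azurite's well-known development credentials; the point is the
	// plain-HTTP endpoint, which v1.3.0 rejected and v1.3.1 accepts again.
	account := "devstoreaccount1"
	key := "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="

	cred, err := azblob.NewSharedKeyCredential(account, key)
	if err != nil {
		log.Fatal(err)
	}

	client, err := azblob.NewClientWithSharedKeyCredential(
		"http://127.0.0.1:10000/devstoreaccount1", cred, nil)
	if err != nil {
		log.Fatal(err)
	}

	// List containers just to exercise the credential over HTTP.
	pager := client.NewListContainersPager(nil)
	for pager.More() {
		page, err := pager.NextPage(context.TODO())
		if err != nil {
			log.Fatal(err)
		}
		for _, c := range page.ContainerItems {
			log.Println(*c.Name)
		}
	}
}
```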


@@ -8,5 +8,5 @@ package exported
const (
ModuleName = "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
ModuleVersion = "v1.3.0"
ModuleVersion = "v1.3.1"
)


@@ -19,7 +19,7 @@ const (
type BatchTransferOptions struct {
TransferSize int64
ChunkSize int64
NumChunks uint16
NumChunks uint64
Concurrency uint16
Operation func(ctx context.Context, offset int64, chunkSize int64) error
OperationName string
@@ -54,7 +54,7 @@ func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error {
}
// Add each chunk's operation to the channel.
for chunkNum := uint16(0); chunkNum < o.NumChunks; chunkNum++ {
for chunkNum := uint64(0); chunkNum < o.NumChunks; chunkNum++ {
curChunkSize := o.ChunkSize
if chunkNum == o.NumChunks-1 { // Last chunk
@@ -69,7 +69,7 @@ func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error {
// Wait for the operations to complete.
var firstErr error = nil
for chunkNum := uint16(0); chunkNum < o.NumChunks; chunkNum++ {
for chunkNum := uint64(0); chunkNum < o.NumChunks; chunkNum++ {
responseError := <-operationResponseChannel
// record the first error (the original error which should cause the other chunks to fail with canceled context)
if responseError != nil && firstErr == nil {
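
Widening `NumChunks` and the loop counters from `uint16` to `uint64` matters because the chunk count is derived from the transfer size and block size, and 65,535 chunks is easy to exceed. A rough back-of-the-envelope sketch, assuming the default 4 MiB download block size (the 512 GiB figure is just an illustrative transfer size):

```go
package main

import "fmt"

func main() {
	const blockSize int64 = 4 * 1024 * 1024             // 4 MiB, the default download block size
	const transferSize int64 = 512 * 1024 * 1024 * 1024 // a 512 GiB blob

	// Same formula as downloadBuffer / uploadFromReader above.
	numChunks := uint64(((transferSize - 1) / blockSize) + 1)
	fmt.Println(numChunks)         // 131072
	fmt.Println(numChunks > 65535) // true: this count would not fit in a uint16
}
```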