go.mod: bump osbuild/images to 0.55

Sanne Raymaekers 2024-04-13 15:47:23 +02:00
parent eab44ca8a8
commit 22140aa7c9
700 changed files with 30353 additions and 27556 deletions


@@ -1,6 +1,32 @@
# Changes
## [1.40.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.39.1...storage/v1.40.0) (2024-03-29)
### Features
* **storage:** Implement io.WriterTo in Reader ([#9659](https://github.com/googleapis/google-cloud-go/issues/9659)) ([8264a96](https://github.com/googleapis/google-cloud-go/commit/8264a962d1c21d52e8fca50af064c5535c3708d3))
* **storage:** New storage control client ([#9631](https://github.com/googleapis/google-cloud-go/issues/9631)) ([1f4d279](https://github.com/googleapis/google-cloud-go/commit/1f4d27957743878976d6b4549cc02a5bb894d330))
### Bug Fixes
* **storage:** Retry errors from last recv on uploads ([#9616](https://github.com/googleapis/google-cloud-go/issues/9616)) ([b6574aa](https://github.com/googleapis/google-cloud-go/commit/b6574aa42ebad0532c2749b6ece879b932f95cb9))
* **storage:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a))
### Performance Improvements
* **storage:** Remove protobuf's copy of data on unmarshalling ([#9526](https://github.com/googleapis/google-cloud-go/issues/9526)) ([81281c0](https://github.com/googleapis/google-cloud-go/commit/81281c04e503fd83301baf88cc352c77f5d476ca))
## [1.39.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.39.0...storage/v1.39.1) (2024-03-11)
### Bug Fixes
* **storage:** Add object validation case and test ([#9521](https://github.com/googleapis/google-cloud-go/issues/9521)) ([386bef3](https://github.com/googleapis/google-cloud-go/commit/386bef319b4678beaa926ddfe4edef190f11b68d))
## [1.39.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.38.0...storage/v1.39.0) (2024-02-29)


@@ -335,9 +335,10 @@ to add a [custom audit logging] header:
This package includes support for the Cloud Storage gRPC API, which is currently
in preview. This implementation uses gRPC rather than the current JSON & XML
APIs to make requests to Cloud Storage. If you would like to try the API,
please contact your GCP account rep for more information. The gRPC API is not
yet generally available, so it may be subject to breaking changes.
APIs to make requests to Cloud Storage. Kindly contact the Google Cloud Storage gRPC
team at gcs-grpc-contact@google.com with a list of GCS buckets you would like to
allowlist to access this API. The Go Storage gRPC library is not yet generally
available, so it may be subject to breaking changes.
To create a client which will use gRPC, use the alternate constructor:
@@ -349,7 +350,7 @@ To create a client which will use gRPC, use the alternate constructor:
// Use client as usual.
If the application is running within GCP, users may get better performance by
enabling DirectPath (enabling requests to skip some proxy steps). To enable,
enabling Google Direct Access (enabling requests to skip some proxy steps). To enable,
set the environment variable `GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS=true` and add
the following side-effect imports to your application:
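The import list itself falls outside this hunk; the following is a rough sketch of the full gRPC setup from the application side, where the constructor name (storage.NewGRPCClient) and the two side-effect import paths are assumptions taken from the package documentation rather than from this diff:

// Sketch only: NewGRPCClient and the DirectPath side-effect imports below are
// assumed from the package docs; verify against the README before relying on them.
package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"

	// Side-effect imports assumed to enable Google Direct Access.
	_ "google.golang.org/grpc/balancer/rls"
	_ "google.golang.org/grpc/xds/googledirectpath"
)

func main() {
	// GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS=true must also be set in the environment.
	ctx := context.Background()
	client, err := storage.NewGRPCClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	// Use client as usual.
}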


@@ -19,6 +19,7 @@ import (
"encoding/base64"
"errors"
"fmt"
"hash/crc32"
"io"
"net/url"
"os"
@@ -27,6 +28,7 @@ import (
"cloud.google.com/go/internal/trace"
gapic "cloud.google.com/go/storage/internal/apiv2"
"cloud.google.com/go/storage/internal/apiv2/storagepb"
"github.com/golang/protobuf/proto"
"github.com/googleapis/gax-go/v2"
"google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
@@ -34,8 +36,10 @@ import (
"google.golang.org/api/option/internaloption"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/encoding"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/encoding/protowire"
fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
)
@@ -902,12 +906,50 @@ func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec
return r, nil
}
// bytesCodec is a grpc codec which permits receiving messages as either
// protobuf messages, or as raw []bytes.
type bytesCodec struct {
encoding.Codec
}
func (bytesCodec) Marshal(v any) ([]byte, error) {
vv, ok := v.(proto.Message)
if !ok {
return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
}
return proto.Marshal(vv)
}
func (bytesCodec) Unmarshal(data []byte, v any) error {
switch v := v.(type) {
case *[]byte:
// If gRPC could recycle the data []byte after unmarshaling (through
// buffer pools), we would need to make a copy here.
*v = data
return nil
case proto.Message:
return proto.Unmarshal(data, v)
default:
return fmt.Errorf("can not unmarshal type %T", v)
}
}
func (bytesCodec) Name() string {
// If this isn't "", then gRPC sets the content-subtype of the call to this
// value and we get errors.
return ""
}
func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.NewRangeReader")
defer func() { trace.EndSpan(ctx, err) }()
s := callSettings(c.settings, opts...)
s.gax = append(s.gax, gax.WithGRPCOptions(
grpc.ForceCodec(bytesCodec{}),
))
if s.userProject != "" {
ctx = setUserProjectMetadata(ctx, s.userProject)
}
@@ -923,6 +965,8 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
req.Generation = params.gen
}
var databuf []byte
// Define a function that initiates a Read with offset and length, assuming
// we have already read seen bytes.
reopen := func(seen int64) (*readStreamResponse, context.CancelFunc, error) {
@@ -957,12 +1001,23 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
return err
}
msg, err = stream.Recv()
// Receive the message into databuf as a wire-encoded message so we can
// use a custom decoder to avoid an extra copy at the protobuf layer.
err := stream.RecvMsg(&databuf)
// These types of errors show up on the Recv call, rather than the
// initialization of the stream via ReadObject above.
if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
return ErrObjectNotExist
}
if err != nil {
return err
}
// Use a custom decoder that uses protobuf unmarshalling for all
// fields except the checksummed data.
// Subsequent receives in Read calls will skip all protobuf
// unmarshalling and directly read the content from the gRPC []byte
// response, since only the first call will contain other fields.
msg, err = readFullObjectResponse(databuf)
return err
}, s.retry, s.idempotent)
@@ -988,6 +1043,16 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
// This is the size of the entire object, even if only a range was requested.
size := obj.GetSize()
// Only support checksums when reading an entire object, not a range.
var (
wantCRC uint32
checkCRC bool
)
if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil && params.offset == 0 && params.length < 0 {
wantCRC = checksums.GetCrc32C()
checkCRC = true
}
r = &Reader{
Attrs: ReaderObjectAttrs{
Size: size,
@@ -1008,7 +1073,11 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
leftovers: msg.GetChecksummedData().GetContent(),
settings: s,
zeroRange: params.length == 0,
databuf: databuf,
wantCRC: wantCRC,
checkCRC: checkCRC,
},
checkCRC: checkCRC,
}
cr := msg.GetContentRange()
@@ -1026,12 +1095,6 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
r.reader.Close()
}
// Only support checksums when reading an entire object, not a range.
if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil && params.offset == 0 && params.length < 0 {
r.wantCRC = checksums.GetCrc32C()
r.checkCRC = true
}
return r, nil
}
@@ -1406,14 +1469,37 @@ type gRPCReader struct {
stream storagepb.Storage_ReadObjectClient
reopen func(seen int64) (*readStreamResponse, context.CancelFunc, error)
leftovers []byte
databuf []byte
cancel context.CancelFunc
settings *settings
checkCRC bool // should we check the CRC?
wantCRC uint32 // the CRC32c value the server sent in the header
gotCRC uint32 // running crc
}
// Update the running CRC with the data in the slice, if CRC checking was enabled.
func (r *gRPCReader) updateCRC(b []byte) {
if r.checkCRC {
r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, b)
}
}
// Checks whether the CRC matches at the conclusion of a read, if CRC checking was enabled.
func (r *gRPCReader) runCRCCheck() error {
if r.checkCRC && r.gotCRC != r.wantCRC {
return fmt.Errorf("storage: bad CRC on read: got %d, want %d", r.gotCRC, r.wantCRC)
}
return nil
}
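As a self-contained illustration of the running-checksum pattern above (assuming the package-level crc32cTable is crc32.MakeTable(crc32.Castagnoli), as used elsewhere in this package, and the hash/crc32 import already shown in this file):

// Feeding chunks through crc32.Update one at a time yields the same CRC32C
// as checksumming the concatenated data in a single call, which is what lets
// the reader verify the object while streaming it.
var crc32cCastagnoli = crc32.MakeTable(crc32.Castagnoli) // stand-in for crc32cTable

func crcOfChunks(chunks [][]byte) uint32 {
	var got uint32
	for _, chunk := range chunks {
		got = crc32.Update(got, crc32cCastagnoli, chunk)
	}
	return got
}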
// Read reads bytes into the user's buffer from an open gRPC stream.
func (r *gRPCReader) Read(p []byte) (int, error) {
// The entire object has been read by this reader, return EOF.
// The entire object has been read by this reader, check the checksum if
// necessary and return EOF.
if r.size == r.seen || r.zeroRange {
if err := r.runCRCCheck(); err != nil {
return 0, err
}
return 0, io.EOF
}
@@ -1422,7 +1508,7 @@ func (r *gRPCReader) Read(p []byte) (int, error) {
// using the same reader. One encounters an error and the stream is closed
// and then reopened while the other routine attempts to read from it.
if r.stream == nil {
return 0, fmt.Errorf("reader has been closed")
return 0, fmt.Errorf("storage: reader has been closed")
}
var n int
@@ -1431,12 +1517,13 @@ func (r *gRPCReader) Read(p []byte) (int, error) {
if len(r.leftovers) > 0 {
n = copy(p, r.leftovers)
r.seen += int64(n)
r.updateCRC(p[:n])
r.leftovers = r.leftovers[n:]
return n, nil
}
// Attempt to Recv the next message on the stream.
msg, err := r.recv()
content, err := r.recv()
if err != nil {
return 0, err
}
@@ -1448,7 +1535,6 @@ func (r *gRPCReader) Read(p []byte) (int, error) {
// present in the response here.
// TODO: Figure out if we need to support decompressive transcoding
// https://cloud.google.com/storage/docs/transcoding.
content := msg.GetChecksummedData().GetContent()
n = copy(p[n:], content)
leftover := len(content) - n
if leftover > 0 {
@@ -1457,10 +1543,78 @@ func (r *gRPCReader) Read(p []byte) (int, error) {
r.leftovers = content[n:]
}
r.seen += int64(n)
r.updateCRC(p[:n])
return n, nil
}
// WriteTo writes all the data requested by the Reader into w, implementing
// io.WriterTo.
func (r *gRPCReader) WriteTo(w io.Writer) (int64, error) {
// The entire object has been read by this reader, check the checksum if
// necessary and return nil.
if r.size == r.seen || r.zeroRange {
if err := r.runCRCCheck(); err != nil {
return 0, err
}
return 0, nil
}
// No stream to read from, either never initialized or Close was called.
// Note: There is a potential concurrency issue if multiple routines are
// using the same reader. One encounters an error and the stream is closed
// and then reopened while the other routine attempts to read from it.
if r.stream == nil {
return 0, fmt.Errorf("storage: reader has been closed")
}
// Track bytes already seen before this call so we only report what this call wrote.
var alreadySeen = r.seen
// Write any leftovers to the stream. There will be some leftovers from the
// original NewRangeReader call.
if len(r.leftovers) > 0 {
// Write() will write the entire leftovers slice unless there is an error.
written, err := w.Write(r.leftovers)
r.seen += int64(written)
r.updateCRC(r.leftovers)
r.leftovers = nil
if err != nil {
return r.seen - alreadySeen, err
}
}
// Loop and receive additional messages until the entire data is written.
for {
// Attempt to receive the next message on the stream.
// Will terminate with io.EOF once data has all come through.
// recv() handles stream reopening and retry logic so no need for retries here.
msg, err := r.recv()
if err != nil {
if err == io.EOF {
// We are done; check the checksum if necessary and return.
err = r.runCRCCheck()
}
return r.seen - alreadySeen, err
}
// TODO: Determine if we need to capture incremental CRC32C for this
// chunk. The Object CRC32C checksum is captured when directed to read
// the entire Object. If directed to read a range, we may need to
// calculate the range's checksum for verification if the checksum is
// present in the response here.
// TODO: Figure out if we need to support decompressive transcoding
// https://cloud.google.com/storage/docs/transcoding.
written, err := w.Write(msg)
r.seen += int64(written)
r.updateCRC(msg)
if err != nil {
return r.seen - alreadySeen, err
}
}
}
// Close cancels the read stream's context in order for it to be closed and
// collected.
func (r *gRPCReader) Close() error {
@@ -1471,9 +1625,10 @@ func (r *gRPCReader) Close() error {
return nil
}
// recv attempts to Recv the next message on the stream. In the event
// that a retryable error is encountered, the stream will be closed, reopened,
// and Recv again. This will attempt to Recv until one of the following is true:
// recv attempts to Recv the next message on the stream and extract the object
// data that it contains. In the event that a retryable error is encountered,
// the stream will be closed, reopened, and RecvMsg again.
// This will attempt to Recv until one of the following is true:
//
// * Recv is successful
// * A non-retryable error is encountered
@@ -1481,8 +1636,9 @@ func (r *gRPCReader) Close() error {
//
// The last error received is the one that is returned, which could be from
// an attempt to reopen the stream.
func (r *gRPCReader) recv() (*storagepb.ReadObjectResponse, error) {
msg, err := r.stream.Recv()
func (r *gRPCReader) recv() ([]byte, error) {
err := r.stream.RecvMsg(&r.databuf)
var shouldRetry = ShouldRetry
if r.settings.retry != nil && r.settings.retry.shouldRetry != nil {
shouldRetry = r.settings.retry.shouldRetry
@@ -1492,10 +1648,195 @@ func (r *gRPCReader) recv() (*storagepb.ReadObjectResponse, error) {
// reopen the stream, but will backoff if further attempts are necessary.
// Reopening the stream Recvs the first message, so if retrying is
// successful, the next logical chunk will be returned.
msg, err = r.reopenStream()
msg, err := r.reopenStream()
return msg.GetChecksummedData().GetContent(), err
}
return msg, err
if err != nil {
return nil, err
}
return readObjectResponseContent(r.databuf)
}
// ReadObjectResponse field and subfield numbers.
const (
checksummedDataField = protowire.Number(1)
checksummedDataContentField = protowire.Number(1)
checksummedDataCRC32CField = protowire.Number(2)
objectChecksumsField = protowire.Number(2)
contentRangeField = protowire.Number(3)
metadataField = protowire.Number(4)
)
// readObjectResponseContent returns the checksummed_data.content field of a
// ReadObjectResponse message, or an error if the message is invalid.
// This can be used on recvs of objects after the first recv, since only the
// first message will contain non-data fields.
func readObjectResponseContent(b []byte) ([]byte, error) {
checksummedData, err := readProtoBytes(b, checksummedDataField)
if err != nil {
return b, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData: %v", err)
}
content, err := readProtoBytes(checksummedData, checksummedDataContentField)
if err != nil {
return content, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Content: %v", err)
}
return content, nil
}
// readFullObjectResponse returns the ReadObjectResponse that is encoded in the
// wire-encoded message buffer b, or an error if the message is invalid.
// This must be used on the first recv of an object as it may contain all fields
// of ReadObjectResponse, and we use or pass on those fields to the user.
// This function is essentially identical to proto.Unmarshal, except it aliases
// the data in the input []byte. If the proto library adds a feature to
// Unmarshal that does that, this function can be dropped.
func readFullObjectResponse(b []byte) (*storagepb.ReadObjectResponse, error) {
msg := &storagepb.ReadObjectResponse{}
// Loop over the entire message, extracting fields as we go. This does not
// handle field concatenation, in which the contents of a single field
// are split across multiple protobuf tags.
off := 0
for off < len(b) {
// Consume the next tag. This will tell us which field is next in the
// buffer, its type, and how much space it takes up.
fieldNum, fieldType, fieldLength := protowire.ConsumeTag(b[off:])
if fieldLength < 0 {
return nil, protowire.ParseError(fieldLength)
}
off += fieldLength
// Unmarshal the field according to its type. Only fields that are not
// nil will be present.
switch {
case fieldNum == checksummedDataField && fieldType == protowire.BytesType:
// The ChecksummedData field was found. Initialize the struct.
msg.ChecksummedData = &storagepb.ChecksummedData{}
// Get the bytes corresponding to the checksummed data.
fieldContent, n := protowire.ConsumeBytes(b[off:])
if n < 0 {
return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData: %v", protowire.ParseError(n))
}
off += n
// Get the nested fields. We need to do this manually as it contains
// the object content bytes.
contentOff := 0
for contentOff < len(fieldContent) {
gotNum, gotTyp, n := protowire.ConsumeTag(fieldContent[contentOff:])
if n < 0 {
return nil, protowire.ParseError(n)
}
contentOff += n
switch {
case gotNum == checksummedDataContentField && gotTyp == protowire.BytesType:
// Get the content bytes.
bytes, n := protowire.ConsumeBytes(fieldContent[contentOff:])
if n < 0 {
return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Content: %v", protowire.ParseError(n))
}
msg.ChecksummedData.Content = bytes
contentOff += n
case gotNum == checksummedDataCRC32CField && gotTyp == protowire.Fixed32Type:
v, n := protowire.ConsumeFixed32(fieldContent[contentOff:])
if n < 0 {
return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Crc32C: %v", protowire.ParseError(n))
}
msg.ChecksummedData.Crc32C = &v
contentOff += n
default:
n = protowire.ConsumeFieldValue(gotNum, gotTyp, fieldContent[contentOff:])
if n < 0 {
return nil, protowire.ParseError(n)
}
contentOff += n
}
}
case fieldNum == objectChecksumsField && fieldType == protowire.BytesType:
// The field was found. Initialize the struct.
msg.ObjectChecksums = &storagepb.ObjectChecksums{}
// Get the bytes corresponding to the checksums.
bytes, n := protowire.ConsumeBytes(b[off:])
if n < 0 {
return nil, fmt.Errorf("invalid ReadObjectResponse.ObjectChecksums: %v", protowire.ParseError(n))
}
off += n
// Unmarshal.
if err := proto.Unmarshal(bytes, msg.ObjectChecksums); err != nil {
return nil, err
}
case fieldNum == contentRangeField && fieldType == protowire.BytesType:
msg.ContentRange = &storagepb.ContentRange{}
bytes, n := protowire.ConsumeBytes(b[off:])
if n < 0 {
return nil, fmt.Errorf("invalid ReadObjectResponse.ContentRange: %v", protowire.ParseError(n))
}
off += n
if err := proto.Unmarshal(bytes, msg.ContentRange); err != nil {
return nil, err
}
case fieldNum == metadataField && fieldType == protowire.BytesType:
msg.Metadata = &storagepb.Object{}
bytes, n := protowire.ConsumeBytes(b[off:])
if n < 0 {
return nil, fmt.Errorf("invalid ReadObjectResponse.Metadata: %v", protowire.ParseError(n))
}
off += n
if err := proto.Unmarshal(bytes, msg.Metadata); err != nil {
return nil, err
}
default:
fieldLength = protowire.ConsumeFieldValue(fieldNum, fieldType, b[off:])
if fieldLength < 0 {
return nil, fmt.Errorf("default: %v", protowire.ParseError(fieldLength))
}
off += fieldLength
}
}
return msg, nil
}
// readProtoBytes returns the contents of the protobuf field with number num
// and type bytes from a wire-encoded message. If the field cannot be found,
// the returned slice will be nil and no error will be returned.
//
// It does not handle field concatenation, in which the contents of a single field
// are split across multiple protobuf tags. Encoded data containing split fields
of this form is technically permissible, but uncommon.
func readProtoBytes(b []byte, num protowire.Number) ([]byte, error) {
off := 0
for off < len(b) {
gotNum, gotTyp, n := protowire.ConsumeTag(b[off:])
if n < 0 {
return nil, protowire.ParseError(n)
}
off += n
if gotNum == num && gotTyp == protowire.BytesType {
b, n := protowire.ConsumeBytes(b[off:])
if n < 0 {
return nil, protowire.ParseError(n)
}
return b, nil
}
n = protowire.ConsumeFieldValue(gotNum, gotTyp, b[off:])
if n < 0 {
return nil, protowire.ParseError(n)
}
off += n
}
return nil, nil
}
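For a quick sanity check of the wire-level helpers above, here is a tiny sketch that builds a one-field message with the standard protowire append helpers and reads it back with readProtoBytes; field number 1 is arbitrary, and the returned slice aliases the input buffer rather than copying it:

// exampleReadProtoBytes encodes a single bytes field and extracts it again,
// demonstrating the zero-copy pattern used by readObjectResponseContent.
func exampleReadProtoBytes() ([]byte, error) {
	var buf []byte
	buf = protowire.AppendTag(buf, protowire.Number(1), protowire.BytesType)
	buf = protowire.AppendBytes(buf, []byte("object content"))
	// The returned slice points into buf; no copy of the payload is made.
	return readProtoBytes(buf, protowire.Number(1))
}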
// reopenStream "closes" the existing stream and attempts to reopen a stream and
@@ -1630,6 +1971,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st
// Send a request with as many bytes as possible.
// Loop until all bytes are sent.
sendBytes: // label this loop so that we can use a continue statement from a nested block
for {
bytesNotYetSent := recvd - sent
remainingDataFitsInSingleReq := bytesNotYetSent <= maxPerMessageWriteSize
@@ -1707,10 +2049,6 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st
// we retry.
w.stream = nil
// Drop the stream reference as a new one will need to be created if
// we can retry the upload
w.stream = nil
// Retriable errors mean we should start over and attempt to
// resend the entire buffer via a new stream.
// If not retriable, falling through will return the error received.
@@ -1724,7 +2062,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st
// Continue sending requests, opening a new stream and resending
// any bytes not yet persisted as per QueryWriteStatus
continue
continue sendBytes
}
}
if err != nil {
@@ -1739,7 +2077,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st
// Not done sending data, do not attempt to commit it yet, loop around
// and send more data.
if recvd-sent > 0 {
continue
continue sendBytes
}
// The buffer has been uploaded and there is still more data to be
@@ -1770,7 +2108,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st
// Drop the stream reference as a new one will need to be created.
w.stream = nil
continue
continue sendBytes
}
if err != nil {
return nil, 0, err
@@ -1780,7 +2118,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st
// Retry if not all bytes were persisted.
writeOffset = resp.GetPersistedSize()
sent = int(writeOffset) - int(start)
continue
continue sendBytes
}
} else {
// If the object is done uploading, close the send stream to signal
@@ -1800,6 +2138,15 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st
var obj *storagepb.Object
for obj == nil {
resp, err := w.stream.Recv()
if shouldRetry(err) {
writeOffset, err = w.determineOffset(start)
if err != nil {
return nil, 0, err
}
sent = int(writeOffset) - int(start)
w.stream = nil
continue sendBytes
}
if err != nil {
return nil, 0, err
}


@@ -19,6 +19,7 @@ import (
"encoding/base64"
"errors"
"fmt"
"hash/crc32"
"io"
"io/ioutil"
"net/http"
@@ -1218,9 +1219,12 @@ func (c *httpStorageClient) DeleteNotification(ctx context.Context, bucket strin
}
type httpReader struct {
body io.ReadCloser
seen int64
reopen func(seen int64) (*http.Response, error)
body io.ReadCloser
seen int64
reopen func(seen int64) (*http.Response, error)
checkCRC bool // should we check the CRC?
wantCRC uint32 // the CRC32c value the server sent in the header
gotCRC uint32 // running crc
}
func (r *httpReader) Read(p []byte) (int, error) {
@@ -1229,7 +1233,22 @@ func (r *httpReader) Read(p []byte) (int, error) {
m, err := r.body.Read(p[n:])
n += m
r.seen += int64(m)
if err == nil || err == io.EOF {
if r.checkCRC {
r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n])
}
if err == nil {
return n, nil
}
if err == io.EOF {
// Check CRC here. It would be natural to check it in Close, but
// everybody defers Close on the assumption that it doesn't return
// anything worth looking at.
if r.checkCRC {
if r.gotCRC != r.wantCRC {
return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d",
r.gotCRC, r.wantCRC)
}
}
return n, err
}
// Read failed (likely due to connection issues), but we will try to reopen
@@ -1435,11 +1454,12 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen
Attrs: attrs,
size: size,
remain: remain,
wantCRC: crc,
checkCRC: checkCRC,
reader: &httpReader{
reopen: reopen,
body: body,
reopen: reopen,
body: body,
wantCRC: crc,
checkCRC: checkCRC,
},
}, nil
}

File diff suppressed because it is too large


@@ -15,4 +15,4 @@
package internal
// Version is the current tagged release of the library.
const Version = "1.39.0"
const Version = "1.40.0"


@@ -198,9 +198,7 @@ var emptyBody = ioutil.NopCloser(strings.NewReader(""))
type Reader struct {
Attrs ReaderObjectAttrs
seen, remain, size int64
checkCRC bool // should we check the CRC?
wantCRC uint32 // the CRC32c value the server sent in the header
gotCRC uint32 // running crc
checkCRC bool // Did we check the CRC? This is now only used by tests.
reader io.ReadCloser
ctx context.Context
@@ -218,17 +216,17 @@ func (r *Reader) Read(p []byte) (int, error) {
if r.remain != -1 {
r.remain -= int64(n)
}
if r.checkCRC {
r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n])
// Check CRC here. It would be natural to check it in Close, but
// everybody defers Close on the assumption that it doesn't return
// anything worth looking at.
if err == io.EOF {
if r.gotCRC != r.wantCRC {
return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d",
r.gotCRC, r.wantCRC)
}
}
return n, err
}
// WriteTo writes all the data from the Reader to w. Fulfills the io.WriterTo interface.
// This is called implicitly when calling io.Copy on a Reader.
func (r *Reader) WriteTo(w io.Writer) (int64, error) {
// This implicitly calls r.reader.WriteTo for gRPC only. JSON and XML don't have an
// implementation of WriteTo.
n, err := io.Copy(w, r.reader)
if r.remain != -1 {
r.remain -= int64(n)
}
return n, err
}
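A brief usage sketch of how this path is reached: io.Copy checks for io.WriterTo, so copying from a *storage.Reader picks up the new WriteTo implementation automatically on the gRPC transport. The bucket and object names below are placeholders, and the snippet assumes the usual context, io, and cloud.google.com/go/storage imports.

// copyObject streams an object into w. Because *storage.Reader implements
// io.WriterTo, io.Copy uses the WriteTo fast path added in this change when
// the client was created with the gRPC constructor.
func copyObject(ctx context.Context, client *storage.Client, w io.Writer) error {
	rc, err := client.Bucket("my-bucket").Object("my-object").NewReader(ctx)
	if err != nil {
		return err
	}
	defer rc.Close()
	_, err = io.Copy(w, rc)
	return err
}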


@@ -1096,6 +1096,10 @@ func (o *ObjectHandle) validate() error {
if !utf8.ValidString(o.object) {
return fmt.Errorf("storage: object name %q is not valid UTF-8", o.object)
}
// Names . and .. are not valid; see https://cloud.google.com/storage/docs/objects#naming
if o.object == "." || o.object == ".." {
return fmt.Errorf("storage: object name %q is not valid", o.object)
}
return nil
}