go: update most dependencies to the latest version

Our outdated dependencies create a lot of work for dependabot; let's
help it out by making one big manual update.

Signed-off-by: Ondřej Budai <ondrej@budai.cz>
This commit is contained in:
Ondřej Budai 2021-09-05 11:41:20 +02:00 committed by Tom Gundersen
parent 08a937c9c1
commit 19a3bdf450
595 changed files with 91597 additions and 21922 deletions

View file

@ -23,13 +23,14 @@ import (
"io/ioutil"
"net/http"
"net/url"
"reflect"
"strconv"
"strings"
"time"
"cloud.google.com/go/internal/trace"
"google.golang.org/api/googleapi"
storagepb "google.golang.org/genproto/googleapis/storage/v2"
"google.golang.org/protobuf/proto"
)
var crc32cTable = crc32.MakeTable(crc32.Castagnoli)
@ -95,6 +96,10 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader")
defer func() { trace.EndSpan(ctx, err) }()
if o.c.gc != nil {
return o.newRangeReaderWithGRPC(ctx, offset, length)
}
if err := o.validate(); err != nil {
return nil, err
}
@ -135,6 +140,11 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
// Define a function that initiates a Read with offset and length, assuming we
// have already read seen bytes.
reopen := func(seen int64) (*http.Response, error) {
// If the context has already expired, return immediately without making a
// call.
if err := ctx.Err(); err != nil {
return nil, err
}
start := offset + seen
if length < 0 && start < 0 {
req.Header.Set("Range", fmt.Sprintf("bytes=%d", start))
@ -145,7 +155,14 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, offset+length-1))
}
// We wait to assign conditions here because the generation number can change in between reopen() runs.
req.URL.RawQuery = conditionsQuery(gen, o.conds)
if err := setConditionsHeaders(req.Header, o.conds); err != nil {
return nil, err
}
// If an object generation is specified, include generation as query string parameters.
if gen >= 0 {
req.URL.RawQuery = fmt.Sprintf("generation=%d", gen)
}
var res *http.Response
err = runWithRetry(ctx, func() error {
res, err = o.c.hc.Do(req)
@ -216,6 +233,8 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
}
// Content range is formatted <first byte>-<last byte>/<total size>. We take
// the total size.
size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
if err != nil {
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
@ -320,6 +339,24 @@ func parseCRC32c(res *http.Response) (uint32, bool) {
return 0, false
}
// setConditionsHeaders sets precondition request headers for downloads
// using the XML API. It assumes that the conditions have been validated.
func setConditionsHeaders(headers http.Header, conds *Conditions) error {
	if conds == nil {
		return nil
	}
	if conds.MetagenerationMatch != 0 {
		headers.Set("x-goog-if-metageneration-match", fmt.Sprint(conds.MetagenerationMatch))
	}
	// Generation preconditions are mutually exclusive; GenerationMatch wins
	// over DoesNotExist (which is expressed as generation-match 0).
	if conds.GenerationMatch != 0 {
		headers.Set("x-goog-if-generation-match", fmt.Sprint(conds.GenerationMatch))
	} else if conds.DoesNotExist {
		headers.Set("x-goog-if-generation-match", "0")
	}
	return nil
}
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
// Reader reads a Cloud Storage object.
@ -336,15 +373,36 @@ type Reader struct {
wantCRC uint32 // the CRC32c value the server sent in the header
gotCRC uint32 // running crc
reopen func(seen int64) (*http.Response, error)
// The following fields are only for use in the gRPC hybrid client.
stream storagepb.Storage_ReadObjectClient
reopenWithGRPC func(seen int64) (*readStreamResponse, context.CancelFunc, error)
leftovers []byte
cancelStream context.CancelFunc
}
// readStreamResponse bundles a gRPC object-read stream with the first
// message already received on it (opening the stream performs one Recv,
// so the first chunk arrives together with the stream handle).
type readStreamResponse struct {
	stream   storagepb.Storage_ReadObjectClient
	response *storagepb.ReadObjectResponse
}
// Close closes the Reader. It must be called when done reading.
func (r *Reader) Close() error {
	if r.body == nil {
		// gRPC-based reader: there is no HTTP body, so shut down the
		// stream instead.
		r.closeStream()
		return nil
	}
	return r.body.Close()
}
func (r *Reader) Read(p []byte) (int, error) {
n, err := r.readWithRetry(p)
read := r.readWithRetry
if r.reopenWithGRPC != nil {
read = r.readWithGRPC
}
n, err := read(p)
if r.remain != -1 {
r.remain -= int64(n)
}
@ -363,17 +421,148 @@ func (r *Reader) Read(p []byte) (int, error) {
return n, err
}
// newRangeReaderWithGRPC creates a new Reader with the given range that uses
// gRPC to read Object content.
//
// This is an experimental API and not intended for public use.
func (o *ObjectHandle) newRangeReaderWithGRPC(ctx context.Context, offset, length int64) (r *Reader, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.newRangeReaderWithGRPC")
	defer func() { trace.EndSpan(ctx, err) }()

	if o.c.gc == nil {
		err = fmt.Errorf("handle doesn't have a gRPC client initialized")
		return
	}
	if err = o.validate(); err != nil {
		return
	}

	// A negative length means "read to the end of the object", but the
	// read_limit field it corresponds to uses zero to mean the same thing. Thus
	// we coerce the length to 0 to read to the end of the object.
	if length < 0 {
		length = 0
	}

	// For now, there are only globally unique buckets, and "_" is the alias
	// project ID for such buckets.
	b := bucketResourceName("_", o.bucket)
	req := &storagepb.ReadObjectRequest{
		Bucket: b,
		Object: o.object,
	}
	// The default is a negative value, which means latest.
	if o.gen >= 0 {
		req.Generation = o.gen
	}

	// Define a function that initiates a Read with offset and length, assuming
	// we have already read seen bytes. It is reused by the Reader for
	// reopening the stream after retryable mid-read failures.
	reopen := func(seen int64) (*readStreamResponse, context.CancelFunc, error) {
		// If the context has already expired, return immediately without making
		// a call.
		if err := ctx.Err(); err != nil {
			return nil, nil, err
		}

		// A per-stream cancelable context so that closing this stream does
		// not cancel the caller's ctx.
		cc, cancel := context.WithCancel(ctx)

		start := offset + seen
		// Only set a ReadLimit if length is greater than zero, because zero
		// means read it all.
		if length > 0 {
			req.ReadLimit = length - seen
		}
		req.ReadOffset = start

		setRequestConditions(req, o.conds)

		var stream storagepb.Storage_ReadObjectClient
		var msg *storagepb.ReadObjectResponse
		var err error

		// Both opening the stream and the first Recv are retried together,
		// since many server-side errors only surface on the first Recv.
		err = runWithRetry(cc, func() error {
			stream, err = o.c.gc.ReadObject(cc, req)
			if err != nil {
				return err
			}

			msg, err = stream.Recv()

			return err
		})
		if err != nil {
			// Close the stream context we just created to ensure we don't leak
			// resources.
			cancel()
			return nil, nil, err
		}

		return &readStreamResponse{stream, msg}, cancel, nil
	}

	res, cancel, err := reopen(0)
	if err != nil {
		return nil, err
	}

	r = &Reader{
		stream:         res.stream,
		reopenWithGRPC: reopen,
		cancelStream:   cancel,
	}

	// The first message was Recv'd on stream open, use it to populate the
	// object metadata.
	msg := res.response
	obj := msg.GetMetadata()
	// This is the size of the entire object, even if only a range was requested.
	size := obj.GetSize()

	r.Attrs = ReaderObjectAttrs{
		Size:            size,
		ContentType:     obj.GetContentType(),
		ContentEncoding: obj.GetContentEncoding(),
		CacheControl:    obj.GetCacheControl(),
		LastModified:    obj.GetUpdateTime().AsTime(),
		Metageneration:  obj.GetMetageneration(),
		Generation:      obj.GetGeneration(),
	}
	r.size = size
	cr := msg.GetContentRange()
	if cr != nil {
		r.Attrs.StartOffset = cr.GetStart()
		r.remain = cr.GetEnd() - cr.GetStart() + 1
	} else {
		r.remain = size
	}

	// Only support checksums when reading an entire object, not a range.
	if msg.GetObjectChecksums().Crc32C != nil && offset == 0 && length == 0 {
		r.wantCRC = msg.GetObjectChecksums().GetCrc32C()
		r.checkCRC = true
	}

	// Store the content from the first Recv in the client buffer for reading
	// later.
	r.leftovers = msg.GetChecksummedData().GetContent()

	return r, nil
}
func (r *Reader) readWithRetry(p []byte) (int, error) {
n := 0
for len(p[n:]) > 0 {
m, err := r.body.Read(p[n:])
n += m
r.seen += int64(m)
if !shouldRetryRead(err) {
if err == nil || err == io.EOF {
return n, err
}
// Read failed, but we will try again. Send a ranged read request that takes
// into account the number of bytes we've already seen.
// Read failed (likely due to connection issues), but we will try to reopen
// the pipe and continue. Send a ranged read request that takes into account
// the number of bytes we've already seen.
res, err := r.reopen(r.seen)
if err != nil {
// reopen already retries
@ -385,11 +574,136 @@ func (r *Reader) readWithRetry(p []byte) (int, error) {
return n, nil
}
func shouldRetryRead(err error) bool {
if err == nil {
return false
// closeStream cancels a stream's context in order for it to be closed and
// collected, then drops the Reader's reference to it.
//
// This is an experimental API and not intended for public use.
func (r *Reader) closeStream() {
	if cancel := r.cancelStream; cancel != nil {
		cancel()
	}
	r.stream = nil
}
// readWithGRPC reads bytes into the user's buffer from an open gRPC stream.
//
// This is an experimental API and not intended for public use.
func (r *Reader) readWithGRPC(p []byte) (int, error) {
	// No stream to read from, either never initialized or Close was called.
	// Note: There is a potential concurrency issue if multiple routines are
	// using the same reader. One encounters an error and the stream is closed
	// and then reopened while the other routine attempts to read from it.
	if r.stream == nil {
		return 0, fmt.Errorf("reader has been closed")
	}

	// The entire object has been read by this reader, return EOF.
	if r.size != 0 && r.size == r.seen {
		return 0, io.EOF
	}

	var n int
	// Read leftovers and return what was available to conform to the Reader
	// interface: https://pkg.go.dev/io#Reader. A partial fill is a valid
	// Read result; the caller is expected to call again for more.
	if len(r.leftovers) > 0 {
		n = copy(p, r.leftovers)
		r.seen += int64(n)
		r.leftovers = r.leftovers[n:]
		return n, nil
	}

	// Attempt to Recv the next message on the stream (recv transparently
	// reopens the stream on retryable errors).
	msg, err := r.recv()
	if err != nil {
		return 0, err
	}

	// TODO: Determine if we need to capture incremental CRC32C for this
	// chunk. The Object CRC32C checksum is captured when directed to read
	// the entire Object. If directed to read a range, we may need to
	// calculate the range's checksum for verification if the checksum is
	// present in the response here.

	// TODO: Figure out if we need to support decompressive transcoding
	// https://cloud.google.com/storage/docs/transcoding.
	content := msg.GetChecksummedData().GetContent()
	n = copy(p[n:], content)
	leftover := len(content) - n
	if leftover > 0 {
		// Wasn't able to copy all of the data in the message, store for
		// future Read calls.
		// TODO: Instead of acquiring a new block of memory, should we reuse
		// the existing leftovers slice, expanding it if necessary?
		r.leftovers = make([]byte, leftover)
		copy(r.leftovers, content[n:])
	}
	r.seen += int64(n)

	return n, nil
}
// recv attempts to Recv the next message on the stream. In the event
// that a retryable error is encountered, the stream will be closed, reopened,
// and Recv again. This will attempt to Recv until one of the following is true:
//
// * Recv is successful
// * A non-retryable error is encountered
// * The Reader's context is canceled
//
// The last error received is the one that is returned, which could be from
// an attempt to reopen the stream.
//
// This is an experimental API and not intended for public use.
func (r *Reader) recv() (*storagepb.ReadObjectResponse, error) {
	msg, err := r.stream.Recv()
	if err == nil || !shouldRetry(err) {
		return msg, err
	}
	// Retryable failure: "close" the existing stream and immediately attempt
	// to reopen it, backing off if further attempts are necessary. Reopening
	// the stream Recvs the first message, so a successful retry yields the
	// next logical chunk.
	return r.reopenStream(r.seen)
}
// reopenStream "closes" the existing stream and attempts to reopen a stream at
// the given offset, setting the Reader's stream and cancelStream properties in
// the process. It returns the first message Recv'd on the new stream.
//
// This is an experimental API and not intended for public use.
func (r *Reader) reopenStream(seen int64) (*storagepb.ReadObjectResponse, error) {
	// Close existing stream and initialize new stream with updated offset.
	r.closeStream()
	// Bug fix: honor the seen argument instead of silently reading r.seen —
	// previously the parameter was accepted but ignored, making the API
	// misleading (the only caller passes r.seen, so behavior is unchanged).
	res, cancel, err := r.reopenWithGRPC(seen)
	if err != nil {
		return nil, err
	}
	r.stream = res.stream
	r.cancelStream = cancel
	return res.response, nil
}
// setRequestConditions is used to apply the given Conditions to a gRPC request
// message.
//
// This is an experimental API and not intended for public use.
func setRequestConditions(req *storagepb.ReadObjectRequest, conds *Conditions) {
if conds == nil {
return
}
if conds.MetagenerationMatch != 0 {
req.IfMetagenerationMatch = proto.Int64(conds.MetagenerationMatch)
} else if conds.MetagenerationNotMatch != 0 {
req.IfMetagenerationNotMatch = proto.Int64(conds.MetagenerationNotMatch)
}
switch {
case conds.GenerationNotMatch != 0:
req.IfGenerationNotMatch = proto.Int64(conds.GenerationNotMatch)
case conds.GenerationMatch != 0:
req.IfGenerationMatch = proto.Int64(conds.GenerationMatch)
case conds.DoesNotExist:
req.IfGenerationMatch = proto.Int64(0)
}
return strings.HasSuffix(err.Error(), "INTERNAL_ERROR") && strings.Contains(reflect.TypeOf(err).String(), "http2")
}
// Size returns the size of the object in bytes.