build(deps): bump cloud.google.com/go/storage from 1.22.1 to 1.26.0
Bumps [cloud.google.com/go/storage](https://github.com/googleapis/google-cloud-go) from 1.22.1 to 1.26.0.
- [Release notes](https://github.com/googleapis/google-cloud-go/releases)
- [Changelog](https://github.com/googleapis/google-cloud-go/blob/main/CHANGES.md)
- [Commits](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.22.1...spanner/v1.26.0)

---
updated-dependencies:
- dependency-name: cloud.google.com/go/storage
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
This commit is contained in:
parent 58c87198c6
commit efddacc682

98 changed files with 51657 additions and 50548 deletions
2 vendor/cloud.google.com/go/storage/.release-please-manifest.json generated vendored
@@ -1,3 +1,3 @@
 {
-    "storage": "1.22.1"
+    "storage": "1.26.0"
 }
40 vendor/cloud.google.com/go/storage/CHANGES.md generated vendored
@@ -1,6 +1,46 @@
# Changes

## [1.26.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.25.0...storage/v1.26.0) (2022-08-29)

### Features

* **storage:** export ShouldRetry ([#6370](https://github.com/googleapis/google-cloud-go/issues/6370)) ([0da9ab0](https://github.com/googleapis/google-cloud-go/commit/0da9ab0831540569dc04c0a23437b084b1564e15)), refs [#6362](https://github.com/googleapis/google-cloud-go/issues/6362)

### Bug Fixes

* **storage:** allow to use age=0 in OLM conditions ([#6204](https://github.com/googleapis/google-cloud-go/issues/6204)) ([c85704f](https://github.com/googleapis/google-cloud-go/commit/c85704f4284626ce728cb48f3b130f2ce2a0165e))

## [1.25.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.24.0...storage/v1.25.0) (2022-08-11)

### Features

* **storage/internal:** Add routing annotations ([8a8ba85](https://github.com/googleapis/google-cloud-go/commit/8a8ba85311f85701c97fd7c10f1d88b738ce423f))
* **storage:** refactor to use transport-agnostic interface ([#6465](https://github.com/googleapis/google-cloud-go/issues/6465)) ([d03c3e1](https://github.com/googleapis/google-cloud-go/commit/d03c3e15a79fe9afa1232d9c8bd4c484a9bb927e))

## [1.24.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.23.0...storage/v1.24.0) (2022-07-20)

### Features

* **storage:** add Custom Placement Config Dual Region Support ([#6294](https://github.com/googleapis/google-cloud-go/issues/6294)) ([5a8c607](https://github.com/googleapis/google-cloud-go/commit/5a8c607e3a9a3265887e27cb13f8943f3e3fa23d))

## [1.23.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.22.1...storage/v1.23.0) (2022-06-23)

### Features

* **storage:** add support for OLM Prefix/Suffix ([#5929](https://github.com/googleapis/google-cloud-go/issues/5929)) ([ec21d10](https://github.com/googleapis/google-cloud-go/commit/ec21d10d6d1b01aa97a52560319775041707690d))
* **storage:** support AbortIncompleteMultipartUpload LifecycleAction ([#5812](https://github.com/googleapis/google-cloud-go/issues/5812)) ([fdec929](https://github.com/googleapis/google-cloud-go/commit/fdec929b9da6e01dda0ab3c72544d44d6bd82bd4)), refs [#5795](https://github.com/googleapis/google-cloud-go/issues/5795)

### Bug Fixes

* **storage:** allow for Age *int64 type and int64 type ([#6230](https://github.com/googleapis/google-cloud-go/issues/6230)) ([cc7acb8](https://github.com/googleapis/google-cloud-go/commit/cc7acb8bffb31828e9e96d4834a65f9728494473))

### [1.22.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.22.0...storage/v1.22.1) (2022-05-19)
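Two of the changes pulled in by this bump are caller-visible additions to the storage package: the exported `ShouldRetry` predicate (1.26.0) and the new lifecycle options (OLM `age=0` via `AllObjects`, prefix/suffix matching, and the `AbortIncompleteMultipartUpload` action, 1.23.0+). Below is a minimal sketch of how downstream code might exercise them; the bucket name, project ID, and rule values are placeholders, not values taken from this repository.

```go
package main

import (
	"context"
	"errors"
	"io"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// ShouldRetry (exported in 1.26.0) is the library's default retry predicate;
	// callers can extend it instead of replacing it wholesale.
	bkt := client.Bucket("example-bucket").Retryer( // placeholder bucket name
		storage.WithErrorFunc(func(err error) bool {
			return storage.ShouldRetry(err) || errors.Is(err, io.ErrUnexpectedEOF)
		}),
	)

	// Lifecycle options added in this version range: AllObjects (OLM age=0),
	// prefix matching, and aborting incomplete multipart uploads.
	attrs := &storage.BucketAttrs{
		Lifecycle: storage.Lifecycle{
			Rules: []storage.LifecycleRule{
				{
					Action:    storage.LifecycleAction{Type: storage.AbortIncompleteMPUAction},
					Condition: storage.LifecycleCondition{AgeInDays: 7},
				},
				{
					Action: storage.LifecycleAction{Type: storage.DeleteAction},
					Condition: storage.LifecycleCondition{
						AllObjects:    true,
						MatchesPrefix: []string{"tmp/"},
					},
				},
			},
		},
	}
	if err := bkt.Create(ctx, "example-project", attrs); err != nil { // placeholder project ID
		log.Fatal(err)
	}
}
```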
104 vendor/cloud.google.com/go/storage/acl.go generated vendored
|
|
@ -20,9 +20,8 @@ import (
|
|||
"reflect"
|
||||
|
||||
"cloud.google.com/go/internal/trace"
|
||||
"google.golang.org/api/googleapi"
|
||||
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
storagepb "google.golang.org/genproto/googleapis/storage/v2"
|
||||
)
|
||||
|
||||
// ACLRole is the level of access to grant.
|
||||
|
|
@ -67,6 +66,8 @@ type ProjectTeam struct {
|
|||
}
|
||||
|
||||
// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object.
|
||||
// ACLHandle on an object operates on the latest generation of that object by default.
|
||||
// Selecting a specific generation of an object is not currently supported by the client.
|
||||
type ACLHandle struct {
|
||||
c *Client
|
||||
bucket string
|
||||
|
|
@ -119,111 +120,46 @@ func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) {
|
|||
}
|
||||
|
||||
func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
|
||||
var acls *raw.ObjectAccessControls
|
||||
var err error
|
||||
req := a.c.raw.DefaultObjectAccessControls.List(a.bucket)
|
||||
a.configureCall(ctx, req)
|
||||
err = run(ctx, func() error {
|
||||
acls, err = req.Do()
|
||||
return err
|
||||
}, a.retry, true, setRetryHeaderHTTP(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return toObjectACLRules(acls.Items), nil
|
||||
opts := makeStorageOpts(true, a.retry, a.userProject)
|
||||
return a.c.tc.ListDefaultObjectACLs(ctx, a.bucket, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
|
||||
req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity))
|
||||
a.configureCall(ctx, req)
|
||||
|
||||
return run(ctx, func() error {
|
||||
return req.Do()
|
||||
}, a.retry, false, setRetryHeaderHTTP(req))
|
||||
opts := makeStorageOpts(false, a.retry, a.userProject)
|
||||
return a.c.tc.DeleteDefaultObjectACL(ctx, a.bucket, entity, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
|
||||
var acls *raw.BucketAccessControls
|
||||
var err error
|
||||
req := a.c.raw.BucketAccessControls.List(a.bucket)
|
||||
a.configureCall(ctx, req)
|
||||
err = run(ctx, func() error {
|
||||
acls, err = req.Do()
|
||||
return err
|
||||
}, a.retry, true, setRetryHeaderHTTP(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return toBucketACLRules(acls.Items), nil
|
||||
opts := makeStorageOpts(true, a.retry, a.userProject)
|
||||
return a.c.tc.ListBucketACLs(ctx, a.bucket, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
|
||||
acl := &raw.BucketAccessControl{
|
||||
Bucket: a.bucket,
|
||||
Entity: string(entity),
|
||||
Role: string(role),
|
||||
}
|
||||
req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl)
|
||||
a.configureCall(ctx, req)
|
||||
return run(ctx, func() error {
|
||||
_, err := req.Do()
|
||||
return err
|
||||
}, a.retry, false, setRetryHeaderHTTP(req))
|
||||
opts := makeStorageOpts(false, a.retry, a.userProject)
|
||||
return a.c.tc.UpdateBucketACL(ctx, a.bucket, entity, role, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
|
||||
req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity))
|
||||
a.configureCall(ctx, req)
|
||||
return run(ctx, func() error {
|
||||
return req.Do()
|
||||
}, a.retry, false, setRetryHeaderHTTP(req))
|
||||
opts := makeStorageOpts(false, a.retry, a.userProject)
|
||||
return a.c.tc.DeleteBucketACL(ctx, a.bucket, entity, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
|
||||
var acls *raw.ObjectAccessControls
|
||||
var err error
|
||||
req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object)
|
||||
a.configureCall(ctx, req)
|
||||
err = run(ctx, func() error {
|
||||
acls, err = req.Do()
|
||||
return err
|
||||
}, a.retry, true, setRetryHeaderHTTP(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return toObjectACLRules(acls.Items), nil
|
||||
opts := makeStorageOpts(true, a.retry, a.userProject)
|
||||
return a.c.tc.ListObjectACLs(ctx, a.bucket, a.object, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error {
|
||||
type setRequest interface {
|
||||
Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error)
|
||||
Header() http.Header
|
||||
}
|
||||
|
||||
acl := &raw.ObjectAccessControl{
|
||||
Bucket: a.bucket,
|
||||
Entity: string(entity),
|
||||
Role: string(role),
|
||||
}
|
||||
var req setRequest
|
||||
opts := makeStorageOpts(false, a.retry, a.userProject)
|
||||
if isBucketDefault {
|
||||
req = a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl)
|
||||
} else {
|
||||
req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl)
|
||||
return a.c.tc.UpdateDefaultObjectACL(ctx, a.bucket, entity, role, opts...)
|
||||
}
|
||||
a.configureCall(ctx, req)
|
||||
return run(ctx, func() error {
|
||||
_, err := req.Do()
|
||||
return err
|
||||
}, a.retry, false, setRetryHeaderHTTP(req))
|
||||
return a.c.tc.UpdateObjectACL(ctx, a.bucket, a.object, entity, role, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
|
||||
req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity))
|
||||
a.configureCall(ctx, req)
|
||||
return run(ctx, func() error {
|
||||
return req.Do()
|
||||
}, a.retry, false, setRetryHeaderHTTP(req))
|
||||
opts := makeStorageOpts(false, a.retry, a.userProject)
|
||||
return a.c.tc.DeleteObjectACL(ctx, a.bucket, a.object, entity, opts...)
|
||||
}
|
||||
|
||||
func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) {
|
||||
|
|
|
465 vendor/cloud.google.com/go/storage/bucket.go generated vendored
|
|
@ -20,21 +20,19 @@ import (
|
|||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/compute/metadata"
|
||||
"cloud.google.com/go/internal/optional"
|
||||
"cloud.google.com/go/internal/trace"
|
||||
"github.com/googleapis/go-type-adapters/adapters"
|
||||
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
|
||||
"google.golang.org/api/googleapi"
|
||||
"google.golang.org/api/iamcredentials/v1"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
"google.golang.org/genproto/googleapis/storage/v2"
|
||||
storagepb "google.golang.org/genproto/googleapis/storage/v2"
|
||||
dpb "google.golang.org/genproto/googleapis/type/date"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
|
|
@ -56,7 +54,8 @@ type BucketHandle struct {
|
|||
// The supplied name must contain only lowercase letters, numbers, dashes,
|
||||
// underscores, and dots. The full specification for valid bucket names can be
|
||||
// found at:
|
||||
// https://cloud.google.com/storage/docs/bucket-naming
|
||||
//
|
||||
// https://cloud.google.com/storage/docs/bucket-naming
|
||||
func (c *Client) Bucket(name string) *BucketHandle {
|
||||
retry := c.retry.clone()
|
||||
return &BucketHandle{
|
||||
|
|
@ -83,27 +82,11 @@ func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *Buck
|
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
var bkt *raw.Bucket
|
||||
if attrs != nil {
|
||||
bkt = attrs.toRawBucket()
|
||||
} else {
|
||||
bkt = &raw.Bucket{}
|
||||
o := makeStorageOpts(true, b.retry, b.userProject)
|
||||
if _, err := b.c.tc.CreateBucket(ctx, projectID, b.name, attrs, o...); err != nil {
|
||||
return err
|
||||
}
|
||||
bkt.Name = b.name
|
||||
// If there is lifecycle information but no location, explicitly set
|
||||
// the location. This is a GCS quirk/bug.
|
||||
if bkt.Location == "" && bkt.Lifecycle != nil {
|
||||
bkt.Location = "US"
|
||||
}
|
||||
req := b.c.raw.Buckets.Insert(projectID, bkt)
|
||||
setClientHeader(req.Header())
|
||||
if attrs != nil && attrs.PredefinedACL != "" {
|
||||
req.PredefinedAcl(attrs.PredefinedACL)
|
||||
}
|
||||
if attrs != nil && attrs.PredefinedDefaultObjectACL != "" {
|
||||
req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL)
|
||||
}
|
||||
return run(ctx, func() error { _, err := req.Context(ctx).Do(); return err }, b.retry, true, setRetryHeaderHTTP(req))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete deletes the Bucket.
|
||||
|
|
@ -111,24 +94,8 @@ func (b *BucketHandle) Delete(ctx context.Context) (err error) {
|
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Delete")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
req, err := b.newDeleteCall()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return run(ctx, func() error { return req.Context(ctx).Do() }, b.retry, true, setRetryHeaderHTTP(req))
|
||||
}
|
||||
|
||||
func (b *BucketHandle) newDeleteCall() (*raw.BucketsDeleteCall, error) {
|
||||
req := b.c.raw.Buckets.Delete(b.name)
|
||||
setClientHeader(req.Header())
|
||||
if err := applyBucketConds("BucketHandle.Delete", b.conds, req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if b.userProject != "" {
|
||||
req.UserProject(b.userProject)
|
||||
}
|
||||
return req, nil
|
||||
o := makeStorageOpts(true, b.retry, b.userProject)
|
||||
return b.c.tc.DeleteBucket(ctx, b.name, b.conds, o...)
|
||||
}
|
||||
|
||||
// ACL returns an ACLHandle, which provides access to the bucket's access control list.
|
||||
|
|
@ -151,7 +118,8 @@ func (b *BucketHandle) DefaultObjectACL() *ACLHandle {
|
|||
//
|
||||
// name must consist entirely of valid UTF-8-encoded runes. The full specification
|
||||
// for valid object names can be found at:
|
||||
// https://cloud.google.com/storage/docs/naming-objects
|
||||
//
|
||||
// https://cloud.google.com/storage/docs/naming-objects
|
||||
func (b *BucketHandle) Object(name string) *ObjectHandle {
|
||||
retry := b.retry.clone()
|
||||
return &ObjectHandle{
|
||||
|
|
@ -176,35 +144,8 @@ func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error
|
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Attrs")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
req, err := b.newGetCall()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var resp *raw.Bucket
|
||||
err = run(ctx, func() error {
|
||||
resp, err = req.Context(ctx).Do()
|
||||
return err
|
||||
}, b.retry, true, setRetryHeaderHTTP(req))
|
||||
var e *googleapi.Error
|
||||
if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound {
|
||||
return nil, ErrBucketNotExist
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newBucket(resp)
|
||||
}
|
||||
|
||||
func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) {
|
||||
req := b.c.raw.Buckets.Get(b.name).Projection("full")
|
||||
setClientHeader(req.Header())
|
||||
if err := applyBucketConds("BucketHandle.Attrs", b.conds, req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if b.userProject != "" {
|
||||
req.UserProject(b.userProject)
|
||||
}
|
||||
return req, nil
|
||||
o := makeStorageOpts(true, b.retry, b.userProject)
|
||||
return b.c.tc.GetBucket(ctx, b.name, b.conds, o...)
|
||||
}
|
||||
|
||||
// Update updates a bucket's attributes.
|
||||
|
|
@ -212,43 +153,9 @@ func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (
|
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
req, err := b.newPatchCall(&uattrs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if uattrs.PredefinedACL != "" {
|
||||
req.PredefinedAcl(uattrs.PredefinedACL)
|
||||
}
|
||||
if uattrs.PredefinedDefaultObjectACL != "" {
|
||||
req.PredefinedDefaultObjectAcl(uattrs.PredefinedDefaultObjectACL)
|
||||
}
|
||||
|
||||
isIdempotent := b.conds != nil && b.conds.MetagenerationMatch != 0
|
||||
|
||||
var rawBucket *raw.Bucket
|
||||
call := func() error {
|
||||
rb, err := req.Context(ctx).Do()
|
||||
rawBucket = rb
|
||||
return err
|
||||
}
|
||||
|
||||
if err := run(ctx, call, b.retry, isIdempotent, setRetryHeaderHTTP(req)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newBucket(rawBucket)
|
||||
}
|
||||
|
||||
func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPatchCall, error) {
|
||||
rb := uattrs.toRawBucket()
|
||||
req := b.c.raw.Buckets.Patch(b.name, rb).Projection("full")
|
||||
setClientHeader(req.Header())
|
||||
if err := applyBucketConds("BucketHandle.Update", b.conds, req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if b.userProject != "" {
|
||||
req.UserProject(b.userProject)
|
||||
}
|
||||
return req, nil
|
||||
o := makeStorageOpts(isIdempotent, b.retry, b.userProject)
|
||||
return b.c.tc.UpdateBucket(ctx, b.name, &uattrs, b.conds, o...)
|
||||
}
|
||||
|
||||
// SignedURL returns a URL for the specified object. Signed URLs allow anyone
|
||||
|
|
@ -461,8 +368,13 @@ type BucketAttrs struct {
|
|||
PredefinedDefaultObjectACL string
|
||||
|
||||
// Location is the location of the bucket. It defaults to "US".
|
||||
// If specifying a dual-region, CustomPlacementConfig should be set in conjunction.
|
||||
Location string
|
||||
|
||||
// The bucket's custom placement configuration that holds a list of
|
||||
// regional locations for custom dual regions.
|
||||
CustomPlacementConfig *CustomPlacementConfig
|
||||
|
||||
// MetaGeneration is the metadata generation of the bucket.
|
||||
// This field is read-only.
|
||||
MetaGeneration int64
|
||||
|
|
@ -645,6 +557,13 @@ const (
|
|||
// SetStorageClassAction changes the storage class of live and/or archived
|
||||
// objects.
|
||||
SetStorageClassAction = "SetStorageClass"
|
||||
|
||||
// AbortIncompleteMPUAction is a lifecycle action that aborts an incomplete
|
||||
// multipart upload when the multipart upload meets the conditions specified
|
||||
// in the lifecycle rule. The AgeInDays condition is the only allowed
|
||||
// condition for this action. AgeInDays is measured from the time the
|
||||
// multipart upload was created.
|
||||
AbortIncompleteMPUAction = "AbortIncompleteMultipartUpload"
|
||||
)
|
||||
|
||||
// LifecycleRule is a lifecycle configuration rule.
|
||||
|
|
@ -665,9 +584,8 @@ type LifecycleRule struct {
|
|||
type LifecycleAction struct {
|
||||
// Type is the type of action to take on matching objects.
|
||||
//
|
||||
// Acceptable values are "Delete" to delete matching objects and
|
||||
// "SetStorageClass" to set the storage class defined in StorageClass on
|
||||
// matching objects.
|
||||
// Acceptable values are storage.DeleteAction, storage.SetStorageClassAction,
|
||||
// and storage.AbortIncompleteMPUAction.
|
||||
Type string
|
||||
|
||||
// StorageClass is the storage class to set on matching objects if the Action
|
||||
|
|
@ -692,7 +610,12 @@ const (
|
|||
//
|
||||
// All configured conditions must be met for the associated action to be taken.
|
||||
type LifecycleCondition struct {
|
||||
// AllObjects is used to select all objects in a bucket by
|
||||
// setting AgeInDays to 0.
|
||||
AllObjects bool
|
||||
|
||||
// AgeInDays is the age of the object in days.
|
||||
// If you want to set AgeInDays to `0` use AllObjects set to `true`.
|
||||
AgeInDays int64
|
||||
|
||||
// CreatedBefore is the time the object was created.
|
||||
|
|
@ -710,21 +633,31 @@ type LifecycleCondition struct {
|
|||
|
||||
// DaysSinceCustomTime is the days elapsed since the CustomTime date of the
|
||||
// object. This condition can only be satisfied if CustomTime has been set.
|
||||
// Note: Using `0` as the value will be ignored by the library and not sent to the API.
|
||||
DaysSinceCustomTime int64
|
||||
|
||||
// DaysSinceNoncurrentTime is the days elapsed since the noncurrent timestamp
|
||||
// of the object. This condition is relevant only for versioned objects.
|
||||
// Note: Using `0` as the value will be ignored by the library and not sent to the API.
|
||||
DaysSinceNoncurrentTime int64
|
||||
|
||||
// Liveness specifies the object's liveness. Relevant only for versioned objects
|
||||
Liveness Liveness
|
||||
|
||||
// MatchesPrefix is the condition matching an object if any of the
|
||||
// matches_prefix strings are an exact prefix of the object's name.
|
||||
MatchesPrefix []string
|
||||
|
||||
// MatchesStorageClasses is the condition matching the object's storage
|
||||
// class.
|
||||
//
|
||||
// Values include "STANDARD", "NEARLINE", "COLDLINE" and "ARCHIVE".
|
||||
MatchesStorageClasses []string
|
||||
|
||||
// MatchesSuffix is the condition matching an object if any of the
|
||||
// matches_suffix strings are an exact suffix of the object's name.
|
||||
MatchesSuffix []string
|
||||
|
||||
// NoncurrentTimeBefore is the noncurrent timestamp of the object. This
|
||||
// condition is satisfied when an object's noncurrent timestamp is before
|
||||
// midnight of the specified date in UTC.
|
||||
|
|
@ -737,6 +670,7 @@ type LifecycleCondition struct {
|
|||
// If the value is N, this condition is satisfied when there are at least N
|
||||
// versions (including the live version) newer than this version of the
|
||||
// object.
|
||||
// Note: Using `0` as the value will be ignored by the library and not sent to the API.
|
||||
NumNewerVersions int64
|
||||
}
|
||||
|
||||
|
|
@ -768,6 +702,15 @@ type BucketWebsite struct {
|
|||
NotFoundPage string
|
||||
}
|
||||
|
||||
// CustomPlacementConfig holds the bucket's custom placement
|
||||
// configuration for Custom Dual Regions. See
|
||||
// https://cloud.google.com/storage/docs/locations#location-dr for more information.
|
||||
type CustomPlacementConfig struct {
|
||||
// The list of regional locations in which data is placed.
|
||||
// Custom Dual Regions require exactly 2 regional locations.
|
||||
DataLocations []string
|
||||
}
|
||||
|
||||
func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
|
||||
if b == nil {
|
||||
return nil, nil
|
||||
|
|
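The `CustomPlacementConfig` type added above is the caller-facing side of the dual-region support noted in the 1.24.0 changelog entry. A small sketch of how it might be used when creating a bucket; the bucket name, project ID, and region codes are placeholders, not values taken from this change.

```go
package main

import (
	"context"

	"cloud.google.com/go/storage"
)

// createDualRegionBucket is a sketch only; the names below are hypothetical.
func createDualRegionBucket(ctx context.Context, client *storage.Client) error {
	attrs := &storage.BucketAttrs{
		// Per the field docs above, Location and CustomPlacementConfig are set
		// together to describe a custom dual-region bucket.
		Location: "US",
		CustomPlacementConfig: &storage.CustomPlacementConfig{
			// Custom dual regions require exactly two regional locations.
			DataLocations: []string{"US-EAST1", "US-WEST1"},
		},
	}
	return client.Bucket("example-dual-region-bucket").Create(ctx, "example-project", attrs)
}
```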
@ -801,6 +744,7 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
|
|||
LocationType: b.LocationType,
|
||||
ProjectNumber: b.ProjectNumber,
|
||||
RPO: toRPO(b),
|
||||
CustomPlacementConfig: customPlacementFromRaw(b.CustomPlacementConfig),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
|
@ -831,6 +775,7 @@ func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs {
|
|||
PublicAccessPrevention: toPublicAccessPreventionFromProto(b.GetIamConfig()),
|
||||
LocationType: b.GetLocationType(),
|
||||
RPO: toRPOFromProto(b),
|
||||
CustomPlacementConfig: customPlacementFromProto(b.GetCustomPlacementConfig()),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -868,22 +813,23 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket {
|
|||
}
|
||||
}
|
||||
return &raw.Bucket{
|
||||
Name: b.Name,
|
||||
Location: b.Location,
|
||||
StorageClass: b.StorageClass,
|
||||
Acl: toRawBucketACL(b.ACL),
|
||||
DefaultObjectAcl: toRawObjectACL(b.DefaultObjectACL),
|
||||
Versioning: v,
|
||||
Labels: labels,
|
||||
Billing: bb,
|
||||
Lifecycle: toRawLifecycle(b.Lifecycle),
|
||||
RetentionPolicy: b.RetentionPolicy.toRawRetentionPolicy(),
|
||||
Cors: toRawCORS(b.CORS),
|
||||
Encryption: b.Encryption.toRawBucketEncryption(),
|
||||
Logging: b.Logging.toRawBucketLogging(),
|
||||
Website: b.Website.toRawBucketWebsite(),
|
||||
IamConfiguration: bktIAM,
|
||||
Rpo: b.RPO.String(),
|
||||
Name: b.Name,
|
||||
Location: b.Location,
|
||||
StorageClass: b.StorageClass,
|
||||
Acl: toRawBucketACL(b.ACL),
|
||||
DefaultObjectAcl: toRawObjectACL(b.DefaultObjectACL),
|
||||
Versioning: v,
|
||||
Labels: labels,
|
||||
Billing: bb,
|
||||
Lifecycle: toRawLifecycle(b.Lifecycle),
|
||||
RetentionPolicy: b.RetentionPolicy.toRawRetentionPolicy(),
|
||||
Cors: toRawCORS(b.CORS),
|
||||
Encryption: b.Encryption.toRawBucketEncryption(),
|
||||
Logging: b.Logging.toRawBucketLogging(),
|
||||
Website: b.Website.toRawBucketWebsite(),
|
||||
IamConfiguration: bktIAM,
|
||||
Rpo: b.RPO.String(),
|
||||
CustomPlacementConfig: b.CustomPlacementConfig.toRawCustomPlacement(),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -910,7 +856,7 @@ func (b *BucketAttrs) toProtoBucket() *storagepb.Bucket {
|
|||
}
|
||||
var bb *storagepb.Bucket_Billing
|
||||
if b.RequesterPays {
|
||||
bb = &storage.Bucket_Billing{RequesterPays: true}
|
||||
bb = &storagepb.Bucket_Billing{RequesterPays: true}
|
||||
}
|
||||
var bktIAM *storagepb.Bucket_IamConfig
|
||||
if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled || b.PublicAccessPrevention != PublicAccessPreventionUnknown {
|
||||
|
|
@ -926,22 +872,23 @@ func (b *BucketAttrs) toProtoBucket() *storagepb.Bucket {
|
|||
}
|
||||
|
||||
return &storagepb.Bucket{
|
||||
Name: b.Name,
|
||||
Location: b.Location,
|
||||
StorageClass: b.StorageClass,
|
||||
Acl: toProtoBucketACL(b.ACL),
|
||||
DefaultObjectAcl: toProtoObjectACL(b.DefaultObjectACL),
|
||||
Versioning: v,
|
||||
Labels: labels,
|
||||
Billing: bb,
|
||||
Lifecycle: toProtoLifecycle(b.Lifecycle),
|
||||
RetentionPolicy: b.RetentionPolicy.toProtoRetentionPolicy(),
|
||||
Cors: toProtoCORS(b.CORS),
|
||||
Encryption: b.Encryption.toProtoBucketEncryption(),
|
||||
Logging: b.Logging.toProtoBucketLogging(),
|
||||
Website: b.Website.toProtoBucketWebsite(),
|
||||
IamConfig: bktIAM,
|
||||
Rpo: b.RPO.String(),
|
||||
Name: b.Name,
|
||||
Location: b.Location,
|
||||
StorageClass: b.StorageClass,
|
||||
Acl: toProtoBucketACL(b.ACL),
|
||||
DefaultObjectAcl: toProtoObjectACL(b.DefaultObjectACL),
|
||||
Versioning: v,
|
||||
Labels: labels,
|
||||
Billing: bb,
|
||||
Lifecycle: toProtoLifecycle(b.Lifecycle),
|
||||
RetentionPolicy: b.RetentionPolicy.toProtoRetentionPolicy(),
|
||||
Cors: toProtoCORS(b.CORS),
|
||||
Encryption: b.Encryption.toProtoBucketEncryption(),
|
||||
Logging: b.Logging.toProtoBucketLogging(),
|
||||
Website: b.Website.toProtoBucketWebsite(),
|
||||
IamConfig: bktIAM,
|
||||
Rpo: b.RPO.String(),
|
||||
CustomPlacementConfig: b.CustomPlacementConfig.toProtoCustomPlacement(),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -958,7 +905,7 @@ func (ua *BucketAttrsToUpdate) toProtoBucket() *storagepb.Bucket {
|
|||
}
|
||||
var bb *storagepb.Bucket_Billing
|
||||
if ua.RequesterPays != nil {
|
||||
bb = &storage.Bucket_Billing{RequesterPays: optional.ToBool(ua.RequesterPays)}
|
||||
bb = &storagepb.Bucket_Billing{RequesterPays: optional.ToBool(ua.RequesterPays)}
|
||||
}
|
||||
var bktIAM *storagepb.Bucket_IamConfig
|
||||
var ublaEnabled bool
|
||||
|
|
@ -1333,15 +1280,8 @@ func (b *BucketHandle) UserProject(projectID string) *BucketHandle {
|
|||
// most customers. It might be changed in backwards-incompatible ways and is not
|
||||
// subject to any SLA or deprecation policy.
|
||||
func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error {
|
||||
var metageneration int64
|
||||
if b.conds != nil {
|
||||
metageneration = b.conds.MetagenerationMatch
|
||||
}
|
||||
req := b.c.raw.Buckets.LockRetentionPolicy(b.name, metageneration)
|
||||
return run(ctx, func() error {
|
||||
_, err := req.Context(ctx).Do()
|
||||
return err
|
||||
}, b.retry, true, setRetryHeaderHTTP(req))
|
||||
o := makeStorageOpts(true, b.retry, b.userProject)
|
||||
return b.c.tc.LockBucketRetentionPolicy(ctx, b.name, b.conds, o...)
|
||||
}
|
||||
|
||||
// applyBucketConds modifies the provided call using the conditions in conds.
|
||||
|
|
@ -1501,14 +1441,25 @@ func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle {
|
|||
StorageClass: r.Action.StorageClass,
|
||||
},
|
||||
Condition: &raw.BucketLifecycleRuleCondition{
|
||||
Age: r.Condition.AgeInDays,
|
||||
DaysSinceCustomTime: r.Condition.DaysSinceCustomTime,
|
||||
DaysSinceNoncurrentTime: r.Condition.DaysSinceNoncurrentTime,
|
||||
MatchesPrefix: r.Condition.MatchesPrefix,
|
||||
MatchesStorageClass: r.Condition.MatchesStorageClasses,
|
||||
MatchesSuffix: r.Condition.MatchesSuffix,
|
||||
NumNewerVersions: r.Condition.NumNewerVersions,
|
||||
},
|
||||
}
|
||||
|
||||
// AllObjects takes precedent when both AllObjects and AgeInDays are set
|
||||
// Rationale: If you've opted into using AllObjects, it makes sense that you
|
||||
// understand the implications of how this option works with AgeInDays.
|
||||
if r.Condition.AllObjects {
|
||||
rr.Condition.Age = googleapi.Int64(0)
|
||||
rr.Condition.ForceSendFields = []string{"Age"}
|
||||
} else if r.Condition.AgeInDays > 0 {
|
||||
rr.Condition.Age = googleapi.Int64(r.Condition.AgeInDays)
|
||||
}
|
||||
|
||||
switch r.Condition.Liveness {
|
||||
case LiveAndArchived:
|
||||
rr.Condition.IsLive = nil
|
||||
|
|
@ -1549,11 +1500,18 @@ func toProtoLifecycle(l Lifecycle) *storagepb.Bucket_Lifecycle {
|
|||
AgeDays: proto.Int32(int32(r.Condition.AgeInDays)),
|
||||
DaysSinceCustomTime: proto.Int32(int32(r.Condition.DaysSinceCustomTime)),
|
||||
DaysSinceNoncurrentTime: proto.Int32(int32(r.Condition.DaysSinceNoncurrentTime)),
|
||||
MatchesPrefix: r.Condition.MatchesPrefix,
|
||||
MatchesStorageClass: r.Condition.MatchesStorageClasses,
|
||||
MatchesSuffix: r.Condition.MatchesSuffix,
|
||||
NumNewerVersions: proto.Int32(int32(r.Condition.NumNewerVersions)),
|
||||
},
|
||||
}
|
||||
|
||||
// TODO(#6205): This may not be needed for gRPC
|
||||
if r.Condition.AllObjects {
|
||||
rr.Condition.AgeDays = proto.Int32(0)
|
||||
}
|
||||
|
||||
switch r.Condition.Liveness {
|
||||
case LiveAndArchived:
|
||||
rr.Condition.IsLive = nil
|
||||
|
|
@ -1564,13 +1522,13 @@ func toProtoLifecycle(l Lifecycle) *storagepb.Bucket_Lifecycle {
|
|||
}
|
||||
|
||||
if !r.Condition.CreatedBefore.IsZero() {
|
||||
rr.Condition.CreatedBefore = adapters.TimeToProtoDate(r.Condition.CreatedBefore)
|
||||
rr.Condition.CreatedBefore = timeToProtoDate(r.Condition.CreatedBefore)
|
||||
}
|
||||
if !r.Condition.CustomTimeBefore.IsZero() {
|
||||
rr.Condition.CustomTimeBefore = adapters.TimeToProtoDate(r.Condition.CustomTimeBefore)
|
||||
rr.Condition.CustomTimeBefore = timeToProtoDate(r.Condition.CustomTimeBefore)
|
||||
}
|
||||
if !r.Condition.NoncurrentTimeBefore.IsZero() {
|
||||
rr.Condition.NoncurrentTimeBefore = adapters.TimeToProtoDate(r.Condition.NoncurrentTimeBefore)
|
||||
rr.Condition.NoncurrentTimeBefore = timeToProtoDate(r.Condition.NoncurrentTimeBefore)
|
||||
}
|
||||
rl.Rule = append(rl.Rule, rr)
|
||||
}
|
||||
|
|
@ -1589,13 +1547,20 @@ func toLifecycle(rl *raw.BucketLifecycle) Lifecycle {
|
|||
StorageClass: rr.Action.StorageClass,
|
||||
},
|
||||
Condition: LifecycleCondition{
|
||||
AgeInDays: rr.Condition.Age,
|
||||
DaysSinceCustomTime: rr.Condition.DaysSinceCustomTime,
|
||||
DaysSinceNoncurrentTime: rr.Condition.DaysSinceNoncurrentTime,
|
||||
MatchesPrefix: rr.Condition.MatchesPrefix,
|
||||
MatchesStorageClasses: rr.Condition.MatchesStorageClass,
|
||||
MatchesSuffix: rr.Condition.MatchesSuffix,
|
||||
NumNewerVersions: rr.Condition.NumNewerVersions,
|
||||
},
|
||||
}
|
||||
if rr.Condition.Age != nil {
|
||||
r.Condition.AgeInDays = *rr.Condition.Age
|
||||
if *rr.Condition.Age == 0 {
|
||||
r.Condition.AllObjects = true
|
||||
}
|
||||
}
|
||||
|
||||
if rr.Condition.IsLive == nil {
|
||||
r.Condition.Liveness = LiveAndArchived
|
||||
|
|
@ -1634,11 +1599,18 @@ func toLifecycleFromProto(rl *storagepb.Bucket_Lifecycle) Lifecycle {
|
|||
AgeInDays: int64(rr.GetCondition().GetAgeDays()),
|
||||
DaysSinceCustomTime: int64(rr.GetCondition().GetDaysSinceCustomTime()),
|
||||
DaysSinceNoncurrentTime: int64(rr.GetCondition().GetDaysSinceNoncurrentTime()),
|
||||
MatchesPrefix: rr.GetCondition().GetMatchesPrefix(),
|
||||
MatchesStorageClasses: rr.GetCondition().GetMatchesStorageClass(),
|
||||
MatchesSuffix: rr.GetCondition().GetMatchesSuffix(),
|
||||
NumNewerVersions: int64(rr.GetCondition().GetNumNewerVersions()),
|
||||
},
|
||||
}
|
||||
|
||||
// TODO(#6205): This may not be needed for gRPC
|
||||
if rr.GetCondition().GetAgeDays() == 0 {
|
||||
r.Condition.AllObjects = true
|
||||
}
|
||||
|
||||
if rr.GetCondition().IsLive == nil {
|
||||
r.Condition.Liveness = LiveAndArchived
|
||||
} else if rr.GetCondition().GetIsLive() {
|
||||
|
|
@ -1648,13 +1620,13 @@ func toLifecycleFromProto(rl *storagepb.Bucket_Lifecycle) Lifecycle {
|
|||
}
|
||||
|
||||
if rr.GetCondition().GetCreatedBefore() != nil {
|
||||
r.Condition.CreatedBefore = adapters.ProtoDateToUTCTime(rr.GetCondition().GetCreatedBefore())
|
||||
r.Condition.CreatedBefore = protoDateToUTCTime(rr.GetCondition().GetCreatedBefore())
|
||||
}
|
||||
if rr.GetCondition().GetCustomTimeBefore() != nil {
|
||||
r.Condition.CustomTimeBefore = adapters.ProtoDateToUTCTime(rr.GetCondition().GetCustomTimeBefore())
|
||||
r.Condition.CustomTimeBefore = protoDateToUTCTime(rr.GetCondition().GetCustomTimeBefore())
|
||||
}
|
||||
if rr.GetCondition().GetNoncurrentTimeBefore() != nil {
|
||||
r.Condition.NoncurrentTimeBefore = adapters.ProtoDateToUTCTime(rr.GetCondition().GetNoncurrentTimeBefore())
|
||||
r.Condition.NoncurrentTimeBefore = protoDateToUTCTime(rr.GetCondition().GetNoncurrentTimeBefore())
|
||||
}
|
||||
l.Rules = append(l.Rules, r)
|
||||
}
|
||||
|
|
@ -1708,7 +1680,7 @@ func (b *BucketLogging) toProtoBucketLogging() *storagepb.Bucket_Logging {
|
|||
return nil
|
||||
}
|
||||
return &storagepb.Bucket_Logging{
|
||||
LogBucket: b.LogBucket,
|
||||
LogBucket: bucketResourceName(globalProjectAlias, b.LogBucket),
|
||||
LogObjectPrefix: b.LogObjectPrefix,
|
||||
}
|
||||
}
|
||||
|
|
@ -1727,8 +1699,9 @@ func toBucketLoggingFromProto(b *storagepb.Bucket_Logging) *BucketLogging {
|
|||
if b == nil {
|
||||
return nil
|
||||
}
|
||||
lb := parseBucketName(b.GetLogBucket())
|
||||
return &BucketLogging{
|
||||
LogBucket: b.GetLogBucket(),
|
||||
LogBucket: lb,
|
||||
LogObjectPrefix: b.GetLogObjectPrefix(),
|
||||
}
|
||||
}
|
||||
|
|
@ -1881,24 +1854,46 @@ func toRPOFromProto(b *storagepb.Bucket) RPO {
|
|||
}
|
||||
}
|
||||
|
||||
func customPlacementFromRaw(c *raw.BucketCustomPlacementConfig) *CustomPlacementConfig {
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
return &CustomPlacementConfig{DataLocations: c.DataLocations}
|
||||
}
|
||||
|
||||
func (c *CustomPlacementConfig) toRawCustomPlacement() *raw.BucketCustomPlacementConfig {
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
return &raw.BucketCustomPlacementConfig{
|
||||
DataLocations: c.DataLocations,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *CustomPlacementConfig) toProtoCustomPlacement() *storagepb.Bucket_CustomPlacementConfig {
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
return &storagepb.Bucket_CustomPlacementConfig{
|
||||
DataLocations: c.DataLocations,
|
||||
}
|
||||
}
|
||||
|
||||
func customPlacementFromProto(c *storagepb.Bucket_CustomPlacementConfig) *CustomPlacementConfig {
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
return &CustomPlacementConfig{DataLocations: c.GetDataLocations()}
|
||||
}
|
||||
|
||||
// Objects returns an iterator over the objects in the bucket that match the
|
||||
// Query q. If q is nil, no filtering is done. Objects will be iterated over
|
||||
// lexicographically by name.
|
||||
//
|
||||
// Note: The returned iterator is not safe for concurrent operations without explicit synchronization.
|
||||
func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator {
|
||||
it := &ObjectIterator{
|
||||
ctx: ctx,
|
||||
bucket: b,
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
|
||||
it.fetch,
|
||||
func() int { return len(it.items) },
|
||||
func() interface{} { b := it.items; it.items = nil; return b })
|
||||
if q != nil {
|
||||
it.query = *q
|
||||
}
|
||||
return it
|
||||
o := makeStorageOpts(true, b.retry, b.userProject)
|
||||
return b.c.tc.ListObjects(ctx, b.name, q, o...)
|
||||
}
|
||||
|
||||
// Retryer returns a bucket handle that is configured with custom retry
|
||||
|
|
@ -1933,7 +1928,6 @@ func (b *BucketHandle) Retryer(opts ...RetryOption) *BucketHandle {
|
|||
// Note: This iterator is not safe for concurrent operations without explicit synchronization.
|
||||
type ObjectIterator struct {
|
||||
ctx context.Context
|
||||
bucket *BucketHandle
|
||||
query Query
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
|
@ -1970,52 +1964,6 @@ func (it *ObjectIterator) Next() (*ObjectAttrs, error) {
|
|||
return item, nil
|
||||
}
|
||||
|
||||
func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) {
|
||||
req := it.bucket.c.raw.Objects.List(it.bucket.name)
|
||||
setClientHeader(req.Header())
|
||||
projection := it.query.Projection
|
||||
if projection == ProjectionDefault {
|
||||
projection = ProjectionFull
|
||||
}
|
||||
req.Projection(projection.String())
|
||||
req.Delimiter(it.query.Delimiter)
|
||||
req.Prefix(it.query.Prefix)
|
||||
req.StartOffset(it.query.StartOffset)
|
||||
req.EndOffset(it.query.EndOffset)
|
||||
req.Versions(it.query.Versions)
|
||||
req.IncludeTrailingDelimiter(it.query.IncludeTrailingDelimiter)
|
||||
if len(it.query.fieldSelection) > 0 {
|
||||
req.Fields("nextPageToken", googleapi.Field(it.query.fieldSelection))
|
||||
}
|
||||
req.PageToken(pageToken)
|
||||
if it.bucket.userProject != "" {
|
||||
req.UserProject(it.bucket.userProject)
|
||||
}
|
||||
if pageSize > 0 {
|
||||
req.MaxResults(int64(pageSize))
|
||||
}
|
||||
var resp *raw.Objects
|
||||
var err error
|
||||
err = run(it.ctx, func() error {
|
||||
resp, err = req.Context(it.ctx).Do()
|
||||
return err
|
||||
}, it.bucket.retry, true, setRetryHeaderHTTP(req))
|
||||
if err != nil {
|
||||
var e *googleapi.Error
|
||||
if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound {
|
||||
err = ErrBucketNotExist
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
for _, item := range resp.Items {
|
||||
it.items = append(it.items, newObject(item))
|
||||
}
|
||||
for _, prefix := range resp.Prefixes {
|
||||
it.items = append(it.items, &ObjectAttrs{Prefix: prefix})
|
||||
}
|
||||
return resp.NextPageToken, nil
|
||||
}
|
||||
|
||||
// Buckets returns an iterator over the buckets in the project. You may
|
||||
// optionally set the iterator's Prefix field to restrict the list to buckets
|
||||
// whose names begin with the prefix. By default, all buckets in the project
|
||||
|
|
@ -2023,17 +1971,8 @@ func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error)
|
|||
//
|
||||
// Note: The returned iterator is not safe for concurrent operations without explicit synchronization.
|
||||
func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator {
|
||||
it := &BucketIterator{
|
||||
ctx: ctx,
|
||||
client: c,
|
||||
projectID: projectID,
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
|
||||
it.fetch,
|
||||
func() int { return len(it.buckets) },
|
||||
func() interface{} { b := it.buckets; it.buckets = nil; return b })
|
||||
|
||||
return it
|
||||
o := makeStorageOpts(true, c.retry, "")
|
||||
return c.tc.ListBuckets(ctx, projectID, o...)
|
||||
}
|
||||
|
||||
// A BucketIterator is an iterator over BucketAttrs.
|
||||
|
|
@ -2044,7 +1983,6 @@ type BucketIterator struct {
|
|||
Prefix string
|
||||
|
||||
ctx context.Context
|
||||
client *Client
|
||||
projectID string
|
||||
buckets []*BucketAttrs
|
||||
pageInfo *iterator.PageInfo
|
||||
|
|
@ -2070,36 +2008,6 @@ func (it *BucketIterator) Next() (*BucketAttrs, error) {
|
|||
// Note: This method is not safe for concurrent operations without explicit synchronization.
|
||||
func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
|
||||
|
||||
// TODO: When the transport-agnostic client interface is integrated into the Veneer,
|
||||
// this method should be removed, and the iterator should be initialized by the
|
||||
// transport-specific client implementations.
|
||||
func (it *BucketIterator) fetch(pageSize int, pageToken string) (token string, err error) {
|
||||
req := it.client.raw.Buckets.List(it.projectID)
|
||||
setClientHeader(req.Header())
|
||||
req.Projection("full")
|
||||
req.Prefix(it.Prefix)
|
||||
req.PageToken(pageToken)
|
||||
if pageSize > 0 {
|
||||
req.MaxResults(int64(pageSize))
|
||||
}
|
||||
var resp *raw.Buckets
|
||||
err = run(it.ctx, func() error {
|
||||
resp, err = req.Context(it.ctx).Do()
|
||||
return err
|
||||
}, it.client.retry, true, setRetryHeaderHTTP(req))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
for _, item := range resp.Items {
|
||||
b, err := newBucket(item)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.buckets = append(it.buckets, b)
|
||||
}
|
||||
return resp.NextPageToken, nil
|
||||
}
|
||||
|
||||
// RPO (Recovery Point Objective) configures the turbo replication feature. See
|
||||
// https://cloud.google.com/storage/docs/managing-turbo-replication for more information.
|
||||
type RPO int
|
||||
|
|
@ -2135,3 +2043,28 @@ func (rpo RPO) String() string {
|
|||
return rpoUnknown
|
||||
}
|
||||
}
|
||||
|
||||
// protoDateToUTCTime returns a new Time based on the google.type.Date, in UTC.
|
||||
//
|
||||
// Hours, minutes, seconds, and nanoseconds are set to 0.
|
||||
func protoDateToUTCTime(d *dpb.Date) time.Time {
|
||||
return protoDateToTime(d, time.UTC)
|
||||
}
|
||||
|
||||
// protoDateToTime returns a new Time based on the google.type.Date and provided
|
||||
// *time.Location.
|
||||
//
|
||||
// Hours, minutes, seconds, and nanoseconds are set to 0.
|
||||
func protoDateToTime(d *dpb.Date, l *time.Location) time.Time {
|
||||
return time.Date(int(d.GetYear()), time.Month(d.GetMonth()), int(d.GetDay()), 0, 0, 0, 0, l)
|
||||
}
|
||||
|
||||
// timeToProtoDate returns a new google.type.Date based on the provided time.Time.
|
||||
// The location is ignored, as is anything more precise than the day.
|
||||
func timeToProtoDate(t time.Time) *dpb.Date {
|
||||
return &dpb.Date{
|
||||
Year: int32(t.Year()),
|
||||
Month: int32(t.Month()),
|
||||
Day: int32(t.Day()),
|
||||
}
|
||||
}
|
||||
|
|
|
141 vendor/cloud.google.com/go/storage/client.go generated vendored
|
|
@ -16,6 +16,8 @@ package storage
|
|||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
gax "github.com/googleapis/gax-go/v2"
|
||||
"google.golang.org/api/option"
|
||||
|
|
@ -42,7 +44,7 @@ type storageClient interface {
|
|||
// Top-level methods.
|
||||
|
||||
GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error)
|
||||
CreateBucket(ctx context.Context, project string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error)
|
||||
CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error)
|
||||
ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator
|
||||
Close() error
|
||||
|
||||
|
|
@ -56,35 +58,35 @@ type storageClient interface {
|
|||
|
||||
// Object metadata methods.
|
||||
|
||||
DeleteObject(ctx context.Context, bucket, object string, conds *Conditions, opts ...storageOption) error
|
||||
GetObject(ctx context.Context, bucket, object string, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error)
|
||||
UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error)
|
||||
DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error
|
||||
GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error)
|
||||
UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error)
|
||||
|
||||
// Default Object ACL methods.
|
||||
|
||||
DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error
|
||||
ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error)
|
||||
UpdateDefaultObjectACL(ctx context.Context, opts ...storageOption) (*ACLRule, error)
|
||||
UpdateDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error
|
||||
|
||||
// Bucket ACL methods.
|
||||
|
||||
DeleteBucketACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error
|
||||
ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error)
|
||||
UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error)
|
||||
UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error
|
||||
|
||||
// Object ACL methods.
|
||||
|
||||
DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error
|
||||
ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error)
|
||||
UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error)
|
||||
UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error
|
||||
|
||||
// Media operations.
|
||||
|
||||
ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error)
|
||||
RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error)
|
||||
|
||||
OpenReader(ctx context.Context, r *Reader, opts ...storageOption) error
|
||||
OpenWriter(ctx context.Context, w *Writer, opts ...storageOption) error
|
||||
NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (*Reader, error)
|
||||
OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error)
|
||||
|
||||
// IAM methods.
|
||||
|
||||
|
|
@ -94,11 +96,16 @@ type storageClient interface {
|
|||
|
||||
// HMAC Key methods.
|
||||
|
||||
GetHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) (*HMACKey, error)
|
||||
ListHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) *HMACKeysIterator
|
||||
UpdateHMACKey(ctx context.Context, desc *hmacKeyDesc, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error)
|
||||
CreateHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) (*HMACKey, error)
|
||||
DeleteHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) error
|
||||
GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error)
|
||||
ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator
|
||||
UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error)
|
||||
CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error)
|
||||
DeleteHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) error
|
||||
|
||||
// Notification methods.
|
||||
ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (map[string]*Notification, error)
|
||||
CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (*Notification, error)
|
||||
DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) error
|
||||
}
|
||||
|
||||
// settings contains transport-agnostic configuration for API calls made via
|
||||
|
|
@ -155,6 +162,20 @@ func callSettings(defaults *settings, opts ...storageOption) *settings {
|
|||
return &cs
|
||||
}
|
||||
|
||||
// makeStorageOpts is a helper for generating a set of storageOption based on
|
||||
// idempotency, retryConfig, and userProject. All top-level client operations
|
||||
// will generally have to pass these options through the interface.
|
||||
func makeStorageOpts(isIdempotent bool, retry *retryConfig, userProject string) []storageOption {
|
||||
opts := []storageOption{idempotent(isIdempotent)}
|
||||
if retry != nil {
|
||||
opts = append(opts, withRetryConfig(retry))
|
||||
}
|
||||
if userProject != "" {
|
||||
opts = append(opts, withUserProject(userProject))
|
||||
}
|
||||
return opts
|
||||
}
|
||||
|
||||
// storageOption is the transport-agnostic call option for the storageClient
|
||||
// interface.
|
||||
type storageOption interface {
|
||||
|
|
@ -211,24 +232,93 @@ type userProjectOption struct {
|
|||
|
||||
func (o *userProjectOption) Apply(s *settings) { s.userProject = o.project }
|
||||
|
||||
type openWriterParams struct {
|
||||
// Writer configuration
|
||||
|
||||
// ctx is the context used by the writer routine to make all network calls
|
||||
// and to manage the writer routine - see `Writer.ctx`.
|
||||
// Required.
|
||||
ctx context.Context
|
||||
// chunkSize - see `Writer.ChunkSize`.
|
||||
// Optional.
|
||||
chunkSize int
|
||||
// chunkRetryDeadline - see `Writer.ChunkRetryDeadline`.
|
||||
// Optional.
|
||||
chunkRetryDeadline time.Duration
|
||||
|
||||
// Object/request properties
|
||||
|
||||
// bucket - see `Writer.o.bucket`.
|
||||
// Required.
|
||||
bucket string
|
||||
// attrs - see `Writer.ObjectAttrs`.
|
||||
// Required.
|
||||
attrs *ObjectAttrs
|
||||
// conds - see `Writer.o.conds`.
|
||||
// Optional.
|
||||
conds *Conditions
|
||||
// encryptionKey - see `Writer.o.encryptionKey`
|
||||
// Optional.
|
||||
encryptionKey []byte
|
||||
// sendCRC32C - see `Writer.SendCRC32C`.
|
||||
// Optional.
|
||||
sendCRC32C bool
|
||||
|
||||
// Writer callbacks
|
||||
|
||||
// donec - see `Writer.donec`.
|
||||
// Required.
|
||||
donec chan struct{}
|
||||
// setError callback for reporting errors - see `Writer.error`.
|
||||
// Required.
|
||||
setError func(error)
|
||||
// progress callback for reporting upload progress - see `Writer.progress`.
|
||||
// Required.
|
||||
progress func(int64)
|
||||
// setObj callback for reporting the resulting object - see `Writer.obj`.
|
||||
// Required.
|
||||
setObj func(*ObjectAttrs)
|
||||
}
|
||||
|
||||
type newRangeReaderParams struct {
|
||||
bucket string
|
||||
conds *Conditions
|
||||
encryptionKey []byte
|
||||
gen int64
|
||||
length int64
|
||||
object string
|
||||
offset int64
|
||||
readCompressed bool // Use accept-encoding: gzip. Only works for HTTP currently.
|
||||
}
|
||||
|
||||
type composeObjectRequest struct {
|
||||
dstBucket string
|
||||
dstObject string
|
||||
srcs []string
|
||||
dstObject destinationObject
|
||||
srcs []sourceObject
|
||||
predefinedACL string
|
||||
sendCRC32C bool
|
||||
}
|
||||
|
||||
type sourceObject struct {
|
||||
name string
|
||||
bucket string
|
||||
gen int64
|
||||
conds *Conditions
|
||||
predefinedACL string
|
||||
encryptionKey []byte
|
||||
}
|
||||
|
||||
type destinationObject struct {
|
||||
name string
|
||||
bucket string
|
||||
conds *Conditions
|
||||
attrs *ObjectAttrs // attrs to set on the destination object.
|
||||
encryptionKey []byte
|
||||
keyName string
|
||||
}
|
||||
|
||||
type rewriteObjectRequest struct {
|
||||
srcBucket string
|
||||
srcObject string
|
||||
dstBucket string
|
||||
dstObject string
|
||||
dstKeyName string
|
||||
attrs *ObjectAttrs
|
||||
gen int64
|
||||
conds *Conditions
|
||||
srcObject sourceObject
|
||||
dstObject destinationObject
|
||||
predefinedACL string
|
||||
token string
|
||||
}
|
||||
|
|
@ -237,5 +327,6 @@ type rewriteObjectResponse struct {
|
|||
resource *ObjectAttrs
|
||||
done bool
|
||||
written int64
|
||||
size int64
|
||||
token string
|
||||
}
|
||||
|
|
|
|||
142
vendor/cloud.google.com/go/storage/copy.go
generated
vendored
142
vendor/cloud.google.com/go/storage/copy.go
generated
vendored
|
|
@ -20,7 +20,6 @@ import (
|
|||
"fmt"
|
||||
|
||||
"cloud.google.com/go/internal/trace"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
)
|
||||
|
||||
// CopierFrom creates a Copier that can copy src to dst.
|
||||
|
|
@ -86,69 +85,57 @@ func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
|
|||
if c.DestinationKMSKeyName != "" && c.dst.encryptionKey != nil {
|
||||
return nil, errors.New("storage: cannot use DestinationKMSKeyName with a customer-supplied encryption key")
|
||||
}
|
||||
if c.dst.gen != defaultGen {
|
||||
return nil, fmt.Errorf("storage: generation cannot be specified on copy destination, got %v", c.dst.gen)
|
||||
}
|
||||
// Convert destination attributes to raw form, omitting the bucket.
|
||||
// If the bucket is included but name or content-type aren't, the service
|
||||
// returns a 400 with "Required" as the only message. Omitting the bucket
|
||||
// does not cause any problems.
|
||||
rawObject := c.ObjectAttrs.toRawObject("")
|
||||
req := &rewriteObjectRequest{
|
||||
srcObject: sourceObject{
|
||||
name: c.src.object,
|
||||
bucket: c.src.bucket,
|
||||
gen: c.src.gen,
|
||||
conds: c.src.conds,
|
||||
encryptionKey: c.src.encryptionKey,
|
||||
},
|
||||
dstObject: destinationObject{
|
||||
name: c.dst.object,
|
||||
bucket: c.dst.bucket,
|
||||
conds: c.dst.conds,
|
||||
attrs: &c.ObjectAttrs,
|
||||
encryptionKey: c.dst.encryptionKey,
|
||||
keyName: c.DestinationKMSKeyName,
|
||||
},
|
||||
predefinedACL: c.PredefinedACL,
|
||||
token: c.RewriteToken,
|
||||
}
|
||||
|
||||
isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist)
|
||||
var userProject string
|
||||
if c.dst.userProject != "" {
|
||||
userProject = c.dst.userProject
|
||||
} else if c.src.userProject != "" {
|
||||
userProject = c.src.userProject
|
||||
}
|
||||
opts := makeStorageOpts(isIdempotent, c.dst.retry, userProject)
|
||||
|
||||
for {
|
||||
res, err := c.callRewrite(ctx, rawObject)
|
||||
res, err := c.dst.c.tc.RewriteObject(ctx, req, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.RewriteToken = res.token
|
||||
if c.ProgressFunc != nil {
|
||||
c.ProgressFunc(uint64(res.TotalBytesRewritten), uint64(res.ObjectSize))
|
||||
c.ProgressFunc(uint64(res.written), uint64(res.size))
|
||||
}
|
||||
if res.Done { // Finished successfully.
|
||||
return newObject(res.Resource), nil
|
||||
if res.done { // Finished successfully.
|
||||
return res.resource, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.RewriteResponse, error) {
|
||||
call := c.dst.c.raw.Objects.Rewrite(c.src.bucket, c.src.object, c.dst.bucket, c.dst.object, rawObj)
|
||||
|
||||
call.Context(ctx).Projection("full")
|
||||
if c.RewriteToken != "" {
|
||||
call.RewriteToken(c.RewriteToken)
|
||||
}
|
||||
if c.DestinationKMSKeyName != "" {
|
||||
call.DestinationKmsKeyName(c.DestinationKMSKeyName)
|
||||
}
|
||||
if c.PredefinedACL != "" {
|
||||
call.DestinationPredefinedAcl(c.PredefinedACL)
|
||||
}
|
||||
if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if c.dst.userProject != "" {
|
||||
call.UserProject(c.dst.userProject)
|
||||
} else if c.src.userProject != "" {
|
||||
call.UserProject(c.src.userProject)
|
||||
}
|
||||
if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var res *raw.RewriteResponse
|
||||
var err error
|
||||
setClientHeader(call.Header())
|
||||
|
||||
retryCall := func() error { res, err = call.Do(); return err }
|
||||
isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist)
|
||||
|
||||
if err := run(ctx, retryCall, c.dst.retry, isIdempotent, setRetryHeaderHTTP(call)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.RewriteToken = res.RewriteToken
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// ComposerFrom creates a Composer that can compose srcs into dst.
|
||||
// You can immediately call Run on the returned Composer, or you can
|
||||
// configure it first.
|
||||
|
|
@ -188,17 +175,13 @@ func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
|
|||
if err := c.dst.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if c.dst.gen != defaultGen {
|
||||
return nil, fmt.Errorf("storage: generation cannot be specified on compose destination, got %v", c.dst.gen)
|
||||
}
|
||||
if len(c.srcs) == 0 {
|
||||
return nil, errors.New("storage: at least one source object must be specified")
|
||||
}
|
||||
|
||||
req := &raw.ComposeRequest{}
|
||||
// Compose requires a non-empty Destination, so we always set it,
|
||||
// even if the caller-provided ObjectAttrs is the zero value.
|
||||
req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket)
|
||||
if c.SendCRC32C {
|
||||
req.Destination.Crc32c = encodeUint32(c.ObjectAttrs.CRC32C)
|
||||
}
|
||||
for _, src := range c.srcs {
|
||||
if err := src.validate(); err != nil {
|
||||
return nil, err
|
||||
|
|
@ -209,36 +192,31 @@ func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
|
|||
if src.encryptionKey != nil {
|
||||
return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object)
|
||||
}
|
||||
srcObj := &raw.ComposeRequestSourceObjects{
|
||||
Name: src.object,
|
||||
}
|
||||
if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.SourceObjects = append(req.SourceObjects, srcObj)
|
||||
}
|
||||
|
||||
call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx)
|
||||
if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil {
|
||||
return nil, err
|
||||
req := &composeObjectRequest{
|
||||
dstBucket: c.dst.bucket,
|
||||
predefinedACL: c.PredefinedACL,
|
||||
sendCRC32C: c.SendCRC32C,
|
||||
}
|
||||
if c.dst.userProject != "" {
|
||||
call.UserProject(c.dst.userProject)
|
||||
req.dstObject = destinationObject{
|
||||
name: c.dst.object,
|
||||
bucket: c.dst.bucket,
|
||||
conds: c.dst.conds,
|
||||
attrs: &c.ObjectAttrs,
|
||||
encryptionKey: c.dst.encryptionKey,
|
||||
}
|
||||
if c.PredefinedACL != "" {
|
||||
call.DestinationPredefinedAcl(c.PredefinedACL)
|
||||
for _, src := range c.srcs {
|
||||
s := sourceObject{
|
||||
name: src.object,
|
||||
bucket: src.bucket,
|
||||
gen: src.gen,
|
||||
conds: src.conds,
|
||||
}
|
||||
req.srcs = append(req.srcs, s)
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var obj *raw.Object
|
||||
setClientHeader(call.Header())
|
||||
|
||||
retryCall := func() error { obj, err = call.Do(); return err }
|
||||
isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist)
|
||||
|
||||
if err := run(ctx, retryCall, c.dst.retry, isIdempotent, setRetryHeaderHTTP(call)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newObject(obj), nil
|
||||
opts := makeStorageOpts(isIdempotent, c.dst.retry, c.dst.userProject)
|
||||
return c.dst.c.tc.ComposeObject(ctx, req, opts...)
|
||||
}
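ComposerFrom, documented earlier in this file, is the public entry point for this compose path. A short sketch, assuming dst, src1 and src2 are ObjectHandle values in the same bucket:

    composer := dst.ComposerFrom(src1, src2)
    // The embedded ObjectAttrs set attributes on the composed destination object.
    composer.ContentType = "text/plain"
    attrs, err := composer.Run(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    fmt.Println("composed object size:", attrs.Size)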
|
||||
|
|
|
|||
231
vendor/cloud.google.com/go/storage/doc.go
generated
vendored
|
|
@ -22,16 +22,15 @@ https://cloud.google.com/storage/docs.
|
|||
See https://pkg.go.dev/cloud.google.com/go for authentication, timeouts,
|
||||
connection pooling and similar aspects of this package.
|
||||
|
||||
|
||||
Creating a Client
|
||||
# Creating a Client
|
||||
|
||||
To start working with this package, create a client:
|
||||
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
ctx := context.Background()
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
The client will use your default application credentials. Clients should be
|
||||
reused instead of created as needed. The methods of Client are safe for
|
||||
|
|
@ -40,47 +39,47 @@ concurrent use by multiple goroutines.
|
|||
If you only wish to access public data, you can create
|
||||
an unauthenticated client with
|
||||
|
||||
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
|
||||
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
|
||||
|
||||
To use an emulator with this library, you can set the STORAGE_EMULATOR_HOST
|
||||
environment variable to the address at which your emulator is running. This will
|
||||
send requests to that address instead of to Cloud Storage. You can then create
|
||||
and use a client as usual:
|
||||
|
||||
// Set STORAGE_EMULATOR_HOST environment variable.
|
||||
err := os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9000")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// Set STORAGE_EMULATOR_HOST environment variable.
|
||||
err := os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9000")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
// Create client as usual.
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// Create client as usual.
|
||||
client, err := storage.NewClient(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
// This request is now directed to http://localhost:9000/storage/v1/b
|
||||
// instead of https://storage.googleapis.com/storage/v1/b
|
||||
if err := client.Bucket("my-bucket").Create(ctx, projectID, nil); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// This request is now directed to http://localhost:9000/storage/v1/b
|
||||
// instead of https://storage.googleapis.com/storage/v1/b
|
||||
if err := client.Bucket("my-bucket").Create(ctx, projectID, nil); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
Please note that there is no official emulator for Cloud Storage.
|
||||
|
||||
Buckets
|
||||
# Buckets
|
||||
|
||||
A Google Cloud Storage bucket is a collection of objects. To work with a
|
||||
bucket, make a bucket handle:
|
||||
|
||||
bkt := client.Bucket(bucketName)
|
||||
bkt := client.Bucket(bucketName)
|
||||
|
||||
A handle is a reference to a bucket. You can have a handle even if the
|
||||
bucket doesn't exist yet. To create a bucket in Google Cloud Storage,
|
||||
call Create on the handle:
|
||||
|
||||
if err := bkt.Create(ctx, projectID, nil); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
if err := bkt.Create(ctx, projectID, nil); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
Note that although buckets are associated with projects, bucket names are
|
||||
global across all projects.
|
||||
|
|
@ -90,14 +89,14 @@ BucketAttrs. The third argument to BucketHandle.Create allows you to set
|
|||
the initial BucketAttrs of a bucket. To retrieve a bucket's attributes, use
|
||||
Attrs:
|
||||
|
||||
attrs, err := bkt.Attrs(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n",
|
||||
attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass)
|
||||
attrs, err := bkt.Attrs(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n",
|
||||
attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass)
|
||||
|
||||
Objects
|
||||
# Objects
|
||||
|
||||
An object holds arbitrary data as a sequence of bytes, like a file. You
|
||||
refer to objects using a handle, just as with buckets, but unlike buckets
|
||||
|
|
@ -105,78 +104,78 @@ you don't explicitly create an object. Instead, the first time you write
|
|||
to an object it will be created. You can use the standard Go io.Reader
|
||||
and io.Writer interfaces to read and write object data:
|
||||
|
||||
obj := bkt.Object("data")
|
||||
// Write something to obj.
|
||||
// w implements io.Writer.
|
||||
w := obj.NewWriter(ctx)
|
||||
// Write some text to obj. This will either create the object or overwrite whatever is there already.
|
||||
if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// Close, just like writing a file.
|
||||
if err := w.Close(); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
obj := bkt.Object("data")
|
||||
// Write something to obj.
|
||||
// w implements io.Writer.
|
||||
w := obj.NewWriter(ctx)
|
||||
// Write some text to obj. This will either create the object or overwrite whatever is there already.
|
||||
if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// Close, just like writing a file.
|
||||
if err := w.Close(); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
// Read it back.
|
||||
r, err := obj.NewReader(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer r.Close()
|
||||
if _, err := io.Copy(os.Stdout, r); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// Prints "This object contains text."
|
||||
// Read it back.
|
||||
r, err := obj.NewReader(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
defer r.Close()
|
||||
if _, err := io.Copy(os.Stdout, r); err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// Prints "This object contains text."
|
||||
|
||||
Objects also have attributes, which you can fetch with Attrs:
|
||||
|
||||
objAttrs, err := obj.Attrs(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Printf("object %s has size %d and can be read using %s\n",
|
||||
objAttrs.Name, objAttrs.Size, objAttrs.MediaLink)
|
||||
objAttrs, err := obj.Attrs(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Printf("object %s has size %d and can be read using %s\n",
|
||||
objAttrs.Name, objAttrs.Size, objAttrs.MediaLink)
|
||||
|
||||
Listing objects
|
||||
# Listing objects
|
||||
|
||||
Listing objects in a bucket is done with the Bucket.Objects method:
|
||||
|
||||
query := &storage.Query{Prefix: ""}
|
||||
query := &storage.Query{Prefix: ""}
|
||||
|
||||
var names []string
|
||||
it := bkt.Objects(ctx, query)
|
||||
for {
|
||||
attrs, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
names = append(names, attrs.Name)
|
||||
}
|
||||
var names []string
|
||||
it := bkt.Objects(ctx, query)
|
||||
for {
|
||||
attrs, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
names = append(names, attrs.Name)
|
||||
}
|
||||
|
||||
Objects are listed lexicographically by name. To filter objects
|
||||
lexicographically, Query.StartOffset and/or Query.EndOffset can be used:
|
||||
|
||||
query := &storage.Query{
|
||||
Prefix: "",
|
||||
StartOffset: "bar/", // Only list objects lexicographically >= "bar/"
|
||||
EndOffset: "foo/", // Only list objects lexicographically < "foo/"
|
||||
}
|
||||
query := &storage.Query{
|
||||
Prefix: "",
|
||||
StartOffset: "bar/", // Only list objects lexicographically >= "bar/"
|
||||
EndOffset: "foo/", // Only list objects lexicographically < "foo/"
|
||||
}
|
||||
|
||||
// ... as before
|
||||
// ... as before
|
||||
|
||||
If only a subset of object attributes is needed when listing, specifying this
|
||||
subset using Query.SetAttrSelection may speed up the listing process:
|
||||
|
||||
query := &storage.Query{Prefix: ""}
|
||||
query.SetAttrSelection([]string{"Name"})
|
||||
query := &storage.Query{Prefix: ""}
|
||||
query.SetAttrSelection([]string{"Name"})
|
||||
|
||||
// ... as before
|
||||
// ... as before
|
||||
|
||||
ACLs
|
||||
# ACLs
|
||||
|
||||
Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of
|
||||
ACLRules, each of which specifies the role of a user, group or project. ACLs
|
||||
|
|
@ -186,17 +185,17 @@ https://cloud.google.com/storage/docs/access-control/iam).
|
|||
|
||||
To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method:
|
||||
|
||||
acls, err := obj.ACL().List(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
for _, rule := range acls {
|
||||
fmt.Printf("%s has role %s\n", rule.Entity, rule.Role)
|
||||
}
|
||||
acls, err := obj.ACL().List(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
for _, rule := range acls {
|
||||
fmt.Printf("%s has role %s\n", rule.Entity, rule.Role)
|
||||
}
|
||||
|
||||
You can also set and delete ACLs.
|
||||
|
||||
Conditions
|
||||
# Conditions
|
||||
|
||||
Every object has a generation and a metageneration. The generation changes
|
||||
whenever the content changes, and the metageneration changes whenever the
|
||||
|
|
@ -208,32 +207,32 @@ For example, say you've read an object's metadata into objAttrs. Now
|
|||
you want to write to that object, but only if its contents haven't changed
|
||||
since you read it. Here is how to express that:
|
||||
|
||||
w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx)
|
||||
// Proceed with writing as above.
|
||||
w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx)
|
||||
// Proceed with writing as above.
|
||||
|
||||
Signed URLs
|
||||
# Signed URLs
|
||||
|
||||
You can obtain a URL that lets anyone read or write an object for a limited time.
|
||||
Signing a URL requires credentials authorized to sign a URL. To use the same
|
||||
authentication that was used when instantiating the Storage client, use the
|
||||
BucketHandle.SignedURL method.
|
||||
|
||||
url, err := client.Bucket(bucketName).SignedURL(objectName, opts)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(url)
|
||||
url, err := client.Bucket(bucketName).SignedURL(objectName, opts)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(url)
|
||||
|
||||
You can also sign a URL without creating a client. See the documentation of
|
||||
SignedURL for details.
|
||||
|
||||
url, err := storage.SignedURL(bucketName, "shared-object", opts)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(url)
|
||||
url, err := storage.SignedURL(bucketName, "shared-object", opts)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(url)
|
||||
|
||||
Post Policy V4 Signed Request
|
||||
# Post Policy V4 Signed Request
|
||||
|
||||
A type of signed request that allows uploads through HTML forms directly to Cloud Storage with
|
||||
temporary permission. Conditions can be applied to restrict how the HTML form is used and exercised
|
||||
|
|
@ -242,13 +241,13 @@ by a user.
|
|||
For more information, please see https://cloud.google.com/storage/docs/xml-api/post-object as well
|
||||
as the documentation of BucketHandle.GenerateSignedPostPolicyV4.
|
||||
|
||||
pv4, err := client.Bucket(bucketName).GenerateSignedPostPolicyV4(objectName, opts)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Printf("URL: %s\nFields; %v\n", pv4.URL, pv4.Fields)
|
||||
pv4, err := client.Bucket(bucketName).GenerateSignedPostPolicyV4(objectName, opts)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Printf("URL: %s\nFields; %v\n", pv4.URL, pv4.Fields)
|
||||
|
||||
Errors
|
||||
# Errors
|
||||
|
||||
Errors returned by this client are often of the type googleapi.Error.
|
||||
These errors can be introspected for more information by using errors.As
|
||||
|
|
@ -261,7 +260,7 @@ with the richer googleapi.Error type. For example:
|
|||
|
||||
See https://pkg.go.dev/google.golang.org/api/googleapi#Error for more information.
|
||||
|
||||
Retrying failed requests
|
||||
# Retrying failed requests
|
||||
|
||||
Methods in this package may retry calls that fail with transient errors.
|
||||
Retrying continues indefinitely unless the controlling context is canceled, the
|
||||
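Retry behavior can also be tuned per handle. A hedged sketch, assuming the RetryOption helpers (WithBackoff, WithPolicy) and gax.Backoff are available in this release; "my-bucket" and "my-object" are placeholders:

    o := client.Bucket("my-bucket").Object("my-object").Retryer(
        storage.WithBackoff(gax.Backoff{
            Initial: 2 * time.Second,
            Max:     30 * time.Second,
        }),
        // RetryAlways also retries non-idempotent calls; use with care.
        storage.WithPolicy(storage.RetryAlways),
    )
    if err := o.Delete(ctx); err != nil {
        // TODO: Handle error.
    }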
|
|
|
|||
36
vendor/cloud.google.com/go/storage/go.mod
generated
vendored
|
|
@ -1,20 +1,30 @@
|
|||
module cloud.google.com/go/storage
|
||||
|
||||
go 1.15
|
||||
go 1.17
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.100.2
|
||||
cloud.google.com/go/compute v1.6.0
|
||||
cloud.google.com/go v0.102.1
|
||||
cloud.google.com/go/compute v1.7.0
|
||||
cloud.google.com/go/iam v0.3.0
|
||||
github.com/golang/protobuf v1.5.2
|
||||
github.com/google/go-cmp v0.5.7
|
||||
github.com/google/uuid v1.1.2
|
||||
github.com/googleapis/gax-go/v2 v2.3.0
|
||||
github.com/googleapis/go-type-adapters v1.0.0
|
||||
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5
|
||||
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f
|
||||
google.golang.org/api v0.74.0
|
||||
google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335
|
||||
google.golang.org/grpc v1.46.0
|
||||
google.golang.org/protobuf v1.28.0
|
||||
github.com/google/go-cmp v0.5.8
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/googleapis/gax-go/v2 v2.4.0
|
||||
golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094
|
||||
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f
|
||||
google.golang.org/api v0.94.0
|
||||
google.golang.org/genproto v0.0.0-20220810155839-1856144b1d9c
|
||||
google.golang.org/grpc v1.48.0
|
||||
google.golang.org/protobuf v1.28.1
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
|
||||
github.com/google/martian/v3 v3.2.1 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect
|
||||
go.opencensus.io v0.23.0 // indirect
|
||||
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e // indirect
|
||||
golang.org/x/sys v0.0.0-20220624220833-87e55d714810 // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
)
|
||||
|
|
|
|||
72
vendor/cloud.google.com/go/storage/go.sum
generated
vendored
|
|
@ -26,8 +26,10 @@ cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+Y
|
|||
cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
|
||||
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
|
||||
cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
|
||||
cloud.google.com/go v0.100.2 h1:t9Iw5QH5v4XtlEQaCtUY7x6sCABps8sW0acw7e2WQ6Y=
|
||||
cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
|
||||
cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
|
||||
cloud.google.com/go v0.102.1 h1:vpK6iQWv/2uUeFJth4/cBHsQAGjn1iIE6AAlxipRaA0=
|
||||
cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
|
|
@ -37,8 +39,10 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7
|
|||
cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
|
||||
cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
|
||||
cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
|
||||
cloud.google.com/go/compute v1.6.0 h1:XdQIN5mdPTSBVwSIVDuY5e8ZzVAccsHvD3qTEz4zIps=
|
||||
cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
|
||||
cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
|
||||
cloud.google.com/go/compute v1.7.0 h1:v/k9Eueb8aAJ0vZuxKMrgm6kPhCLZU9HxFU+AFDs9Uk=
|
||||
cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/iam v0.3.0 h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc=
|
||||
|
|
@ -52,6 +56,7 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
|
|||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
|
|
@ -136,8 +141,9 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
|||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
|
||||
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
|
||||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
|
|
@ -159,16 +165,20 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe
|
|||
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.1.0 h1:zO8WHNx/MYiAKJ3d5spxZXZE6KHmIQGQcAzwUzV7qQw=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
|
||||
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
|
||||
github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
|
||||
github.com/googleapis/gax-go/v2 v2.3.0 h1:nRJtk3y8Fm770D42QV6T90ZnvFZyk7agSo3Q+Z9p3WI=
|
||||
github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
|
||||
github.com/googleapis/go-type-adapters v1.0.0 h1:9XdMn+d/G57qq1s8dNc5IesGCXHf6V2HZ2JwRxfA2tA=
|
||||
github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk=
|
||||
github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
|
||||
github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
|
|
@ -282,8 +292,12 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b
|
|||
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220325170049-de3da57026de h1:pZB1TWnKi+o4bENlbzAgLrEbY4RMYmUIRobMcSmfeYc=
|
||||
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e h1:TsQ7F31D3bUCLeqPT0u+yjp1guoArKaNKmCr22PYgTQ=
|
||||
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
|
@ -302,8 +316,10 @@ golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ
|
|||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 h1:OSnWWcOd/CtWQC2cYSBgbTSJv3ciqd8r54ySIW2y3RE=
|
||||
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
|
||||
golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094 h1:2o1E+E8TpNLklK9nHiPiK1uzIYrIHt+cQx3ynCwq9V8=
|
||||
golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
|
@ -315,6 +331,7 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
|
@ -367,8 +384,14 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886 h1:eJv7u3ksNXoLbGSKuv2s/SIO4tJVxc/A+MTpzxDgz/Q=
|
||||
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220624220833-87e55d714810 h1:rHZQSjJdAI4Xf5Qzeh2bBc5YJIkPFVM6oDtMFYmgws0=
|
||||
golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
|
@ -439,8 +462,10 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
|
|||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f h1:GGU+dLjvlC3qDwqYgL6UgRmHXhOOgns0bZu2Ty5mm6U=
|
||||
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0=
|
||||
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
|
|
@ -475,8 +500,13 @@ google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tD
|
|||
google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
|
||||
google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
|
||||
google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
|
||||
google.golang.org/api v0.74.0 h1:ExR2D+5TYIrMphWgs5JCgwRhEDlPDXXrLwHHMgPHTXE=
|
||||
google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
|
||||
google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
|
||||
google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
|
||||
google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
|
||||
google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
|
||||
google.golang.org/api v0.94.0 h1:KtKM9ru3nzQioV1HLlUf1cR7vMYJIpgls5VhAYQXIwA=
|
||||
google.golang.org/api v0.94.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
|
|
@ -555,8 +585,18 @@ google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2
|
|||
google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
|
||||
google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
|
||||
google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
|
||||
google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335 h1:2D0OT6tPVdrQTOnVe1VQjfJPTED6EZ7fdJ/f6Db6OsY=
|
||||
google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
|
||||
google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
|
||||
google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
|
||||
google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
|
||||
google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
|
||||
google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
|
||||
google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20220810155839-1856144b1d9c h1:IooGDWedfLC6KLczH/uduUsKQP42ZZYhKx+zd50L1Sk=
|
||||
google.golang.org/genproto v0.0.0-20220810155839-1856144b1d9c/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
|
|
@ -585,8 +625,11 @@ google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K
|
|||
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
|
||||
google.golang.org/grpc v1.46.0 h1:oCjezcn6g6A75TGoKYBPgKmVBLexhYLM6MebdrPApP8=
|
||||
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w=
|
||||
google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
|
|
@ -601,8 +644,9 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
|
|||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
|
||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
|
||||
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
|
|
|
|||
1171
vendor/cloud.google.com/go/storage/grpc_client.go
generated
vendored
File diff suppressed because it is too large
157
vendor/cloud.google.com/go/storage/hmac.go
generated
vendored
|
|
@ -20,6 +20,7 @@ import (
|
|||
"fmt"
|
||||
"time"
|
||||
|
||||
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
|
||||
"google.golang.org/api/iterator"
|
||||
raw "google.golang.org/api/storage/v1"
|
||||
)
|
||||
|
|
@ -90,7 +91,7 @@ type HMACKeyHandle struct {
|
|||
projectID string
|
||||
accessID string
|
||||
retry *retryConfig
|
||||
raw *raw.ProjectsHmacKeysService
|
||||
tc storageClient
|
||||
}
|
||||
|
||||
// HMACKeyHandle creates a handle that will be used for HMACKey operations.
|
||||
|
|
@ -101,7 +102,7 @@ func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle {
|
|||
projectID: projectID,
|
||||
accessID: accessID,
|
||||
retry: c.retry,
|
||||
raw: raw.NewProjectsHmacKeysService(c.raw),
|
||||
tc: c.tc,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -113,32 +114,15 @@ func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle {
|
|||
//
|
||||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
||||
func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMACKey, error) {
|
||||
call := hkh.raw.Get(hkh.projectID, hkh.accessID)
|
||||
|
||||
desc := new(hmacKeyDesc)
|
||||
for _, opt := range opts {
|
||||
opt.withHMACKeyDesc(desc)
|
||||
}
|
||||
if desc.userProjectID != "" {
|
||||
call = call.UserProject(desc.userProjectID)
|
||||
}
|
||||
|
||||
setClientHeader(call.Header())
|
||||
o := makeStorageOpts(true, hkh.retry, desc.userProjectID)
|
||||
hk, err := hkh.tc.GetHMACKey(ctx, hkh.projectID, hkh.accessID, o...)
|
||||
|
||||
var metadata *raw.HmacKeyMetadata
|
||||
var err error
|
||||
err = run(ctx, func() error {
|
||||
metadata, err = call.Context(ctx).Do()
|
||||
return err
|
||||
}, hkh.retry, true, setRetryHeaderHTTP(call))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hkPb := &raw.HmacKey{
|
||||
Metadata: metadata,
|
||||
}
|
||||
return pbHmacKeyToHMACKey(hkPb, false)
|
||||
return hk, err
|
||||
}
|
||||
|
||||
// Delete invokes an RPC to delete the key referenced by accessID, on Google Cloud Storage.
|
||||
|
|
@ -147,49 +131,59 @@ func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMAC
|
|||
//
|
||||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
||||
func (hkh *HMACKeyHandle) Delete(ctx context.Context, opts ...HMACKeyOption) error {
|
||||
delCall := hkh.raw.Delete(hkh.projectID, hkh.accessID)
|
||||
desc := new(hmacKeyDesc)
|
||||
for _, opt := range opts {
|
||||
opt.withHMACKeyDesc(desc)
|
||||
}
|
||||
if desc.userProjectID != "" {
|
||||
delCall = delCall.UserProject(desc.userProjectID)
|
||||
}
|
||||
setClientHeader(delCall.Header())
|
||||
|
||||
return run(ctx, func() error {
|
||||
return delCall.Context(ctx).Do()
|
||||
}, hkh.retry, true, setRetryHeaderHTTP(delCall))
|
||||
o := makeStorageOpts(true, hkh.retry, desc.userProjectID)
|
||||
return hkh.tc.DeleteHMACKey(ctx, hkh.projectID, hkh.accessID, o...)
|
||||
}
|
||||
|
||||
func pbHmacKeyToHMACKey(pb *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, error) {
|
||||
pbmd := pb.Metadata
|
||||
if pbmd == nil {
|
||||
func toHMACKeyFromRaw(hk *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, error) {
|
||||
hkmd := hk.Metadata
|
||||
if hkmd == nil {
|
||||
return nil, errors.New("field Metadata cannot be nil")
|
||||
}
|
||||
createdTime, err := time.Parse(time.RFC3339, pbmd.TimeCreated)
|
||||
createdTime, err := time.Parse(time.RFC3339, hkmd.TimeCreated)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("field CreatedTime: %v", err)
|
||||
}
|
||||
updatedTime, err := time.Parse(time.RFC3339, pbmd.Updated)
|
||||
updatedTime, err := time.Parse(time.RFC3339, hkmd.Updated)
|
||||
if err != nil && !updatedTimeCanBeNil {
|
||||
return nil, fmt.Errorf("field UpdatedTime: %v", err)
|
||||
}
|
||||
|
||||
hmk := &HMACKey{
|
||||
AccessID: pbmd.AccessId,
|
||||
Secret: pb.Secret,
|
||||
Etag: pbmd.Etag,
|
||||
ID: pbmd.Id,
|
||||
State: HMACState(pbmd.State),
|
||||
ProjectID: pbmd.ProjectId,
|
||||
hmKey := &HMACKey{
|
||||
AccessID: hkmd.AccessId,
|
||||
Secret: hk.Secret,
|
||||
Etag: hkmd.Etag,
|
||||
ID: hkmd.Id,
|
||||
State: HMACState(hkmd.State),
|
||||
ProjectID: hkmd.ProjectId,
|
||||
CreatedTime: createdTime,
|
||||
UpdatedTime: updatedTime,
|
||||
|
||||
ServiceAccountEmail: pbmd.ServiceAccountEmail,
|
||||
ServiceAccountEmail: hkmd.ServiceAccountEmail,
|
||||
}
|
||||
|
||||
return hmk, nil
|
||||
return hmKey, nil
|
||||
}
|
||||
|
||||
func toHMACKeyFromProto(pbmd *storagepb.HmacKeyMetadata) *HMACKey {
|
||||
if pbmd == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &HMACKey{
|
||||
AccessID: pbmd.GetAccessId(),
|
||||
ID: pbmd.GetId(),
|
||||
State: HMACState(pbmd.GetState()),
|
||||
ProjectID: pbmd.GetProject(),
|
||||
CreatedTime: convertProtoTime(pbmd.GetCreateTime()),
|
||||
UpdatedTime: convertProtoTime(pbmd.GetUpdateTime()),
|
||||
ServiceAccountEmail: pbmd.GetServiceAccountEmail(),
|
||||
}
|
||||
}
|
||||
|
||||
// CreateHMACKey invokes an RPC for Google Cloud Storage to create a new HMACKey.
|
||||
|
|
@ -203,29 +197,14 @@ func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEma
|
|||
return nil, errors.New("storage: expecting a non-blank service account email")
|
||||
}
|
||||
|
||||
svc := raw.NewProjectsHmacKeysService(c.raw)
|
||||
call := svc.Create(projectID, serviceAccountEmail)
|
||||
desc := new(hmacKeyDesc)
|
||||
for _, opt := range opts {
|
||||
opt.withHMACKeyDesc(desc)
|
||||
}
|
||||
if desc.userProjectID != "" {
|
||||
call = call.UserProject(desc.userProjectID)
|
||||
}
|
||||
|
||||
setClientHeader(call.Header())
|
||||
|
||||
var hkPb *raw.HmacKey
|
||||
|
||||
if err := run(ctx, func() error {
|
||||
h, err := call.Context(ctx).Do()
|
||||
hkPb = h
|
||||
return err
|
||||
}, c.retry, false, setRetryHeaderHTTP(call)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return pbHmacKeyToHMACKey(hkPb, true)
|
||||
o := makeStorageOpts(false, c.retry, desc.userProjectID)
|
||||
hk, err := c.tc.CreateHMACKey(ctx, projectID, serviceAccountEmail, o...)
|
||||
return hk, err
|
||||
}
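A caller-side sketch of the key creation this wraps; the service account email is a placeholder:

    hkey, err := client.CreateHMACKey(ctx, projectID, "sa@my-project.iam.gserviceaccount.com")
    if err != nil {
        // TODO: Handle error.
    }
    // The Secret is returned only at creation time, so persist it immediately.
    fmt.Println("access ID:", hkey.AccessID)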
|
||||
|
||||
// HMACKeyAttrsToUpdate defines the attributes of an HMACKey that will be updated.
|
||||
|
|
@ -247,35 +226,15 @@ func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opt
|
|||
return nil, fmt.Errorf("storage: invalid state %q for update, must be either %q or %q", au.State, Active, Inactive)
|
||||
}
|
||||
|
||||
call := h.raw.Update(h.projectID, h.accessID, &raw.HmacKeyMetadata{
|
||||
Etag: au.Etag,
|
||||
State: string(au.State),
|
||||
})
|
||||
|
||||
desc := new(hmacKeyDesc)
|
||||
for _, opt := range opts {
|
||||
opt.withHMACKeyDesc(desc)
|
||||
}
|
||||
if desc.userProjectID != "" {
|
||||
call = call.UserProject(desc.userProjectID)
|
||||
}
|
||||
setClientHeader(call.Header())
|
||||
|
||||
var metadata *raw.HmacKeyMetadata
|
||||
var err error
|
||||
isIdempotent := len(au.Etag) > 0
|
||||
err = run(ctx, func() error {
|
||||
metadata, err = call.Context(ctx).Do()
|
||||
return err
|
||||
}, h.retry, isIdempotent, setRetryHeaderHTTP(call))
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hkPb := &raw.HmacKey{
|
||||
Metadata: metadata,
|
||||
}
|
||||
return pbHmacKeyToHMACKey(hkPb, false)
|
||||
o := makeStorageOpts(isIdempotent, h.retry, desc.userProjectID)
|
||||
hk, err := h.tc.UpdateHMACKey(ctx, h.projectID, desc.forServiceAccountEmail, h.accessID, &au, o...)
|
||||
return hk, err
|
||||
}
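Deactivating a key through the Update path above could look like the following sketch; passing the Etag read back from Get is what makes the call idempotent, mirroring the isIdempotent check:

    handle := client.HMACKeyHandle(projectID, accessID)
    key, err := handle.Get(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    updated, err := handle.Update(ctx, storage.HMACKeyAttrsToUpdate{
        State: storage.Inactive,
        Etag:  key.Etag, // optional, but allows the update to be retried safely
    })
    if err != nil {
        // TODO: Handle error.
    }
    fmt.Println("state:", updated.State)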
|
||||
|
||||
// An HMACKeysIterator is an iterator over HMACKeys.
|
||||
|
|
@ -301,27 +260,13 @@ type HMACKeysIterator struct {
|
|||
//
|
||||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
||||
func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMACKeyOption) *HMACKeysIterator {
|
||||
it := &HMACKeysIterator{
|
||||
ctx: ctx,
|
||||
raw: raw.NewProjectsHmacKeysService(c.raw),
|
||||
projectID: projectID,
|
||||
retry: c.retry,
|
||||
}
|
||||
|
||||
desc := new(hmacKeyDesc)
|
||||
for _, opt := range opts {
|
||||
opt.withHMACKeyDesc(&it.desc)
|
||||
opt.withHMACKeyDesc(desc)
|
||||
}
|
||||
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
|
||||
it.fetch,
|
||||
func() int { return len(it.hmacKeys) - it.index },
|
||||
func() interface{} {
|
||||
prev := it.hmacKeys
|
||||
it.hmacKeys = it.hmacKeys[:0]
|
||||
it.index = 0
|
||||
return prev
|
||||
})
|
||||
return it
|
||||
o := makeStorageOpts(true, c.retry, desc.userProjectID)
|
||||
return c.tc.ListHMACKeys(ctx, projectID, desc.forServiceAccountEmail, desc.showDeletedKeys, o...)
|
||||
}
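The iterator returned here is consumed with Next until iterator.Done, as the comment below describes:

    it := client.ListHMACKeys(ctx, projectID)
    for {
        key, err := it.Next()
        if err == iterator.Done {
            break
        }
        if err != nil {
            // TODO: Handle error.
        }
        fmt.Println(key.AccessID, key.State)
    }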
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if
|
||||
|
|
@ -350,6 +295,8 @@ func (it *HMACKeysIterator) Next() (*HMACKey, error) {
|
|||
func (it *HMACKeysIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
|
||||
|
||||
func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, err error) {
|
||||
// TODO: Remove fetch method upon integration. This method is internalized into
|
||||
// httpStorageClient.ListHMACKeys() as it is the only caller.
|
||||
call := it.raw.List(it.projectID)
|
||||
setClientHeader(call.Header())
|
||||
if pageToken != "" {
|
||||
|
|
@ -379,10 +326,10 @@ func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string,
|
|||
}
|
||||
|
||||
for _, metadata := range resp.Items {
|
||||
hkPb := &raw.HmacKey{
|
||||
hk := &raw.HmacKey{
|
||||
Metadata: metadata,
|
||||
}
|
||||
hkey, err := pbHmacKeyToHMACKey(hkPb, true)
|
||||
hkey, err := toHMACKeyFromRaw(hk, true)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
|
|
|||
848
vendor/cloud.google.com/go/storage/http_client.go
generated
vendored
|
|
@ -16,14 +16,21 @@ package storage
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/internal/optional"
|
||||
"cloud.google.com/go/internal/trace"
|
||||
"golang.org/x/oauth2/google"
|
||||
"google.golang.org/api/googleapi"
|
||||
"google.golang.org/api/iterator"
|
||||
|
|
@ -152,7 +159,7 @@ func (c *httpStorageClient) GetServiceAccount(ctx context.Context, project strin
|
|||
return res.EmailAddress, nil
|
||||
}
|
||||
|
||||
func (c *httpStorageClient) CreateBucket(ctx context.Context, project string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) {
|
||||
func (c *httpStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) {
|
||||
s := callSettings(c.settings, opts...)
|
||||
var bkt *raw.Bucket
|
||||
if attrs != nil {
|
||||
|
|
@ -160,7 +167,7 @@ func (c *httpStorageClient) CreateBucket(ctx context.Context, project string, at
|
|||
} else {
|
||||
bkt = &raw.Bucket{}
|
||||
}
|
||||
|
||||
bkt.Name = bucket
|
||||
// If there is lifecycle information but no location, explicitly set
|
||||
// the location. This is a GCS quirk/bug.
|
||||
if bkt.Location == "" && bkt.Lifecycle != nil {
|
||||
|
|
@ -378,14 +385,143 @@ func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Q
|
|||
|
||||
// Object metadata methods.
|
||||
|
||||
func (c *httpStorageClient) DeleteObject(ctx context.Context, bucket, object string, conds *Conditions, opts ...storageOption) error {
|
||||
return errMethodNotSupported
|
||||
func (c *httpStorageClient) DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error {
|
||||
s := callSettings(c.settings, opts...)
|
||||
req := c.raw.Objects.Delete(bucket, object).Context(ctx)
|
||||
if err := applyConds("Delete", gen, conds, req); err != nil {
|
||||
return err
|
||||
}
|
||||
if s.userProject != "" {
|
||||
req.UserProject(s.userProject)
|
||||
}
|
||||
err := run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req))
|
||||
var e *googleapi.Error
|
||||
if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound {
|
||||
return ErrObjectNotExist
|
||||
}
|
||||
return err
|
||||
}
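On the caller side this corresponds to ObjectHandle.Delete, optionally guarded by preconditions; a sketch that also exercises the ErrObjectNotExist mapping above ("my-bucket" and "my-object" are placeholders):

    obj := client.Bucket("my-bucket").Object("my-object")
    attrs, err := obj.Attrs(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    // Delete only if the object has not been overwritten since Attrs was read.
    err = obj.If(storage.Conditions{GenerationMatch: attrs.Generation}).Delete(ctx)
    if errors.Is(err, storage.ErrObjectNotExist) {
        // The object no longer exists.
    } else if err != nil {
        // TODO: Handle error.
    }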
|
||||
func (c *httpStorageClient) GetObject(ctx context.Context, bucket, object string, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) {
|
||||
return nil, errMethodNotSupported
|
||||
|
||||
func (c *httpStorageClient) GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) {
|
||||
s := callSettings(c.settings, opts...)
|
||||
req := c.raw.Objects.Get(bucket, object).Projection("full").Context(ctx)
|
||||
if err := applyConds("Attrs", gen, conds, req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if s.userProject != "" {
|
||||
req.UserProject(s.userProject)
|
||||
}
|
||||
if err := setEncryptionHeaders(req.Header(), encryptionKey, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var obj *raw.Object
|
||||
var err error
|
||||
err = run(ctx, func() error {
|
||||
obj, err = req.Context(ctx).Do()
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderHTTP(req))
|
||||
var e *googleapi.Error
|
||||
if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound {
|
||||
return nil, ErrObjectNotExist
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newObject(obj), nil
|
||||
}
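The encryptionKey parameter carries a customer-supplied encryption key (CSEK). On the caller side it is typically attached to the handle; a sketch assuming ObjectHandle.Key and a 32-byte AES-256 key in secretKey:

    // secretKey must be exactly 32 bytes; the client base64-encodes it into the
    // x-goog-encryption request headers set by setEncryptionHeaders above.
    obj := client.Bucket("my-bucket").Object("my-object").Key(secretKey)
    attrs, err := obj.Attrs(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    fmt.Println("size:", attrs.Size)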
|
||||
func (c *httpStorageClient) UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) {
|
||||
return nil, errMethodNotSupported
|
||||
|
||||
func (c *httpStorageClient) UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) {
|
||||
s := callSettings(c.settings, opts...)
|
||||
|
||||
var attrs ObjectAttrs
|
||||
// Lists of fields to send, and set to null, in the JSON.
|
||||
var forceSendFields, nullFields []string
|
||||
if uattrs.ContentType != nil {
|
||||
attrs.ContentType = optional.ToString(uattrs.ContentType)
|
||||
// For ContentType, sending the empty string is a no-op.
|
||||
// Instead we send a null.
|
||||
if attrs.ContentType == "" {
|
||||
nullFields = append(nullFields, "ContentType")
|
||||
} else {
|
||||
forceSendFields = append(forceSendFields, "ContentType")
|
||||
}
|
||||
}
|
||||
if uattrs.ContentLanguage != nil {
|
||||
attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage)
|
||||
// For ContentLanguage it's an error to send the empty string.
|
||||
// Instead we send a null.
|
||||
if attrs.ContentLanguage == "" {
|
||||
nullFields = append(nullFields, "ContentLanguage")
|
||||
} else {
|
||||
forceSendFields = append(forceSendFields, "ContentLanguage")
|
||||
}
|
||||
}
|
||||
if uattrs.ContentEncoding != nil {
|
||||
attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding)
|
||||
forceSendFields = append(forceSendFields, "ContentEncoding")
|
||||
}
|
||||
if uattrs.ContentDisposition != nil {
|
||||
attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition)
|
||||
forceSendFields = append(forceSendFields, "ContentDisposition")
|
||||
}
|
||||
if uattrs.CacheControl != nil {
|
||||
attrs.CacheControl = optional.ToString(uattrs.CacheControl)
|
||||
forceSendFields = append(forceSendFields, "CacheControl")
|
||||
}
|
||||
if uattrs.EventBasedHold != nil {
|
||||
attrs.EventBasedHold = optional.ToBool(uattrs.EventBasedHold)
|
||||
forceSendFields = append(forceSendFields, "EventBasedHold")
|
||||
}
|
||||
if uattrs.TemporaryHold != nil {
|
||||
attrs.TemporaryHold = optional.ToBool(uattrs.TemporaryHold)
|
||||
forceSendFields = append(forceSendFields, "TemporaryHold")
|
||||
}
|
||||
if !uattrs.CustomTime.IsZero() {
|
||||
attrs.CustomTime = uattrs.CustomTime
|
||||
forceSendFields = append(forceSendFields, "CustomTime")
|
||||
}
|
||||
if uattrs.Metadata != nil {
|
||||
attrs.Metadata = uattrs.Metadata
|
||||
if len(attrs.Metadata) == 0 {
|
||||
// Sending the empty map is a no-op. We send null instead.
|
||||
nullFields = append(nullFields, "Metadata")
|
||||
} else {
|
||||
forceSendFields = append(forceSendFields, "Metadata")
|
||||
}
|
||||
}
|
||||
if uattrs.ACL != nil {
|
||||
attrs.ACL = uattrs.ACL
|
||||
// It's an error to attempt to delete the ACL, so
|
||||
// we don't append to nullFields here.
|
||||
forceSendFields = append(forceSendFields, "Acl")
|
||||
}
|
||||
rawObj := attrs.toRawObject(bucket)
|
||||
rawObj.ForceSendFields = forceSendFields
|
||||
rawObj.NullFields = nullFields
|
||||
call := c.raw.Objects.Patch(bucket, object, rawObj).Projection("full").Context(ctx)
|
||||
if err := applyConds("Update", gen, conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if s.userProject != "" {
|
||||
call.UserProject(s.userProject)
|
||||
}
|
||||
if uattrs.PredefinedACL != "" {
|
||||
call.PredefinedAcl(uattrs.PredefinedACL)
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), encryptionKey, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var obj *raw.Object
|
||||
var err error
|
||||
err = run(ctx, func() error { obj, err = call.Do(); return err }, s.retry, s.idempotent, setRetryHeaderHTTP(call))
|
||||
var e *googleapi.Error
|
||||
if errors.As(err, &e) && e.Code == http.StatusNotFound {
|
||||
return nil, ErrObjectNotExist
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newObject(obj), nil
|
||||
}
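The forceSendFields/nullFields bookkeeping above is what lets callers distinguish "set to the zero value" from "leave unchanged". From the caller's side that is expressed with ObjectAttrsToUpdate, for example:

    uattrs := storage.ObjectAttrsToUpdate{
        ContentType:  "text/plain",        // sent explicitly via ForceSendFields
        CacheControl: "no-cache",
        Metadata:     map[string]string{}, // empty map clears metadata (sent as null)
    }
    attrs, err := client.Bucket("my-bucket").Object("my-object").Update(ctx, uattrs)
    if err != nil {
        // TODO: Handle error.
    }
    fmt.Println("updated at:", attrs.Updated)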
|
||||
|
||||
// Default Object ACL methods.
|
||||
|
|
@ -412,8 +548,25 @@ func (c *httpStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket st
|
|||
}
|
||||
return toObjectACLRules(acls.Items), nil
|
||||
}
|
||||
func (c *httpStorageClient) UpdateDefaultObjectACL(ctx context.Context, opts ...storageOption) (*ACLRule, error) {
|
||||
return nil, errMethodNotSupported
|
||||
func (c *httpStorageClient) UpdateDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error {
|
||||
s := callSettings(c.settings, opts...)
|
||||
type setRequest interface {
|
||||
Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error)
|
||||
Header() http.Header
|
||||
}
|
||||
acl := &raw.ObjectAccessControl{
|
||||
Bucket: bucket,
|
||||
Entity: string(entity),
|
||||
Role: string(role),
|
||||
}
|
||||
var req setRequest
|
||||
var err error
|
||||
req = c.raw.DefaultObjectAccessControls.Update(bucket, string(entity), acl)
|
||||
configureACLCall(ctx, s.userProject, req)
|
||||
return run(ctx, func() error {
|
||||
_, err = req.Do()
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderHTTP(req))
|
||||
}
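A short sketch of the public call that ends up in UpdateDefaultObjectACL above, granting read access on newly created objects to all users (the bucket name is a placeholder):

package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Sets a default object ACL entry; the raw.ObjectAccessControl built above
	// carries the same entity/role pair.
	acl := client.Bucket("my-bucket").DefaultObjectACL()
	if err := acl.Set(ctx, storage.AllUsers, storage.RoleReader); err != nil {
		log.Fatal(err)
	}
}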
|
||||
|
||||
// Bucket ACL methods.
|
||||
|
|
@ -441,7 +594,7 @@ func (c *httpStorageClient) ListBucketACLs(ctx context.Context, bucket string, o
|
|||
return toBucketACLRules(acls.Items), nil
|
||||
}
|
||||
|
||||
func (c *httpStorageClient) UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error) {
|
||||
func (c *httpStorageClient) UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error {
|
||||
s := callSettings(c.settings, opts...)
|
||||
acl := &raw.BucketAccessControl{
|
||||
Bucket: bucket,
|
||||
|
|
@ -450,17 +603,11 @@ func (c *httpStorageClient) UpdateBucketACL(ctx context.Context, bucket string,
|
|||
}
|
||||
req := c.raw.BucketAccessControls.Update(bucket, string(entity), acl)
|
||||
configureACLCall(ctx, s.userProject, req)
|
||||
var aclRule ACLRule
|
||||
var err error
|
||||
err = run(ctx, func() error {
|
||||
acl, err = req.Do()
|
||||
aclRule = toBucketACLRule(acl)
|
||||
return run(ctx, func() error {
|
||||
_, err = req.Do()
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderHTTP(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &aclRule, nil
|
||||
}
|
||||
|
||||
// configureACLCall sets the context, user project and headers on the apiary library call.
|
||||
|
|
@ -477,29 +624,440 @@ func configureACLCall(ctx context.Context, userProject string, call interface{ H
|
|||
// Object ACL methods.
|
||||
|
||||
func (c *httpStorageClient) DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error {
|
||||
return errMethodNotSupported
|
||||
s := callSettings(c.settings, opts...)
|
||||
req := c.raw.ObjectAccessControls.Delete(bucket, object, string(entity))
|
||||
configureACLCall(ctx, s.userProject, req)
|
||||
return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req))
|
||||
}
|
||||
|
||||
// ListObjectACLs retrieves object ACL entries. By default, it operates on the latest generation of this object.
|
||||
// Selecting a specific generation of this object is not currently supported by the client.
|
||||
func (c *httpStorageClient) ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error) {
|
||||
return nil, errMethodNotSupported
|
||||
s := callSettings(c.settings, opts...)
|
||||
var acls *raw.ObjectAccessControls
|
||||
var err error
|
||||
req := c.raw.ObjectAccessControls.List(bucket, object)
|
||||
configureACLCall(ctx, s.userProject, req)
|
||||
err = run(ctx, func() error {
|
||||
acls, err = req.Do()
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderHTTP(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return toObjectACLRules(acls.Items), nil
|
||||
}
|
||||
func (c *httpStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) (*ACLRule, error) {
|
||||
return nil, errMethodNotSupported
|
||||
|
||||
func (c *httpStorageClient) UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error {
|
||||
s := callSettings(c.settings, opts...)
|
||||
type setRequest interface {
|
||||
Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error)
|
||||
Header() http.Header
|
||||
}
|
||||
|
||||
acl := &raw.ObjectAccessControl{
|
||||
Bucket: bucket,
|
||||
Entity: string(entity),
|
||||
Role: string(role),
|
||||
}
|
||||
var req setRequest
|
||||
var err error
|
||||
req = c.raw.ObjectAccessControls.Update(bucket, object, string(entity), acl)
|
||||
configureACLCall(ctx, s.userProject, req)
|
||||
return run(ctx, func() error {
|
||||
_, err = req.Do()
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderHTTP(req))
|
||||
}
|
||||
|
||||
// Media operations.
|
||||
|
||||
func (c *httpStorageClient) ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error) {
|
||||
return nil, errMethodNotSupported
|
||||
s := callSettings(c.settings, opts...)
|
||||
rawReq := &raw.ComposeRequest{}
|
||||
// Compose requires a non-empty Destination, so we always set it,
|
||||
// even if the caller-provided ObjectAttrs is the zero value.
|
||||
rawReq.Destination = req.dstObject.attrs.toRawObject(req.dstBucket)
|
||||
if req.sendCRC32C {
|
||||
rawReq.Destination.Crc32c = encodeUint32(req.dstObject.attrs.CRC32C)
|
||||
}
|
||||
for _, src := range req.srcs {
|
||||
srcObj := &raw.ComposeRequestSourceObjects{
|
||||
Name: src.name,
|
||||
}
|
||||
if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rawReq.SourceObjects = append(rawReq.SourceObjects, srcObj)
|
||||
}
|
||||
|
||||
call := c.raw.Objects.Compose(req.dstBucket, req.dstObject.name, rawReq).Context(ctx)
|
||||
if err := applyConds("ComposeFrom destination", defaultGen, req.dstObject.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if s.userProject != "" {
|
||||
call.UserProject(s.userProject)
|
||||
}
|
||||
if req.predefinedACL != "" {
|
||||
call.DestinationPredefinedAcl(req.predefinedACL)
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), req.dstObject.encryptionKey, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var obj *raw.Object
|
||||
setClientHeader(call.Header())
|
||||
|
||||
var err error
|
||||
retryCall := func() error { obj, err = call.Do(); return err }
|
||||
|
||||
if err := run(ctx, retryCall, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newObject(obj), nil
|
||||
}
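The Compose call above is surfaced through ObjectHandle.ComposerFrom. A minimal sketch, assuming placeholder names for the destination and source objects:

package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	bkt := client.Bucket("my-bucket")
	dst := bkt.Object("combined")
	// Destination attributes (here ContentType) populate rawReq.Destination above.
	comp := dst.ComposerFrom(bkt.Object("part-1"), bkt.Object("part-2"))
	comp.ContentType = "text/plain"
	attrs, err := comp.Run(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("composed object size:", attrs.Size)
}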
|
||||
func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error) {
|
||||
return nil, errMethodNotSupported
|
||||
s := callSettings(c.settings, opts...)
|
||||
rawObject := req.dstObject.attrs.toRawObject("")
|
||||
call := c.raw.Objects.Rewrite(req.srcObject.bucket, req.srcObject.name, req.dstObject.bucket, req.dstObject.name, rawObject)
|
||||
|
||||
call.Context(ctx).Projection("full")
|
||||
if req.token != "" {
|
||||
call.RewriteToken(req.token)
|
||||
}
|
||||
if req.dstObject.keyName != "" {
|
||||
call.DestinationKmsKeyName(req.dstObject.keyName)
|
||||
}
|
||||
if req.predefinedACL != "" {
|
||||
call.DestinationPredefinedAcl(req.predefinedACL)
|
||||
}
|
||||
if err := applyConds("Copy destination", defaultGen, req.dstObject.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := applySourceConds(req.srcObject.gen, req.srcObject.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if s.userProject != "" {
|
||||
call.UserProject(s.userProject)
|
||||
}
|
||||
// Set destination encryption headers.
|
||||
if err := setEncryptionHeaders(call.Header(), req.dstObject.encryptionKey, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Set source encryption headers.
|
||||
if err := setEncryptionHeaders(call.Header(), req.srcObject.encryptionKey, true); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var res *raw.RewriteResponse
|
||||
var err error
|
||||
setClientHeader(call.Header())
|
||||
|
||||
retryCall := func() error { res, err = call.Do(); return err }
|
||||
|
||||
if err := run(ctx, retryCall, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r := &rewriteObjectResponse{
|
||||
done: res.Done,
|
||||
written: res.TotalBytesRewritten,
|
||||
size: res.ObjectSize,
|
||||
token: res.RewriteToken,
|
||||
resource: newObject(res.Resource),
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
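RewriteObject returns a token when the copy is not finished in a single call; the public Copier loops on that token until done is reported. A sketch, with placeholder bucket and object names:

package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	src := client.Bucket("src-bucket").Object("report.csv")
	dst := client.Bucket("dst-bucket").Object("report-copy.csv")
	// Copier.Run keeps calling the rewrite endpoint, feeding the returned
	// RewriteToken back in, until the response reports done.
	attrs, err := dst.CopierFrom(src).Run(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("copied, generation", attrs.Generation)
}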
|
||||
|
||||
func (c *httpStorageClient) OpenReader(ctx context.Context, r *Reader, opts ...storageOption) error {
|
||||
return errMethodNotSupported
|
||||
func (c *httpStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.NewRangeReader")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
s := callSettings(c.settings, opts...)
|
||||
|
||||
u := &url.URL{
|
||||
Scheme: c.scheme,
|
||||
Host: c.readHost,
|
||||
Path: fmt.Sprintf("/%s/%s", params.bucket, params.object),
|
||||
}
|
||||
verb := "GET"
|
||||
if params.length == 0 {
|
||||
verb = "HEAD"
|
||||
}
|
||||
req, err := http.NewRequest(verb, u.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
if s.userProject != "" {
|
||||
req.Header.Set("X-Goog-User-Project", s.userProject)
|
||||
}
|
||||
if params.readCompressed {
|
||||
req.Header.Set("Accept-Encoding", "gzip")
|
||||
}
|
||||
if err := setEncryptionHeaders(req.Header, params.encryptionKey, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Define a function that initiates a Read with offset and length, assuming we
|
||||
// have already read seen bytes.
|
||||
reopen := func(seen int64) (*http.Response, error) {
|
||||
// If the context has already expired, return immediately without making a
|
||||
// call.
|
||||
if err := ctx.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
start := params.offset + seen
|
||||
if params.length < 0 && start < 0 {
|
||||
req.Header.Set("Range", fmt.Sprintf("bytes=%d", start))
|
||||
} else if params.length < 0 && start > 0 {
|
||||
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", start))
|
||||
} else if params.length > 0 {
|
||||
// The end character isn't affected by how many bytes we've seen.
|
||||
req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, params.offset+params.length-1))
|
||||
}
|
||||
// We wait to assign conditions here because the generation number can change in between reopen() runs.
|
||||
if err := setConditionsHeaders(req.Header, params.conds); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// If an object generation is specified, include generation as query string parameters.
|
||||
if params.gen >= 0 {
|
||||
req.URL.RawQuery = fmt.Sprintf("generation=%d", params.gen)
|
||||
}
|
||||
|
||||
var res *http.Response
|
||||
err = run(ctx, func() error {
|
||||
res, err = c.hc.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if res.StatusCode == http.StatusNotFound {
|
||||
res.Body.Close()
|
||||
return ErrObjectNotExist
|
||||
}
|
||||
if res.StatusCode < 200 || res.StatusCode > 299 {
|
||||
body, _ := ioutil.ReadAll(res.Body)
|
||||
res.Body.Close()
|
||||
return &googleapi.Error{
|
||||
Code: res.StatusCode,
|
||||
Header: res.Header,
|
||||
Body: string(body),
|
||||
}
|
||||
}
|
||||
|
||||
partialContentNotSatisfied :=
|
||||
!decompressiveTranscoding(res) &&
|
||||
start > 0 && params.length != 0 &&
|
||||
res.StatusCode != http.StatusPartialContent
|
||||
|
||||
if partialContentNotSatisfied {
|
||||
res.Body.Close()
|
||||
return errors.New("storage: partial request not satisfied")
|
||||
}
|
||||
|
||||
// With "Content-Encoding": "gzip" aka decompressive transcoding, GCS serves
|
||||
// back the whole file regardless of the range count passed in as per:
|
||||
// https://cloud.google.com/storage/docs/transcoding#range,
|
||||
// thus we have to manually move the body forward by seen bytes.
|
||||
if decompressiveTranscoding(res) && seen > 0 {
|
||||
_, _ = io.CopyN(ioutil.Discard, res.Body, seen)
|
||||
}
|
||||
|
||||
// If a generation hasn't been specified, and this is the first response we get, let's record the
|
||||
// generation. In future requests we'll use this generation as a precondition to avoid data races.
|
||||
if params.gen < 0 && res.Header.Get("X-Goog-Generation") != "" {
|
||||
gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
params.gen = gen64
|
||||
}
|
||||
return nil
|
||||
}, s.retry, s.idempotent, setRetryHeaderHTTP(nil))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
res, err := reopen(0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var (
|
||||
size int64 // total size of object, even if a range was requested.
|
||||
checkCRC bool
|
||||
crc uint32
|
||||
startOffset int64 // non-zero if range request.
|
||||
)
|
||||
if res.StatusCode == http.StatusPartialContent {
|
||||
cr := strings.TrimSpace(res.Header.Get("Content-Range"))
|
||||
if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
|
||||
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
|
||||
}
|
||||
// Content range is formatted <first byte>-<last byte>/<total size>. We take
|
||||
// the total size.
|
||||
size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
|
||||
}
|
||||
|
||||
dashIndex := strings.Index(cr, "-")
|
||||
if dashIndex >= 0 {
|
||||
startOffset, err = strconv.ParseInt(cr[len("bytes="):dashIndex], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("storage: invalid Content-Range %q: %v", cr, err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
size = res.ContentLength
|
||||
// Check the CRC iff all of the following hold:
|
||||
// - We asked for content (length != 0).
|
||||
// - We got all the content (status != PartialContent).
|
||||
// - The server sent a CRC header.
|
||||
// - The Go http stack did not uncompress the file.
|
||||
// - We were not served compressed data that was uncompressed on download.
|
||||
// The problem with the last two cases is that the CRC will not match -- GCS
|
||||
// computes it on the compressed contents, but we compute it on the
|
||||
// uncompressed contents.
|
||||
if params.length != 0 && !res.Uncompressed && !uncompressedByServer(res) {
|
||||
crc, checkCRC = parseCRC32c(res)
|
||||
}
|
||||
}
|
||||
|
||||
remain := res.ContentLength
|
||||
body := res.Body
|
||||
if params.length == 0 {
|
||||
remain = 0
|
||||
body.Close()
|
||||
body = emptyBody
|
||||
}
|
||||
var metaGen int64
|
||||
if res.Header.Get("X-Goog-Metageneration") != "" {
|
||||
metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var lm time.Time
|
||||
if res.Header.Get("Last-Modified") != "" {
|
||||
lm, err = http.ParseTime(res.Header.Get("Last-Modified"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
attrs := ReaderObjectAttrs{
|
||||
Size: size,
|
||||
ContentType: res.Header.Get("Content-Type"),
|
||||
ContentEncoding: res.Header.Get("Content-Encoding"),
|
||||
CacheControl: res.Header.Get("Cache-Control"),
|
||||
LastModified: lm,
|
||||
StartOffset: startOffset,
|
||||
Generation: params.gen,
|
||||
Metageneration: metaGen,
|
||||
}
|
||||
return &Reader{
|
||||
Attrs: attrs,
|
||||
size: size,
|
||||
remain: remain,
|
||||
wantCRC: crc,
|
||||
checkCRC: checkCRC,
|
||||
reader: &httpReader{
|
||||
reopen: reopen,
|
||||
body: body,
|
||||
},
|
||||
}, nil
|
||||
}
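The same NewRangeReader shape is exposed on ObjectHandle: a length of 0 issues the HEAD-style metadata-only request seen above, and a negative length reads to the end of the object. A sketch with placeholder names:

package main

import (
	"context"
	"io"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	obj := client.Bucket("my-bucket").Object("my-object")
	// Read 64 bytes starting at offset 128; a negative length would read to EOF.
	rc, err := obj.NewRangeReader(ctx, 128, 64)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	data, err := io.ReadAll(rc)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("got %d bytes, object size %d, range starts at %d",
		len(data), rc.Attrs.Size, rc.Attrs.StartOffset)
}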
|
||||
func (c *httpStorageClient) OpenWriter(ctx context.Context, w *Writer, opts ...storageOption) error {
|
||||
return errMethodNotSupported
|
||||
|
||||
func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) {
|
||||
s := callSettings(c.settings, opts...)
|
||||
errorf := params.setError
|
||||
setObj := params.setObj
|
||||
progress := params.progress
|
||||
attrs := params.attrs
|
||||
|
||||
mediaOpts := []googleapi.MediaOption{
|
||||
googleapi.ChunkSize(params.chunkSize),
|
||||
}
|
||||
if c := attrs.ContentType; c != "" {
|
||||
mediaOpts = append(mediaOpts, googleapi.ContentType(c))
|
||||
}
|
||||
if params.chunkRetryDeadline != 0 {
|
||||
mediaOpts = append(mediaOpts, googleapi.ChunkRetryDeadline(params.chunkRetryDeadline))
|
||||
}
|
||||
|
||||
pr, pw := io.Pipe()
|
||||
|
||||
go func() {
|
||||
defer close(params.donec)
|
||||
|
||||
rawObj := attrs.toRawObject(params.bucket)
|
||||
if params.sendCRC32C {
|
||||
rawObj.Crc32c = encodeUint32(attrs.CRC32C)
|
||||
}
|
||||
if attrs.MD5 != nil {
|
||||
rawObj.Md5Hash = base64.StdEncoding.EncodeToString(attrs.MD5)
|
||||
}
|
||||
call := c.raw.Objects.Insert(params.bucket, rawObj).
|
||||
Media(pr, mediaOpts...).
|
||||
Projection("full").
|
||||
Context(params.ctx).
|
||||
Name(params.attrs.Name)
|
||||
call.ProgressUpdater(func(n, _ int64) { progress(n) })
|
||||
|
||||
if attrs.KMSKeyName != "" {
|
||||
call.KmsKeyName(attrs.KMSKeyName)
|
||||
}
|
||||
if attrs.PredefinedACL != "" {
|
||||
call.PredefinedAcl(attrs.PredefinedACL)
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), params.encryptionKey, false); err != nil {
|
||||
errorf(err)
|
||||
pr.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
var resp *raw.Object
|
||||
err := applyConds("NewWriter", defaultGen, params.conds, call)
|
||||
if err == nil {
|
||||
if s.userProject != "" {
|
||||
call.UserProject(s.userProject)
|
||||
}
|
||||
// TODO(tritone): Remove this code when Uploads begin to support
|
||||
// retry attempt header injection with "client header" injection.
|
||||
setClientHeader(call.Header())
|
||||
|
||||
// The internals that perform call.Do automatically retry both the initial
|
||||
// call to set up the upload as well as calls to upload individual chunks
|
||||
// for a resumable upload (as long as the chunk size is non-zero). Hence
|
||||
// there is no need to add retries here.
|
||||
|
||||
// Retry only when the operation is idempotent or the retry policy is RetryAlways.
|
||||
isIdempotent := params.conds != nil && (params.conds.GenerationMatch >= 0 || params.conds.DoesNotExist == true)
|
||||
var useRetry bool
|
||||
if (s.retry == nil || s.retry.policy == RetryIdempotent) && isIdempotent {
|
||||
useRetry = true
|
||||
} else if s.retry != nil && s.retry.policy == RetryAlways {
|
||||
useRetry = true
|
||||
}
|
||||
if useRetry {
|
||||
if s.retry != nil {
|
||||
call.WithRetry(s.retry.backoff, s.retry.shouldRetry)
|
||||
} else {
|
||||
call.WithRetry(nil, nil)
|
||||
}
|
||||
}
|
||||
resp, err = call.Do()
|
||||
}
|
||||
if err != nil {
|
||||
errorf(err)
|
||||
pr.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
setObj(newObject(resp))
|
||||
}()
|
||||
|
||||
return pw, nil
|
||||
}
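Per the isIdempotent check above, an upload is only retried under the default policy when it carries a GenerationMatch or DoesNotExist precondition. A sketch of a writer configured that way (names are placeholders):

package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	obj := client.Bucket("my-bucket").Object("new-object")
	// DoesNotExist makes the upload idempotent, so the retry path above applies.
	w := obj.If(storage.Conditions{DoesNotExist: true}).NewWriter(ctx)
	w.ContentType = "text/plain"
	w.ChunkSize = 8 * 1024 * 1024 // resumable upload chunk size, see mediaOpts above

	if _, err := w.Write([]byte("hello, world\n")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
}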
|
||||
|
||||
// IAM methods.
|
||||
|
|
@ -560,18 +1118,230 @@ func (c *httpStorageClient) TestIamPermissions(ctx context.Context, resource str
|
|||
|
||||
// HMAC Key methods.
|
||||
|
||||
func (c *httpStorageClient) GetHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) (*HMACKey, error) {
|
||||
return nil, errMethodNotSupported
|
||||
func (c *httpStorageClient) GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error) {
|
||||
s := callSettings(c.settings, opts...)
|
||||
call := c.raw.Projects.HmacKeys.Get(project, accessID)
|
||||
if s.userProject != "" {
|
||||
call = call.UserProject(s.userProject)
|
||||
}
|
||||
|
||||
var metadata *raw.HmacKeyMetadata
|
||||
var err error
|
||||
if err := run(ctx, func() error {
|
||||
metadata, err = call.Context(ctx).Do()
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hk := &raw.HmacKey{
|
||||
Metadata: metadata,
|
||||
}
|
||||
return toHMACKeyFromRaw(hk, false)
|
||||
}
|
||||
func (c *httpStorageClient) ListHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) *HMACKeysIterator {
|
||||
return &HMACKeysIterator{}
|
||||
|
||||
func (c *httpStorageClient) ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator {
|
||||
s := callSettings(c.settings, opts...)
|
||||
it := &HMACKeysIterator{
|
||||
ctx: ctx,
|
||||
raw: c.raw.Projects.HmacKeys,
|
||||
projectID: project,
|
||||
retry: s.retry,
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (token string, err error) {
|
||||
call := c.raw.Projects.HmacKeys.List(project)
|
||||
setClientHeader(call.Header())
|
||||
if pageToken != "" {
|
||||
call = call.PageToken(pageToken)
|
||||
}
|
||||
if pageSize > 0 {
|
||||
call = call.MaxResults(int64(pageSize))
|
||||
}
|
||||
if showDeletedKeys {
|
||||
call = call.ShowDeletedKeys(true)
|
||||
}
|
||||
if s.userProject != "" {
|
||||
call = call.UserProject(s.userProject)
|
||||
}
|
||||
if serviceAccountEmail != "" {
|
||||
call = call.ServiceAccountEmail(serviceAccountEmail)
|
||||
}
|
||||
|
||||
var resp *raw.HmacKeysMetadata
|
||||
err = run(it.ctx, func() error {
|
||||
resp, err = call.Context(it.ctx).Do()
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderHTTP(call))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
for _, metadata := range resp.Items {
|
||||
hk := &raw.HmacKey{
|
||||
Metadata: metadata,
|
||||
}
|
||||
hkey, err := toHMACKeyFromRaw(hk, true)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.hmacKeys = append(it.hmacKeys, hkey)
|
||||
}
|
||||
return resp.NextPageToken, nil
|
||||
}
|
||||
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
|
||||
fetch,
|
||||
func() int { return len(it.hmacKeys) - it.index },
|
||||
func() interface{} {
|
||||
prev := it.hmacKeys
|
||||
it.hmacKeys = it.hmacKeys[:0]
|
||||
it.index = 0
|
||||
return prev
|
||||
})
|
||||
return it
|
||||
}
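The iterator built above is consumed through the usual google.golang.org/api/iterator pattern. A sketch, assuming a placeholder project ID:

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	it := client.ListHMACKeys(ctx, "my-project")
	for {
		key, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(key.AccessID, key.State)
	}
}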
|
||||
func (c *httpStorageClient) UpdateHMACKey(ctx context.Context, desc *hmacKeyDesc, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) {
|
||||
return nil, errMethodNotSupported
|
||||
|
||||
func (c *httpStorageClient) UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) {
|
||||
s := callSettings(c.settings, opts...)
|
||||
call := c.raw.Projects.HmacKeys.Update(project, accessID, &raw.HmacKeyMetadata{
|
||||
Etag: attrs.Etag,
|
||||
State: string(attrs.State),
|
||||
})
|
||||
if s.userProject != "" {
|
||||
call = call.UserProject(s.userProject)
|
||||
}
|
||||
|
||||
var metadata *raw.HmacKeyMetadata
|
||||
var err error
|
||||
if err := run(ctx, func() error {
|
||||
metadata, err = call.Context(ctx).Do()
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hk := &raw.HmacKey{
|
||||
Metadata: metadata,
|
||||
}
|
||||
return toHMACKeyFromRaw(hk, false)
|
||||
}
|
||||
func (c *httpStorageClient) CreateHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) (*HMACKey, error) {
|
||||
return nil, errMethodNotSupported
|
||||
|
||||
func (c *httpStorageClient) CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error) {
|
||||
s := callSettings(c.settings, opts...)
|
||||
call := c.raw.Projects.HmacKeys.Create(project, serviceAccountEmail)
|
||||
if s.userProject != "" {
|
||||
call = call.UserProject(s.userProject)
|
||||
}
|
||||
|
||||
var hk *raw.HmacKey
|
||||
if err := run(ctx, func() error {
|
||||
h, err := call.Context(ctx).Do()
|
||||
hk = h
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return toHMACKeyFromRaw(hk, true)
|
||||
}
|
||||
func (c *httpStorageClient) DeleteHMACKey(ctx context.Context, desc *hmacKeyDesc, opts ...storageOption) error {
|
||||
return errMethodNotSupported
|
||||
|
||||
func (c *httpStorageClient) DeleteHMACKey(ctx context.Context, project string, accessID string, opts ...storageOption) error {
|
||||
s := callSettings(c.settings, opts...)
|
||||
call := c.raw.Projects.HmacKeys.Delete(project, accessID)
|
||||
if s.userProject != "" {
|
||||
call = call.UserProject(s.userProject)
|
||||
}
|
||||
return run(ctx, func() error {
|
||||
return call.Context(ctx).Do()
|
||||
}, s.retry, s.idempotent, setRetryHeaderHTTP(call))
|
||||
}
|
||||
|
||||
// Notification methods.
|
||||
|
||||
// ListNotifications returns all the Notifications configured for this bucket, as a map indexed by notification ID.
|
||||
//
|
||||
// Note: This API does not support pagination. However, entity limits cap the number of notifications on a single bucket,
|
||||
// so all results will be returned in the first response. See https://cloud.google.com/storage/quotas#buckets.
|
||||
func (c *httpStorageClient) ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (n map[string]*Notification, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.ListNotifications")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
s := callSettings(c.settings, opts...)
|
||||
call := c.raw.Notifications.List(bucket)
|
||||
if s.userProject != "" {
|
||||
call.UserProject(s.userProject)
|
||||
}
|
||||
var res *raw.Notifications
|
||||
err = run(ctx, func() error {
|
||||
res, err = call.Context(ctx).Do()
|
||||
return err
|
||||
}, s.retry, true, setRetryHeaderHTTP(call))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return notificationsToMap(res.Items), nil
|
||||
}
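Because the full list comes back in a single response (no pagination, as noted above), the public API exposes it as a map keyed by notification ID. A sketch with a placeholder bucket name:

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	notifications, err := client.Bucket("my-bucket").Notifications(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for id, n := range notifications {
		fmt.Println(id, n.TopicID, n.PayloadFormat)
	}
}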
|
||||
|
||||
func (c *httpStorageClient) CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (ret *Notification, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.CreateNotification")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
s := callSettings(c.settings, opts...)
|
||||
call := c.raw.Notifications.Insert(bucket, toRawNotification(n))
|
||||
if s.userProject != "" {
|
||||
call.UserProject(s.userProject)
|
||||
}
|
||||
var rn *raw.Notification
|
||||
err = run(ctx, func() error {
|
||||
rn, err = call.Context(ctx).Do()
|
||||
return err
|
||||
}, s.retry, s.idempotent, setRetryHeaderHTTP(call))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return toNotification(rn), nil
|
||||
}
|
||||
|
||||
func (c *httpStorageClient) DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) (err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.DeleteNotification")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
s := callSettings(c.settings, opts...)
|
||||
call := c.raw.Notifications.Delete(bucket, id)
|
||||
if s.userProject != "" {
|
||||
call.UserProject(s.userProject)
|
||||
}
|
||||
return run(ctx, func() error {
|
||||
return call.Context(ctx).Do()
|
||||
}, s.retry, s.idempotent, setRetryHeaderHTTP(call))
|
||||
}
type httpReader struct {
	body   io.ReadCloser
	seen   int64
	reopen func(seen int64) (*http.Response, error)
}

func (r *httpReader) Read(p []byte) (int, error) {
	n := 0
	for len(p[n:]) > 0 {
		m, err := r.body.Read(p[n:])
		n += m
		r.seen += int64(m)
		if err == nil || err == io.EOF {
			return n, err
		}
		// Read failed (likely due to connection issues), but we will try to reopen
		// the pipe and continue. Send a ranged read request that takes into account
		// the number of bytes we've already seen.
		res, err := r.reopen(r.seen)
		if err != nil {
			// reopen already retries
			return n, err
		}
		r.body.Close()
		r.body = res.Body
	}
	return n, nil
}

func (r *httpReader) Close() error {
	return r.body.Close()
}
48
vendor/cloud.google.com/go/storage/iam.go
generated
vendored
@ -27,17 +27,17 @@ import (
|
|||
// IAM provides access to IAM access control for the bucket.
|
||||
func (b *BucketHandle) IAM() *iam.Handle {
|
||||
return iam.InternalNewHandleClient(&iamClient{
|
||||
raw: b.c.raw,
|
||||
userProject: b.userProject,
|
||||
retry: b.retry,
|
||||
client: b.c,
|
||||
}, b.name)
|
||||
}
|
||||
|
||||
// iamClient implements the iam.client interface.
|
||||
type iamClient struct {
|
||||
raw *raw.Service
|
||||
userProject string
|
||||
retry *retryConfig
|
||||
client *Client
|
||||
}
|
||||
|
||||
func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) {
|
||||
|
|
@ -48,57 +48,25 @@ func (c *iamClient) GetWithVersion(ctx context.Context, resource string, request
|
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
call := c.raw.Buckets.GetIamPolicy(resource).OptionsRequestedPolicyVersion(int64(requestedPolicyVersion))
|
||||
setClientHeader(call.Header())
|
||||
if c.userProject != "" {
|
||||
call.UserProject(c.userProject)
|
||||
}
|
||||
var rp *raw.Policy
|
||||
err = run(ctx, func() error {
|
||||
rp, err = call.Context(ctx).Do()
|
||||
return err
|
||||
}, c.retry, true, setRetryHeaderHTTP(call))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return iamFromStoragePolicy(rp), nil
|
||||
o := makeStorageOpts(true, c.retry, c.userProject)
|
||||
return c.client.tc.GetIamPolicy(ctx, resource, requestedPolicyVersion, o...)
|
||||
}
|
||||
|
||||
func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
rp := iamToStoragePolicy(p)
|
||||
call := c.raw.Buckets.SetIamPolicy(resource, rp)
|
||||
setClientHeader(call.Header())
|
||||
if c.userProject != "" {
|
||||
call.UserProject(c.userProject)
|
||||
}
|
||||
isIdempotent := len(p.Etag) > 0
|
||||
return run(ctx, func() error {
|
||||
_, err := call.Context(ctx).Do()
|
||||
return err
|
||||
}, c.retry, isIdempotent, setRetryHeaderHTTP(call))
|
||||
o := makeStorageOpts(isIdempotent, c.retry, c.userProject)
|
||||
return c.client.tc.SetIamPolicy(ctx, resource, p, o...)
|
||||
}
|
||||
|
||||
func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
call := c.raw.Buckets.TestIamPermissions(resource, perms)
|
||||
setClientHeader(call.Header())
|
||||
if c.userProject != "" {
|
||||
call.UserProject(c.userProject)
|
||||
}
|
||||
var res *raw.TestIamPermissionsResponse
|
||||
err = run(ctx, func() error {
|
||||
res, err = call.Context(ctx).Do()
|
||||
return err
|
||||
}, c.retry, true, setRetryHeaderHTTP(call))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res.Permissions, nil
|
||||
o := makeStorageOpts(true, c.retry, c.userProject)
|
||||
return c.client.tc.TestIamPermissions(ctx, resource, perms, o...)
|
||||
}
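The iamClient methods above back the iam.Handle returned by BucketHandle.IAM. A read-modify-write sketch (the member and bucket name are placeholders); note that Set is treated as idempotent only when the policy carries an etag, matching the isIdempotent check above:

package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	handle := client.Bucket("my-bucket").IAM()
	policy, err := handle.Policy(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// The fetched policy carries an etag, so the SetPolicy call below is
	// retried as an idempotent operation.
	policy.Add("group:admins@example.com", "roles/storage.objectViewer")
	if err := handle.SetPolicy(ctx, policy); err != nil {
		log.Fatal(err)
	}
}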
|
||||
|
||||
func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy {
49
vendor/cloud.google.com/go/storage/internal/apiv2/doc.go
generated
vendored
@ -19,43 +19,44 @@
|
|||
//
|
||||
// Lets you store and retrieve potentially-large, immutable data objects.
|
||||
//
|
||||
// NOTE: This package is in alpha. It is not stable, and is likely to change.
|
||||
// NOTE: This package is in alpha. It is not stable, and is likely to change.
|
||||
//
|
||||
// Example usage
|
||||
// # Example usage
|
||||
//
|
||||
// To get started with this package, create a client.
|
||||
// ctx := context.Background()
|
||||
// c, err := storage.NewClient(ctx)
|
||||
// if err != nil {
|
||||
// // TODO: Handle error.
|
||||
// }
|
||||
// defer c.Close()
|
||||
//
|
||||
// ctx := context.Background()
|
||||
// c, err := storage.NewClient(ctx)
|
||||
// if err != nil {
|
||||
// // TODO: Handle error.
|
||||
// }
|
||||
// defer c.Close()
|
||||
//
|
||||
// The client will use your default application credentials. Clients should be reused instead of created as needed.
|
||||
// The methods of Client are safe for concurrent use by multiple goroutines.
|
||||
// The returned client must be Closed when it is done being used.
|
||||
//
|
||||
// Using the Client
|
||||
// # Using the Client
|
||||
//
|
||||
// The following is an example of making an API call with the newly created client.
|
||||
//
|
||||
// ctx := context.Background()
|
||||
// c, err := storage.NewClient(ctx)
|
||||
// if err != nil {
|
||||
// // TODO: Handle error.
|
||||
// }
|
||||
// defer c.Close()
|
||||
// ctx := context.Background()
|
||||
// c, err := storage.NewClient(ctx)
|
||||
// if err != nil {
|
||||
// // TODO: Handle error.
|
||||
// }
|
||||
// defer c.Close()
|
||||
//
|
||||
// req := &storagepb.DeleteBucketRequest{
|
||||
// // TODO: Fill request struct fields.
|
||||
// // See https://pkg.go.dev/google.golang.org/genproto/googleapis/storage/v2#DeleteBucketRequest.
|
||||
// }
|
||||
// err = c.DeleteBucket(ctx, req)
|
||||
// if err != nil {
|
||||
// // TODO: Handle error.
|
||||
// }
|
||||
// req := &storagepb.DeleteBucketRequest{
|
||||
// // TODO: Fill request struct fields.
|
||||
// // See https://pkg.go.dev/cloud.google.com/go/storage/internal/apiv2/stubs#DeleteBucketRequest.
|
||||
// }
|
||||
// err = c.DeleteBucket(ctx, req)
|
||||
// if err != nil {
|
||||
// // TODO: Handle error.
|
||||
// }
|
||||
//
|
||||
// Use of Context
|
||||
// # Use of Context
|
||||
//
|
||||
// The ctx passed to NewClient is used for authentication requests and
|
||||
// for creating the underlying connection, but is not used for subsequent calls.
|
||||
|
5
vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json
generated
vendored
@ -10,6 +10,11 @@
|
|||
"grpc": {
|
||||
"libraryClient": "Client",
|
||||
"rpcs": {
|
||||
"CancelResumableWrite": {
|
||||
"methods": [
|
||||
"CancelResumableWrite"
|
||||
]
|
||||
},
|
||||
"ComposeObject": {
|
||||
"methods": [
|
||||
"ComposeObject"
|
||||
|
|
|
|||
|
|
@ -1,10 +1,10 @@
|
|||
// Copyright 2019 Google LLC
|
||||
// Copyright 2022 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
|
@ -12,12 +12,15 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// This file, and the cloud.google.com/go import, won't actually become part of
|
||||
// the resultant binary.
|
||||
//go:build modhack
|
||||
// +build modhack
|
||||
|
||||
package storage
|
||||
|
||||
// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
|
||||
import _ "cloud.google.com/go"
|
||||
import (
|
||||
"context"
|
||||
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
// InsertMetadata inserts the given gRPC metadata into the outgoing context.
|
||||
func InsertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
|
||||
return insertMetadata(ctx, mds...)
|
||||
}
|
||||
418
vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go
generated
vendored
@ -18,15 +18,19 @@ package storage
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
|
||||
gax "github.com/googleapis/gax-go/v2"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/option/internaloption"
|
||||
gtransport "google.golang.org/api/transport/grpc"
|
||||
iampb "google.golang.org/genproto/googleapis/iam/v1"
|
||||
storagepb "google.golang.org/genproto/googleapis/storage/v2"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
|
@ -51,6 +55,7 @@ type CallOptions struct {
|
|||
ListNotifications []gax.CallOption
|
||||
ComposeObject []gax.CallOption
|
||||
DeleteObject []gax.CallOption
|
||||
CancelResumableWrite []gax.CallOption
|
||||
GetObject []gax.CallOption
|
||||
ReadObject []gax.CallOption
|
||||
UpdateObject []gax.CallOption
|
||||
|
|
@ -96,6 +101,7 @@ func defaultCallOptions() *CallOptions {
|
|||
ListNotifications: []gax.CallOption{},
|
||||
ComposeObject: []gax.CallOption{},
|
||||
DeleteObject: []gax.CallOption{},
|
||||
CancelResumableWrite: []gax.CallOption{},
|
||||
GetObject: []gax.CallOption{},
|
||||
ReadObject: []gax.CallOption{},
|
||||
UpdateObject: []gax.CallOption{},
|
||||
|
|
@ -113,7 +119,7 @@ func defaultCallOptions() *CallOptions {
|
|||
}
|
||||
}
|
||||
|
||||
// internalClient is an interface that defines the methods availaible from Cloud Storage API.
|
||||
// internalClient is an interface that defines the methods available from Cloud Storage API.
|
||||
type internalClient interface {
|
||||
Close() error
|
||||
setGoogleClientInfo(...string)
|
||||
|
|
@ -133,6 +139,7 @@ type internalClient interface {
|
|||
ListNotifications(context.Context, *storagepb.ListNotificationsRequest, ...gax.CallOption) *NotificationIterator
|
||||
ComposeObject(context.Context, *storagepb.ComposeObjectRequest, ...gax.CallOption) (*storagepb.Object, error)
|
||||
DeleteObject(context.Context, *storagepb.DeleteObjectRequest, ...gax.CallOption) error
|
||||
CancelResumableWrite(context.Context, *storagepb.CancelResumableWriteRequest, ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error)
|
||||
GetObject(context.Context, *storagepb.GetObjectRequest, ...gax.CallOption) (*storagepb.Object, error)
|
||||
ReadObject(context.Context, *storagepb.ReadObjectRequest, ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error)
|
||||
UpdateObject(context.Context, *storagepb.UpdateObjectRequest, ...gax.CallOption) (*storagepb.Object, error)
|
||||
|
|
@ -158,22 +165,22 @@ type internalClient interface {
|
|||
//
|
||||
// Resources are named as follows:
|
||||
//
|
||||
// Projects are referred to as they are defined by the Resource Manager API,
|
||||
// using strings like projects/123456 or projects/my-string-id.
|
||||
// Projects are referred to as they are defined by the Resource Manager API,
|
||||
// using strings like projects/123456 or projects/my-string-id.
|
||||
//
|
||||
// Buckets are named using string names of the form:
|
||||
// projects/{project}/buckets/{bucket}
|
||||
// For globally unique buckets, _ may be substituted for the project.
|
||||
// Buckets are named using string names of the form:
|
||||
// projects/{project}/buckets/{bucket}
|
||||
// For globally unique buckets, _ may be substituted for the project.
|
||||
//
|
||||
// Objects are uniquely identified by their name along with the name of the
|
||||
// bucket they belong to, as separate strings in this API. For example:
|
||||
// Objects are uniquely identified by their name along with the name of the
|
||||
// bucket they belong to, as separate strings in this API. For example:
|
||||
//
|
||||
// ReadObjectRequest {
|
||||
// bucket: ‘projects/_/buckets/my-bucket’
|
||||
// object: ‘my-object’
|
||||
// }
|
||||
// Note that object names can contain / characters, which are treated as
|
||||
// any other character (no special directory semantics).
|
||||
// ReadObjectRequest {
|
||||
// bucket: ‘projects/_/buckets/my-bucket’
|
||||
// object: ‘my-object’
|
||||
// }
|
||||
// Note that object names can contain / characters, which are treated as
|
||||
// any other character (no special directory semantics).
|
||||
type Client struct {
|
||||
// The internal transport-dependent client.
|
||||
internalClient internalClient
|
||||
|
|
@ -229,17 +236,17 @@ func (c *Client) LockBucketRetentionPolicy(ctx context.Context, req *storagepb.L
|
|||
return c.internalClient.LockBucketRetentionPolicy(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// GetIamPolicy gets the IAM policy for a specified bucket.
|
||||
// GetIamPolicy gets the IAM policy for a specified bucket or object.
|
||||
func (c *Client) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
|
||||
return c.internalClient.GetIamPolicy(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// SetIamPolicy updates an IAM policy for the specified bucket.
|
||||
// SetIamPolicy updates an IAM policy for the specified bucket or object.
|
||||
func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
|
||||
return c.internalClient.SetIamPolicy(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// TestIamPermissions tests a set of permissions on the given bucket to see which, if
|
||||
// TestIamPermissions tests a set of permissions on the given bucket or object to see which, if
|
||||
// any, are held by the caller.
|
||||
func (c *Client) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
|
||||
return c.internalClient.TestIamPermissions(ctx, req, opts...)
|
||||
|
|
@ -280,12 +287,16 @@ func (c *Client) ComposeObject(ctx context.Context, req *storagepb.ComposeObject
|
|||
}
|
||||
|
||||
// DeleteObject deletes an object and its metadata. Deletions are permanent if versioning
|
||||
// is not enabled for the bucket, or if the generation parameter
|
||||
// is used.
|
||||
// is not enabled for the bucket, or if the generation parameter is used.
|
||||
func (c *Client) DeleteObject(ctx context.Context, req *storagepb.DeleteObjectRequest, opts ...gax.CallOption) error {
|
||||
return c.internalClient.DeleteObject(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// CancelResumableWrite cancels an in-progress resumable upload.
|
||||
func (c *Client) CancelResumableWrite(ctx context.Context, req *storagepb.CancelResumableWriteRequest, opts ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) {
|
||||
return c.internalClient.CancelResumableWrite(ctx, req, opts...)
|
||||
}
|
||||
|
||||
// GetObject retrieves an object’s metadata.
|
||||
func (c *Client) GetObject(ctx context.Context, req *storagepb.GetObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
|
||||
return c.internalClient.GetObject(ctx, req, opts...)
|
||||
|
|
@ -312,13 +323,40 @@ func (c *Client) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRe
|
|||
// true, or else it is an error.
|
||||
//
|
||||
// For a resumable write, the client should instead call
|
||||
// StartResumableWrite() and provide that method an WriteObjectSpec.
|
||||
// StartResumableWrite(), populating a WriteObjectSpec into that request.
|
||||
// They should then attach the returned upload_id to the first message of
|
||||
// each following call to Create. If there is an error or the connection is
|
||||
// broken during the resumable Create(), the client should check the status
|
||||
// of the Create() by calling QueryWriteStatus() and continue writing from
|
||||
// the returned persisted_size. This may be less than the amount of data the
|
||||
// client previously sent.
|
||||
// each following call to WriteObject. If the stream is closed before
|
||||
// finishing the upload (either explicitly by the client or due to a network
|
||||
// error or an error response from the server), the client should do as
|
||||
// follows:
|
||||
//
|
||||
// Check the result Status of the stream, to determine if writing can be
|
||||
// resumed on this stream or must be restarted from scratch (by calling
|
||||
// StartResumableWrite()). The resumable errors are DEADLINE_EXCEEDED,
|
||||
// INTERNAL, and UNAVAILABLE. For each case, the client should use binary
|
||||
// exponential backoff before retrying. Additionally, writes can be
|
||||
// resumed after RESOURCE_EXHAUSTED errors, but only after taking
|
||||
// appropriate measures, which may include reducing aggregate send rate
|
||||
// across clients and/or requesting a quota increase for your project.
|
||||
//
|
||||
// If the call to WriteObject returns ABORTED, that indicates
|
||||
// concurrent attempts to update the resumable write, caused either by
|
||||
// multiple racing clients or by a single client where the previous
|
||||
// request was timed out on the client side but nonetheless reached the
|
||||
// server. In this case the client should take steps to prevent further
|
||||
// concurrent writes (e.g., increase the timeouts, stop using more than
|
||||
// one process to perform the upload, etc.), and then should follow the
|
||||
// steps below for resuming the upload.
|
||||
//
|
||||
// For resumable errors, the client should call QueryWriteStatus() and
|
||||
// then continue writing from the returned persisted_size. This may be
|
||||
// less than the amount of data the client previously sent. Note also that
|
||||
// it is acceptable to send data starting at an offset earlier than the
|
||||
// returned persisted_size; in this case, the service will skip data at
|
||||
// offsets that were already persisted (without checking that it matches
|
||||
// the previously written data), and write only the data starting from the
|
||||
// persisted offset. This behavior can make client-side handling simpler
|
||||
// in some cases.
|
||||
//
|
||||
// The service will not view the object as complete until the client has
|
||||
// sent a WriteObjectRequest with finish_write set to true. Sending any
|
||||
|
|
@ -326,6 +364,10 @@ func (c *Client) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRe
|
|||
// true will cause an error. The client should check the response it
|
||||
// receives to determine how much data the service was able to commit and
|
||||
// whether the service views the object as complete.
|
||||
//
|
||||
// Attempting to resume an already finalized object will result in an OK
|
||||
// status, with a WriteObjectResponse containing the finalized object’s
|
||||
// metadata.
|
||||
func (c *Client) WriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) {
|
||||
return c.internalClient.WriteObject(ctx, opts...)
|
||||
}
|
||||
|
|
@ -424,22 +466,22 @@ type gRPCClient struct {
|
|||
//
|
||||
// Resources are named as follows:
|
||||
//
|
||||
// Projects are referred to as they are defined by the Resource Manager API,
|
||||
// using strings like projects/123456 or projects/my-string-id.
|
||||
// Projects are referred to as they are defined by the Resource Manager API,
|
||||
// using strings like projects/123456 or projects/my-string-id.
|
||||
//
|
||||
// Buckets are named using string names of the form:
|
||||
// projects/{project}/buckets/{bucket}
|
||||
// For globally unique buckets, _ may be substituted for the project.
|
||||
// Buckets are named using string names of the form:
|
||||
// projects/{project}/buckets/{bucket}
|
||||
// For globally unique buckets, _ may be substituted for the project.
|
||||
//
|
||||
// Objects are uniquely identified by their name along with the name of the
|
||||
// bucket they belong to, as separate strings in this API. For example:
|
||||
// Objects are uniquely identified by their name along with the name of the
|
||||
// bucket they belong to, as separate strings in this API. For example:
|
||||
//
|
||||
// ReadObjectRequest {
|
||||
// bucket: ‘projects/_/buckets/my-bucket’
|
||||
// object: ‘my-object’
|
||||
// }
|
||||
// Note that object names can contain / characters, which are treated as
|
||||
// any other character (no special directory semantics).
|
||||
// ReadObjectRequest {
|
||||
// bucket: ‘projects/_/buckets/my-bucket’
|
||||
// object: ‘my-object’
|
||||
// }
|
||||
// Note that object names can contain / characters, which are treated as
|
||||
// any other character (no special directory semantics).
|
||||
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
|
||||
clientOpts := defaultGRPCClientOptions()
|
||||
if newClientHook != nil {
|
||||
|
|
@ -497,7 +539,18 @@ func (c *gRPCClient) Close() error {
|
|||
}
|
||||
|
||||
func (c *gRPCClient) DeleteBucket(ctx context.Context, req *storagepb.DeleteBucketRequest, opts ...gax.CallOption) error {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).DeleteBucket[0:len((*c.CallOptions).DeleteBucket):len((*c.CallOptions).DeleteBucket)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
|
|
@ -508,7 +561,18 @@ func (c *gRPCClient) DeleteBucket(ctx context.Context, req *storagepb.DeleteBuck
|
|||
}
|
||||
|
||||
func (c *gRPCClient) GetBucket(ctx context.Context, req *storagepb.GetBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).GetBucket[0:len((*c.CallOptions).GetBucket):len((*c.CallOptions).GetBucket)], opts...)
|
||||
var resp *storagepb.Bucket
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
|
@ -581,7 +645,18 @@ func (c *gRPCClient) ListBuckets(ctx context.Context, req *storagepb.ListBuckets
|
|||
}
|
||||
|
||||
func (c *gRPCClient) LockBucketRetentionPolicy(ctx context.Context, req *storagepb.LockBucketRetentionPolicyRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).LockBucketRetentionPolicy[0:len((*c.CallOptions).LockBucketRetentionPolicy):len((*c.CallOptions).LockBucketRetentionPolicy)], opts...)
|
||||
var resp *storagepb.Bucket
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
|
@ -596,7 +671,21 @@ func (c *gRPCClient) LockBucketRetentionPolicy(ctx context.Context, req *storage
|
|||
}
|
||||
|
||||
func (c *gRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
|
||||
}
|
||||
if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...)
|
||||
var resp *iampb.Policy
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
|
@ -611,7 +700,21 @@ func (c *gRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRe
|
|||
}
|
||||
|
||||
func (c *gRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
|
||||
}
|
||||
if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...)
|
||||
var resp *iampb.Policy
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
|
@ -626,7 +729,21 @@ func (c *gRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRe
|
|||
}
|
||||
|
||||
func (c *gRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
|
||||
}
|
||||
if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...)
|
||||
var resp *iampb.TestIamPermissionsResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
|
@ -641,7 +758,18 @@ func (c *gRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamP
|
|||
}
|
||||
|
||||
func (c *gRPCClient) UpdateBucket(ctx context.Context, req *storagepb.UpdateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket().GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket().GetName())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket().GetName())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).UpdateBucket[0:len((*c.CallOptions).UpdateBucket):len((*c.CallOptions).UpdateBucket)], opts...)
|
||||
var resp *storagepb.Bucket
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
|
@ -656,7 +784,18 @@ func (c *gRPCClient) UpdateBucket(ctx context.Context, req *storagepb.UpdateBuck
|
|||
}
|
||||
|
||||
func (c *gRPCClient) DeleteNotification(ctx context.Context, req *storagepb.DeleteNotificationRequest, opts ...gax.CallOption) error {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).DeleteNotification[0:len((*c.CallOptions).DeleteNotification):len((*c.CallOptions).DeleteNotification)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
|
|
@ -667,7 +806,18 @@ func (c *gRPCClient) DeleteNotification(ctx context.Context, req *storagepb.Dele
|
|||
}
|
||||
|
||||
func (c *gRPCClient) GetNotification(ctx context.Context, req *storagepb.GetNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).GetNotification[0:len((*c.CallOptions).GetNotification):len((*c.CallOptions).GetNotification)], opts...)
|
||||
var resp *storagepb.Notification
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
|
@ -682,7 +832,18 @@ func (c *gRPCClient) GetNotification(ctx context.Context, req *storagepb.GetNoti
|
|||
}
|
||||
|
||||
func (c *gRPCClient) CreateNotification(ctx context.Context, req *storagepb.CreateNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).CreateNotification[0:len((*c.CallOptions).CreateNotification):len((*c.CallOptions).CreateNotification)], opts...)
|
||||
var resp *storagepb.Notification
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
|
@ -697,7 +858,18 @@ func (c *gRPCClient) CreateNotification(ctx context.Context, req *storagepb.Crea
|
|||
}
|
||||
|
||||
func (c *gRPCClient) ListNotifications(ctx context.Context, req *storagepb.ListNotificationsRequest, opts ...gax.CallOption) *NotificationIterator {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).ListNotifications[0:len((*c.CallOptions).ListNotifications):len((*c.CallOptions).ListNotifications)], opts...)
|
||||
it := &NotificationIterator{}
|
||||
req = proto.Clone(req).(*storagepb.ListNotificationsRequest)
|
||||
|
|
@ -740,7 +912,18 @@ func (c *gRPCClient) ListNotifications(ctx context.Context, req *storagepb.ListN
|
|||
}
|
||||
|
||||
func (c *gRPCClient) ComposeObject(ctx context.Context, req *storagepb.ComposeObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetDestination().GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetDestination().GetBucket())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetDestination().GetBucket())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).ComposeObject[0:len((*c.CallOptions).ComposeObject):len((*c.CallOptions).ComposeObject)], opts...)
|
||||
var resp *storagepb.Object
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
|
@ -755,7 +938,18 @@ func (c *gRPCClient) ComposeObject(ctx context.Context, req *storagepb.ComposeOb
|
|||
}
|
||||
|
||||
func (c *gRPCClient) DeleteObject(ctx context.Context, req *storagepb.DeleteObjectRequest, opts ...gax.CallOption) error {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).DeleteObject[0:len((*c.CallOptions).DeleteObject):len((*c.CallOptions).DeleteObject)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
|
|
@ -765,8 +959,45 @@ func (c *gRPCClient) DeleteObject(ctx context.Context, req *storagepb.DeleteObje
|
|||
return err
|
||||
}
|
||||
|
||||
func (c *gRPCClient) CancelResumableWrite(ctx context.Context, req *storagepb.CancelResumableWriteRequest, opts ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) {
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetUploadId()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).CancelResumableWrite[0:len((*c.CallOptions).CancelResumableWrite):len((*c.CallOptions).CancelResumableWrite)], opts...)
|
||||
var resp *storagepb.CancelResumableWriteResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.client.CancelResumableWrite(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (c *gRPCClient) GetObject(ctx context.Context, req *storagepb.GetObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).GetObject[0:len((*c.CallOptions).GetObject):len((*c.CallOptions).GetObject)], opts...)
|
||||
var resp *storagepb.Object
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
|
@ -781,7 +1012,18 @@ func (c *gRPCClient) GetObject(ctx context.Context, req *storagepb.GetObjectRequ
|
|||
}
|
||||
|
||||
func (c *gRPCClient) ReadObject(ctx context.Context, req *storagepb.ReadObjectRequest, opts ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
var resp storagepb.Storage_ReadObjectClient
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
|
|
@ -795,7 +1037,18 @@ func (c *gRPCClient) ReadObject(ctx context.Context, req *storagepb.ReadObjectRe
|
|||
}
|
||||
|
||||
func (c *gRPCClient) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetObject().GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetObject().GetBucket())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetObject().GetBucket())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).UpdateObject[0:len((*c.CallOptions).UpdateObject):len((*c.CallOptions).UpdateObject)], opts...)
|
||||
var resp *storagepb.Object
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
|
@ -825,7 +1078,18 @@ func (c *gRPCClient) WriteObject(ctx context.Context, opts ...gax.CallOption) (s
|
|||
}
|
||||
|
||||
func (c *gRPCClient) ListObjects(ctx context.Context, req *storagepb.ListObjectsRequest, opts ...gax.CallOption) *ObjectIterator {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).ListObjects[0:len((*c.CallOptions).ListObjects):len((*c.CallOptions).ListObjects)], opts...)
|
||||
it := &ObjectIterator{}
|
||||
req = proto.Clone(req).(*storagepb.ListObjectsRequest)
|
||||
|
|
@ -868,7 +1132,21 @@ func (c *gRPCClient) ListObjects(ctx context.Context, req *storagepb.ListObjects
|
|||
}
|
||||
|
||||
func (c *gRPCClient) RewriteObject(ctx context.Context, req *storagepb.RewriteObjectRequest, opts ...gax.CallOption) (*storagepb.RewriteResponse, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetSourceBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetSourceBucket())[1])) > 0 {
|
||||
routingHeadersMap["source_bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetSourceBucket())[1])
|
||||
}
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetDestinationBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetDestinationBucket())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetDestinationBucket())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).RewriteObject[0:len((*c.CallOptions).RewriteObject):len((*c.CallOptions).RewriteObject)], opts...)
|
||||
var resp *storagepb.RewriteResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
|
@ -883,7 +1161,18 @@ func (c *gRPCClient) RewriteObject(ctx context.Context, req *storagepb.RewriteOb
|
|||
}
|
||||
|
||||
func (c *gRPCClient) StartResumableWrite(ctx context.Context, req *storagepb.StartResumableWriteRequest, opts ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetWriteObjectSpec().GetResource().GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetWriteObjectSpec().GetResource().GetBucket())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetWriteObjectSpec().GetResource().GetBucket())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).StartResumableWrite[0:len((*c.CallOptions).StartResumableWrite):len((*c.CallOptions).StartResumableWrite)], opts...)
|
||||
var resp *storagepb.StartResumableWriteResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
|
@ -898,7 +1187,18 @@ func (c *gRPCClient) StartResumableWrite(ctx context.Context, req *storagepb.Sta
|
|||
}
|
||||
|
||||
func (c *gRPCClient) QueryWriteStatus(ctx context.Context, req *storagepb.QueryWriteStatusRequest, opts ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
routingHeaders := ""
|
||||
routingHeadersMap := make(map[string]string)
|
||||
if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetUploadId()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1])) > 0 {
|
||||
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1])
|
||||
}
|
||||
for headerName, headerValue := range routingHeadersMap {
|
||||
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
|
||||
}
|
||||
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
|
||||
md := metadata.Pairs("x-goog-request-params", routingHeaders)
|
||||
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append((*c.CallOptions).QueryWriteStatus[0:len((*c.CallOptions).QueryWriteStatus):len((*c.CallOptions).QueryWriteStatus)], opts...)
|
||||
var resp *storagepb.QueryWriteStatusResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
|
|
|||
10606 vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go generated vendored Normal file
File diff suppressed because it is too large
2 vendor/cloud.google.com/go/storage/internal/version.go generated vendored
|
|
@ -15,4 +15,4 @@
package internal

// Version is the current tagged release of the library.
const Version = "1.22.1"
const Version = "1.26.0"
|
||||
|
|
|
|||
15 vendor/cloud.google.com/go/storage/invoke.go generated vendored
|
|
@ -57,7 +57,7 @@ func run(ctx context.Context, call func() error, retry *retryConfig, isIdempoten
bo.Initial = retry.backoff.Initial
bo.Max = retry.backoff.Max
}
var errorFunc func(err error) bool = shouldRetry
var errorFunc func(err error) bool = ShouldRetry
if retry.shouldRetry != nil {
errorFunc = retry.shouldRetry
}
|
||||
|
|
@ -89,7 +89,16 @@ func setRetryHeaderGRPC(_ context.Context) func(string, int) {
}
}

func shouldRetry(err error) bool {
// ShouldRetry returns true if an error is retryable, based on best practice
// guidance from GCS. See
// https://cloud.google.com/storage/docs/retry-strategy#go for more information
// on what errors are considered retryable.
//
// If you would like to customize retryable errors, use the WithErrorFunc to
// supply a RetryOption to your library calls. For example, to retry additional
// errors, you can write a custom func that wraps ShouldRetry and also specifies
// additional errors that should return true.
func ShouldRetry(err error) bool {
if err == nil {
return false
}
|
||||
|
|
@ -131,7 +140,7 @@ func shouldRetry(err error) bool {
}
// Unwrap is only supported in go1.13.x+
if e, ok := err.(interface{ Unwrap() error }); ok {
return shouldRetry(e.Unwrap())
return ShouldRetry(e.Unwrap())
}
return false
}
|
||||
|
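Since 1.26.0 exports ShouldRetry, callers can compose it with their own retry predicates. The sketch below is not part of the vendored diff; it assumes a hypothetical bucket "my-bucket" and object "my-object", and uses the library's existing Retryer/WithErrorFunc options to retry the default retryable errors plus unexpected EOFs.

package main

import (
	"context"
	"errors"
	"io"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Retry everything the library already considers retryable, plus
	// unexpected EOFs surfaced by the transport.
	obj := client.Bucket("my-bucket").Object("my-object").Retryer(
		storage.WithErrorFunc(func(err error) bool {
			return storage.ShouldRetry(err) || errors.Is(err, io.ErrUnexpectedEOF)
		}),
	)

	if _, err := obj.Attrs(ctx); err != nil {
		log.Fatal(err)
	}
}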
|
|
|||
77 vendor/cloud.google.com/go/storage/notifications.go generated vendored
|
|
@ -21,6 +21,7 @@ import (
"regexp"

"cloud.google.com/go/internal/trace"
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
raw "google.golang.org/api/storage/v1"
)
|
||||
|
||||
|
|
@ -91,6 +92,30 @@ func toNotification(rn *raw.Notification) *Notification {
|
|||
return n
|
||||
}
|
||||
|
||||
func toNotificationFromProto(pbn *storagepb.Notification) *Notification {
|
||||
n := &Notification{
|
||||
ID: pbn.GetName(),
|
||||
EventTypes: pbn.GetEventTypes(),
|
||||
ObjectNamePrefix: pbn.GetObjectNamePrefix(),
|
||||
CustomAttributes: pbn.GetCustomAttributes(),
|
||||
PayloadFormat: pbn.GetPayloadFormat(),
|
||||
}
|
||||
n.TopicProjectID, n.TopicID = parseNotificationTopic(pbn.Topic)
|
||||
return n
|
||||
}
|
||||
|
||||
func toProtoNotification(n *Notification) *storagepb.Notification {
|
||||
return &storagepb.Notification{
|
||||
Name: n.ID,
|
||||
Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s",
|
||||
n.TopicProjectID, n.TopicID),
|
||||
EventTypes: n.EventTypes,
|
||||
ObjectNamePrefix: n.ObjectNamePrefix,
|
||||
CustomAttributes: n.CustomAttributes,
|
||||
PayloadFormat: n.PayloadFormat,
|
||||
}
|
||||
}
|
||||
|
||||
var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)")
|
||||
|
||||
// parseNotificationTopic extracts the project and topic IDs from from the full
|
||||
|
|
@ -132,21 +157,10 @@ func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (re
|
|||
if n.TopicID == "" {
|
||||
return nil, errors.New("storage: AddNotification: missing TopicID")
|
||||
}
|
||||
call := b.c.raw.Notifications.Insert(b.name, toRawNotification(n))
|
||||
setClientHeader(call.Header())
|
||||
if b.userProject != "" {
|
||||
call.UserProject(b.userProject)
|
||||
}
|
||||
|
||||
var rn *raw.Notification
|
||||
err = run(ctx, func() error {
|
||||
rn, err = call.Context(ctx).Do()
|
||||
return err
|
||||
}, b.retry, false, setRetryHeaderHTTP(call))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return toNotification(rn), nil
|
||||
opts := makeStorageOpts(false, b.retry, b.userProject)
|
||||
ret, err = b.c.tc.CreateNotification(ctx, b.name, n, opts...)
|
||||
return ret, err
|
||||
}
|
||||
|
||||
// Notifications returns all the Notifications configured for this bucket, as a map
|
||||
|
|
@ -155,20 +169,9 @@ func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notific
|
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
call := b.c.raw.Notifications.List(b.name)
|
||||
setClientHeader(call.Header())
|
||||
if b.userProject != "" {
|
||||
call.UserProject(b.userProject)
|
||||
}
|
||||
var res *raw.Notifications
|
||||
err = run(ctx, func() error {
|
||||
res, err = call.Context(ctx).Do()
|
||||
return err
|
||||
}, b.retry, true, setRetryHeaderHTTP(call))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return notificationsToMap(res.Items), nil
|
||||
opts := makeStorageOpts(true, b.retry, b.userProject)
|
||||
n, err = b.c.tc.ListNotifications(ctx, b.name, opts...)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func notificationsToMap(rns []*raw.Notification) map[string]*Notification {
|
||||
|
|
@ -179,17 +182,19 @@ func notificationsToMap(rns []*raw.Notification) map[string]*Notification {
|
|||
return m
|
||||
}
|
||||
|
||||
func notificationsToMapFromProto(ns []*storagepb.Notification) map[string]*Notification {
|
||||
m := map[string]*Notification{}
|
||||
for _, n := range ns {
|
||||
m[n.Name] = toNotificationFromProto(n)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// DeleteNotification deletes the notification with the given ID.
|
||||
func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
call := b.c.raw.Notifications.Delete(b.name, id)
|
||||
setClientHeader(call.Header())
|
||||
if b.userProject != "" {
|
||||
call.UserProject(b.userProject)
|
||||
}
|
||||
return run(ctx, func() error {
|
||||
return call.Context(ctx).Do()
|
||||
}, b.retry, true, setRetryHeaderHTTP(call))
|
||||
opts := makeStorageOpts(true, b.retry, b.userProject)
|
||||
return b.c.tc.DeleteNotification(ctx, b.name, id, opts...)
|
||||
}
|
||||
|
|
|
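The notifications surface keeps its public shape while delegating to the transport-agnostic client introduced in this release. A hedged usage sketch, outside the vendored diff, assuming a hypothetical bucket, project, and Pub/Sub topic:

package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	bucket := client.Bucket("my-bucket") // hypothetical bucket name

	// Publish bucket events to a hypothetical Pub/Sub topic.
	n, err := bucket.AddNotification(ctx, &storage.Notification{
		TopicProjectID: "my-project",
		TopicID:        "my-topic",
		PayloadFormat:  storage.JSONPayload,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created notification %s", n.ID)

	// List everything configured on the bucket.
	all, err := bucket.Notifications(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("bucket has %d notification configs", len(all))
}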
|||
507 vendor/cloud.google.com/go/storage/reader.go generated vendored
|
|
@ -16,20 +16,15 @@ package storage
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/internal/trace"
|
||||
"google.golang.org/api/googleapi"
|
||||
storagepb "google.golang.org/genproto/googleapis/storage/v2"
|
||||
)
|
||||
|
||||
var crc32cTable = crc32.MakeTable(crc32.Castagnoli)
|
||||
|
|
@ -95,10 +90,6 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
|
|||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
if o.c.gc != nil {
|
||||
return o.newRangeReaderWithGRPC(ctx, offset, length)
|
||||
}
|
||||
|
||||
if err := o.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -110,208 +101,31 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
|
|||
return nil, err
|
||||
}
|
||||
}
|
||||
u := &url.URL{
|
||||
Scheme: o.c.scheme,
|
||||
Host: o.c.readHost,
|
||||
Path: fmt.Sprintf("/%s/%s", o.bucket, o.object),
|
||||
}
|
||||
verb := "GET"
|
||||
if length == 0 {
|
||||
verb = "HEAD"
|
||||
}
|
||||
req, err := http.NewRequest(verb, u.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
if o.userProject != "" {
|
||||
req.Header.Set("X-Goog-User-Project", o.userProject)
|
||||
}
|
||||
if o.readCompressed {
|
||||
req.Header.Set("Accept-Encoding", "gzip")
|
||||
}
|
||||
if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil {
|
||||
return nil, err
|
||||
|
||||
opts := makeStorageOpts(true, o.retry, o.userProject)
|
||||
|
||||
params := &newRangeReaderParams{
|
||||
bucket: o.bucket,
|
||||
object: o.object,
|
||||
gen: o.gen,
|
||||
offset: offset,
|
||||
length: length,
|
||||
encryptionKey: o.encryptionKey,
|
||||
conds: o.conds,
|
||||
readCompressed: o.readCompressed,
|
||||
}
|
||||
|
||||
gen := o.gen
|
||||
r, err = o.c.tc.NewRangeReader(ctx, params, opts...)
|
||||
|
||||
// Define a function that initiates a Read with offset and length, assuming we
|
||||
// have already read seen bytes.
|
||||
reopen := func(seen int64) (*http.Response, error) {
|
||||
// If the context has already expired, return immediately without making a
|
||||
// call.
|
||||
if err := ctx.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
start := offset + seen
|
||||
if length < 0 && start < 0 {
|
||||
req.Header.Set("Range", fmt.Sprintf("bytes=%d", start))
|
||||
} else if length < 0 && start > 0 {
|
||||
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", start))
|
||||
} else if length > 0 {
|
||||
// The end character isn't affected by how many bytes we've seen.
|
||||
req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, offset+length-1))
|
||||
}
|
||||
// We wait to assign conditions here because the generation number can change in between reopen() runs.
|
||||
if err := setConditionsHeaders(req.Header, o.conds); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// If an object generation is specified, include generation as query string parameters.
|
||||
if gen >= 0 {
|
||||
req.URL.RawQuery = fmt.Sprintf("generation=%d", gen)
|
||||
}
|
||||
|
||||
var res *http.Response
|
||||
err = run(ctx, func() error {
|
||||
res, err = o.c.hc.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if res.StatusCode == http.StatusNotFound {
|
||||
res.Body.Close()
|
||||
return ErrObjectNotExist
|
||||
}
|
||||
if res.StatusCode < 200 || res.StatusCode > 299 {
|
||||
body, _ := ioutil.ReadAll(res.Body)
|
||||
res.Body.Close()
|
||||
return &googleapi.Error{
|
||||
Code: res.StatusCode,
|
||||
Header: res.Header,
|
||||
Body: string(body),
|
||||
}
|
||||
}
|
||||
|
||||
partialContentNotSatisfied :=
|
||||
!decompressiveTranscoding(res) &&
|
||||
start > 0 && length != 0 &&
|
||||
res.StatusCode != http.StatusPartialContent
|
||||
|
||||
if partialContentNotSatisfied {
|
||||
res.Body.Close()
|
||||
return errors.New("storage: partial request not satisfied")
|
||||
}
|
||||
|
||||
// With "Content-Encoding": "gzip" aka decompressive transcoding, GCS serves
|
||||
// back the whole file regardless of the range count passed in as per:
|
||||
// https://cloud.google.com/storage/docs/transcoding#range,
|
||||
// thus we have to manually move the body forward by seen bytes.
|
||||
if decompressiveTranscoding(res) && seen > 0 {
|
||||
_, _ = io.CopyN(ioutil.Discard, res.Body, seen)
|
||||
}
|
||||
|
||||
// If a generation hasn't been specified, and this is the first response we get, let's record the
|
||||
// generation. In future requests we'll use this generation as a precondition to avoid data races.
|
||||
if gen < 0 && res.Header.Get("X-Goog-Generation") != "" {
|
||||
gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
gen = gen64
|
||||
}
|
||||
return nil
|
||||
}, o.retry, true, setRetryHeaderHTTP(&readerRequestWrapper{req}))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
res, err := reopen(0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var (
|
||||
size int64 // total size of object, even if a range was requested.
|
||||
checkCRC bool
|
||||
crc uint32
|
||||
startOffset int64 // non-zero if range request.
|
||||
)
|
||||
if res.StatusCode == http.StatusPartialContent {
|
||||
cr := strings.TrimSpace(res.Header.Get("Content-Range"))
|
||||
if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
|
||||
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
|
||||
}
|
||||
// Content range is formatted <first byte>-<last byte>/<total size>. We take
|
||||
// the total size.
|
||||
size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
|
||||
}
|
||||
|
||||
dashIndex := strings.Index(cr, "-")
|
||||
if dashIndex >= 0 {
|
||||
startOffset, err = strconv.ParseInt(cr[len("bytes="):dashIndex], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("storage: invalid Content-Range %q: %v", cr, err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
size = res.ContentLength
|
||||
// Check the CRC iff all of the following hold:
|
||||
// - We asked for content (length != 0).
|
||||
// - We got all the content (status != PartialContent).
|
||||
// - The server sent a CRC header.
|
||||
// - The Go http stack did not uncompress the file.
|
||||
// - We were not served compressed data that was uncompressed on download.
|
||||
// The problem with the last two cases is that the CRC will not match -- GCS
|
||||
// computes it on the compressed contents, but we compute it on the
|
||||
// uncompressed contents.
|
||||
if length != 0 && !res.Uncompressed && !uncompressedByServer(res) {
|
||||
crc, checkCRC = parseCRC32c(res)
|
||||
}
|
||||
}
|
||||
|
||||
remain := res.ContentLength
|
||||
body := res.Body
|
||||
if length == 0 {
|
||||
remain = 0
|
||||
body.Close()
|
||||
body = emptyBody
|
||||
}
|
||||
var metaGen int64
|
||||
if res.Header.Get("X-Goog-Metageneration") != "" {
|
||||
metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var lm time.Time
|
||||
if res.Header.Get("Last-Modified") != "" {
|
||||
lm, err = http.ParseTime(res.Header.Get("Last-Modified"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
attrs := ReaderObjectAttrs{
|
||||
Size: size,
|
||||
ContentType: res.Header.Get("Content-Type"),
|
||||
ContentEncoding: res.Header.Get("Content-Encoding"),
|
||||
CacheControl: res.Header.Get("Cache-Control"),
|
||||
LastModified: lm,
|
||||
StartOffset: startOffset,
|
||||
Generation: gen,
|
||||
Metageneration: metaGen,
|
||||
}
|
||||
return &Reader{
|
||||
Attrs: attrs,
|
||||
body: body,
|
||||
size: size,
|
||||
remain: remain,
|
||||
wantCRC: crc,
|
||||
checkCRC: checkCRC,
|
||||
reopen: reopen,
|
||||
}, nil
|
||||
return r, err
|
||||
}
|
||||
|
||||
// decompressiveTranscoding returns true if the request was served decompressed
|
||||
// and different than its original storage form. This happens when the "Content-Encoding"
|
||||
// header is "gzip".
|
||||
// See:
|
||||
// * https://cloud.google.com/storage/docs/transcoding#transcoding_and_gzip
|
||||
// * https://github.com/googleapis/google-cloud-go/issues/1800
|
||||
// - https://cloud.google.com/storage/docs/transcoding#transcoding_and_gzip
|
||||
// - https://github.com/googleapis/google-cloud-go/issues/1800
|
||||
func decompressiveTranscoding(res *http.Response) bool {
|
||||
// Decompressive Transcoding.
|
||||
return res.Header.Get("Content-Encoding") == "gzip" ||
|
||||
|
|
@ -376,42 +190,21 @@ var emptyBody = ioutil.NopCloser(strings.NewReader(""))
|
|||
// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding.
|
||||
type Reader struct {
|
||||
Attrs ReaderObjectAttrs
|
||||
body io.ReadCloser
|
||||
seen, remain, size int64
|
||||
checkCRC bool // should we check the CRC?
|
||||
wantCRC uint32 // the CRC32c value the server sent in the header
|
||||
gotCRC uint32 // running crc
|
||||
reopen func(seen int64) (*http.Response, error)
|
||||
|
||||
// The following fields are only for use in the gRPC hybrid client.
|
||||
stream storagepb.Storage_ReadObjectClient
|
||||
reopenWithGRPC func(seen int64) (*readStreamResponse, context.CancelFunc, error)
|
||||
leftovers []byte
|
||||
cancelStream context.CancelFunc
|
||||
}
|
||||
|
||||
type readStreamResponse struct {
|
||||
stream storagepb.Storage_ReadObjectClient
|
||||
response *storagepb.ReadObjectResponse
|
||||
reader io.ReadCloser
|
||||
}
|
||||
|
||||
// Close closes the Reader. It must be called when done reading.
|
||||
func (r *Reader) Close() error {
|
||||
if r.body != nil {
|
||||
return r.body.Close()
|
||||
}
|
||||
|
||||
r.closeStream()
|
||||
return nil
|
||||
return r.reader.Close()
|
||||
}
|
||||
|
||||
func (r *Reader) Read(p []byte) (int, error) {
|
||||
read := r.readWithRetry
|
||||
if r.reopenWithGRPC != nil {
|
||||
read = r.readWithGRPC
|
||||
}
|
||||
|
||||
n, err := read(p)
|
||||
n, err := r.reader.Read(p)
|
||||
if r.remain != -1 {
|
||||
r.remain -= int64(n)
|
||||
}
|
||||
|
|
@ -430,268 +223,6 @@ func (r *Reader) Read(p []byte) (int, error) {
|
|||
return n, err
|
||||
}
|
||||
|
||||
// newRangeReaderWithGRPC creates a new Reader with the given range that uses
|
||||
// gRPC to read Object content.
|
||||
//
|
||||
// This is an experimental API and not intended for public use.
|
||||
func (o *ObjectHandle) newRangeReaderWithGRPC(ctx context.Context, offset, length int64) (r *Reader, err error) {
|
||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.newRangeReaderWithGRPC")
|
||||
defer func() { trace.EndSpan(ctx, err) }()
|
||||
|
||||
if o.c.gc == nil {
|
||||
err = fmt.Errorf("handle doesn't have a gRPC client initialized")
|
||||
return
|
||||
}
|
||||
if err = o.validate(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// A negative length means "read to the end of the object", but the
|
||||
// read_limit field it corresponds to uses zero to mean the same thing. Thus
|
||||
// we coerce the length to 0 to read to the end of the object.
|
||||
if length < 0 {
|
||||
length = 0
|
||||
}
|
||||
|
||||
// For now, there are only globally unique buckets, and "_" is the alias
|
||||
// project ID for such buckets.
|
||||
b := bucketResourceName("_", o.bucket)
|
||||
req := &storagepb.ReadObjectRequest{
|
||||
Bucket: b,
|
||||
Object: o.object,
|
||||
}
|
||||
// The default is a negative value, which means latest.
|
||||
if o.gen >= 0 {
|
||||
req.Generation = o.gen
|
||||
}
|
||||
|
||||
// Define a function that initiates a Read with offset and length, assuming
|
||||
// we have already read seen bytes.
|
||||
reopen := func(seen int64) (*readStreamResponse, context.CancelFunc, error) {
|
||||
// If the context has already expired, return immediately without making
|
||||
// we call.
|
||||
if err := ctx.Err(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
cc, cancel := context.WithCancel(ctx)
|
||||
|
||||
start := offset + seen
|
||||
// Only set a ReadLimit if length is greater than zero, because zero
|
||||
// means read it all.
|
||||
if length > 0 {
|
||||
req.ReadLimit = length - seen
|
||||
}
|
||||
req.ReadOffset = start
|
||||
|
||||
if err := applyCondsProto("reopenWithGRPC", o.gen, o.conds, req); err != nil {
|
||||
cancel()
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
var stream storagepb.Storage_ReadObjectClient
|
||||
var msg *storagepb.ReadObjectResponse
|
||||
var err error
|
||||
|
||||
err = run(cc, func() error {
|
||||
stream, err = o.c.gc.ReadObject(cc, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msg, err = stream.Recv()
|
||||
|
||||
return err
|
||||
}, o.retry, true, setRetryHeaderGRPC(ctx))
|
||||
if err != nil {
|
||||
// Close the stream context we just created to ensure we don't leak
|
||||
// resources.
|
||||
cancel()
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return &readStreamResponse{stream, msg}, cancel, nil
|
||||
}
|
||||
|
||||
res, cancel, err := reopen(0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r = &Reader{
|
||||
stream: res.stream,
|
||||
reopenWithGRPC: reopen,
|
||||
cancelStream: cancel,
|
||||
}
|
||||
|
||||
// The first message was Recv'd on stream open, use it to populate the
|
||||
// object metadata.
|
||||
msg := res.response
|
||||
obj := msg.GetMetadata()
|
||||
// This is the size of the entire object, even if only a range was requested.
|
||||
size := obj.GetSize()
|
||||
|
||||
r.Attrs = ReaderObjectAttrs{
|
||||
Size: size,
|
||||
ContentType: obj.GetContentType(),
|
||||
ContentEncoding: obj.GetContentEncoding(),
|
||||
CacheControl: obj.GetCacheControl(),
|
||||
LastModified: obj.GetUpdateTime().AsTime(),
|
||||
Metageneration: obj.GetMetageneration(),
|
||||
Generation: obj.GetGeneration(),
|
||||
}
|
||||
|
||||
r.size = size
|
||||
cr := msg.GetContentRange()
|
||||
if cr != nil {
|
||||
r.Attrs.StartOffset = cr.GetStart()
|
||||
r.remain = cr.GetEnd() - cr.GetStart() + 1
|
||||
} else {
|
||||
r.remain = size
|
||||
}
|
||||
|
||||
// Only support checksums when reading an entire object, not a range.
|
||||
if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil && offset == 0 && length == 0 {
|
||||
r.wantCRC = checksums.GetCrc32C()
|
||||
r.checkCRC = true
|
||||
}
|
||||
|
||||
// Store the content from the first Recv in the client buffer for reading
|
||||
// later.
|
||||
r.leftovers = msg.GetChecksummedData().GetContent()
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (r *Reader) readWithRetry(p []byte) (int, error) {
|
||||
n := 0
|
||||
for len(p[n:]) > 0 {
|
||||
m, err := r.body.Read(p[n:])
|
||||
n += m
|
||||
r.seen += int64(m)
|
||||
if err == nil || err == io.EOF {
|
||||
return n, err
|
||||
}
|
||||
// Read failed (likely due to connection issues), but we will try to reopen
|
||||
// the pipe and continue. Send a ranged read request that takes into account
|
||||
// the number of bytes we've already seen.
|
||||
res, err := r.reopen(r.seen)
|
||||
if err != nil {
|
||||
// reopen already retries
|
||||
return n, err
|
||||
}
|
||||
r.body.Close()
|
||||
r.body = res.Body
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// closeStream cancels a stream's context in order for it to be closed and
|
||||
// collected.
|
||||
//
|
||||
// This is an experimental API and not intended for public use.
|
||||
func (r *Reader) closeStream() {
|
||||
if r.cancelStream != nil {
|
||||
r.cancelStream()
|
||||
}
|
||||
r.stream = nil
|
||||
}
|
||||
|
||||
// readWithGRPC reads bytes into the user's buffer from an open gRPC stream.
|
||||
//
|
||||
// This is an experimental API and not intended for public use.
|
||||
func (r *Reader) readWithGRPC(p []byte) (int, error) {
|
||||
// No stream to read from, either never initiliazed or Close was called.
|
||||
// Note: There is a potential concurrency issue if multiple routines are
|
||||
// using the same reader. One encounters an error and the stream is closed
|
||||
// and then reopened while the other routine attempts to read from it.
|
||||
if r.stream == nil {
|
||||
return 0, fmt.Errorf("reader has been closed")
|
||||
}
|
||||
|
||||
// The entire object has been read by this reader, return EOF.
|
||||
if r.size != 0 && r.size == r.seen {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
var n int
|
||||
// Read leftovers and return what was available to conform to the Reader
|
||||
// interface: https://pkg.go.dev/io#Reader.
|
||||
if len(r.leftovers) > 0 {
|
||||
n = copy(p, r.leftovers)
|
||||
r.seen += int64(n)
|
||||
r.leftovers = r.leftovers[n:]
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Attempt to Recv the next message on the stream.
|
||||
msg, err := r.recv()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// TODO: Determine if we need to capture incremental CRC32C for this
|
||||
// chunk. The Object CRC32C checksum is captured when directed to read
|
||||
// the entire Object. If directed to read a range, we may need to
|
||||
// calculate the range's checksum for verification if the checksum is
|
||||
// present in the response here.
|
||||
// TODO: Figure out if we need to support decompressive transcoding
|
||||
// https://cloud.google.com/storage/docs/transcoding.
|
||||
content := msg.GetChecksummedData().GetContent()
|
||||
n = copy(p[n:], content)
|
||||
leftover := len(content) - n
|
||||
if leftover > 0 {
|
||||
// Wasn't able to copy all of the data in the message, store for
|
||||
// future Read calls.
|
||||
r.leftovers = content[n:]
|
||||
}
|
||||
r.seen += int64(n)
|
||||
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// recv attempts to Recv the next message on the stream. In the event
|
||||
// that a retryable error is encountered, the stream will be closed, reopened,
|
||||
// and Recv again. This will attempt to Recv until one of the following is true:
|
||||
//
|
||||
// * Recv is successful
|
||||
// * A non-retryable error is encountered
|
||||
// * The Reader's context is canceled
|
||||
//
|
||||
// The last error received is the one that is returned, which could be from
|
||||
// an attempt to reopen the stream.
|
||||
//
|
||||
// This is an experimental API and not intended for public use.
|
||||
func (r *Reader) recv() (*storagepb.ReadObjectResponse, error) {
|
||||
msg, err := r.stream.Recv()
|
||||
if err != nil && shouldRetry(err) {
|
||||
// This will "close" the existing stream and immediately attempt to
|
||||
// reopen the stream, but will backoff if further attempts are necessary.
|
||||
// Reopening the stream Recvs the first message, so if retrying is
|
||||
// successful, the next logical chunk will be returned.
|
||||
msg, err = r.reopenStream(r.seen)
|
||||
}
|
||||
|
||||
return msg, err
|
||||
}
|
||||
|
||||
// reopenStream "closes" the existing stream and attempts to reopen a stream and
|
||||
// sets the Reader's stream and cancelStream properties in the process.
|
||||
//
|
||||
// This is an experimental API and not intended for public use.
|
||||
func (r *Reader) reopenStream(seen int64) (*storagepb.ReadObjectResponse, error) {
|
||||
// Close existing stream and initialize new stream with updated offset.
|
||||
r.closeStream()
|
||||
|
||||
res, cancel, err := r.reopenWithGRPC(r.seen)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r.stream = res.stream
|
||||
r.cancelStream = cancel
|
||||
return res.response, nil
|
||||
}
|
||||
|
||||
// Size returns the size of the object in bytes.
|
||||
// The returned value is always the same and is not affected by
|
||||
// calls to Read or Close.
|
||||
|
|
|
|||
318 vendor/cloud.google.com/go/storage/storage.go generated vendored
|
|
@ -40,7 +40,7 @@ import (
|
|||
"cloud.google.com/go/internal/optional"
|
||||
"cloud.google.com/go/internal/trace"
|
||||
"cloud.google.com/go/storage/internal"
|
||||
gapic "cloud.google.com/go/storage/internal/apiv2"
|
||||
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
|
||||
"github.com/googleapis/gax-go/v2"
|
||||
"golang.org/x/oauth2/google"
|
||||
"google.golang.org/api/googleapi"
|
||||
|
|
@ -49,7 +49,6 @@ import (
|
|||
raw "google.golang.org/api/storage/v1"
|
||||
"google.golang.org/api/transport"
|
||||
htransport "google.golang.org/api/transport/http"
|
||||
storagepb "google.golang.org/genproto/googleapis/storage/v2"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
|
|
@ -84,6 +83,14 @@ const (
|
|||
// ScopeReadWrite grants permissions to manage your
|
||||
// data in Google Cloud Storage.
|
||||
ScopeReadWrite = raw.DevstorageReadWriteScope
|
||||
|
||||
// aes256Algorithm is the AES256 encryption algorithm used with the
|
||||
// Customer-Supplied Encryption Keys feature.
|
||||
aes256Algorithm = "AES256"
|
||||
|
||||
// defaultGen indicates the latest object generation by default,
|
||||
// using a negative value.
|
||||
defaultGen = int64(-1)
|
||||
)
|
||||
|
||||
// TODO: remove this once header with invocation ID is applied to all methods.
|
||||
|
|
@ -106,10 +113,12 @@ type Client struct {
|
|||
creds *google.Credentials
|
||||
retry *retryConfig
|
||||
|
||||
// gc is an optional gRPC-based, GAPIC client.
|
||||
//
|
||||
// This is an experimental field and not intended for public use.
|
||||
gc *gapic.Client
|
||||
// tc is the transport-agnostic client implemented with either gRPC or HTTP.
|
||||
tc storageClient
|
||||
// useGRPC flags whether the client uses gRPC. This is needed while the
|
||||
// integration piece is only partially complete.
|
||||
// TODO: remove before merging to main.
|
||||
useGRPC bool
|
||||
}
|
||||
|
||||
// NewClient creates a new Google Cloud Storage client.
|
||||
|
|
@ -190,12 +199,18 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
|
|||
return nil, fmt.Errorf("supplied endpoint %q is not valid: %v", ep, err)
|
||||
}
|
||||
|
||||
tc, err := newHTTPStorageClient(ctx, withClientOptions(opts...))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("storage: %v", err)
|
||||
}
|
||||
|
||||
return &Client{
|
||||
hc: hc,
|
||||
raw: rawService,
|
||||
scheme: u.Scheme,
|
||||
readHost: u.Host,
|
||||
creds: creds,
|
||||
tc: tc,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
|
@ -205,12 +220,12 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
|
|||
// This is an experimental API and not intended for public use.
|
||||
func newGRPCClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
|
||||
opts = append(defaultGRPCOptions(), opts...)
|
||||
g, err := gapic.NewClient(ctx, opts...)
|
||||
tc, err := newGRPCStorageClient(ctx, withClientOptions(opts...))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Client{gc: g}, nil
|
||||
return &Client{tc: tc, useGRPC: true}, nil
|
||||
}
|
||||
|
||||
// Close closes the Client.
|
||||
|
|
@ -221,8 +236,8 @@ func (c *Client) Close() error {
|
|||
c.hc = nil
|
||||
c.raw = nil
|
||||
c.creds = nil
|
||||
if c.gc != nil {
|
||||
return c.gc.Close()
|
||||
if c.tc != nil {
|
||||
return c.tc.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -504,13 +519,13 @@ func v2SanitizeHeaders(hdrs []string) []string {
|
|||
// at https://cloud.google.com/storage/docs/authentication/canonical-requests#about-headers.
|
||||
//
|
||||
// V4 does a couple things differently from V2:
|
||||
// - Headers get sorted by key, instead of by key:value. We do this in
|
||||
// signedURLV4.
|
||||
// - There's no canonical regexp: we simply split headers on :.
|
||||
// - We don't exclude canonical headers.
|
||||
// - We replace leading and trailing spaces in header values, like v2, but also
|
||||
// all intermediate space duplicates get stripped. That is, there's only ever
|
||||
// a single consecutive space.
|
||||
// - Headers get sorted by key, instead of by key:value. We do this in
|
||||
// signedURLV4.
|
||||
// - There's no canonical regexp: we simply split headers on :.
|
||||
// - We don't exclude canonical headers.
|
||||
// - We replace leading and trailing spaces in header values, like v2, but also
|
||||
// all intermediate space duplicates get stripped. That is, there's only ever
|
||||
// a single consecutive space.
|
||||
func v4SanitizeHeaders(hdrs []string) []string {
|
||||
headerMap := map[string][]string{}
|
||||
for _, hdr := range hdrs {
|
||||
|
|
@ -902,27 +917,8 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error
|
|||
if err := o.validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
call := o.c.raw.Objects.Get(o.bucket, o.object).Projection("full").Context(ctx)
|
||||
if err := applyConds("Attrs", o.gen, o.conds, call); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if o.userProject != "" {
|
||||
call.UserProject(o.userProject)
|
||||
}
|
||||
if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var obj *raw.Object
|
||||
setClientHeader(call.Header())
|
||||
err = run(ctx, func() error { obj, err = call.Do(); return err }, o.retry, true, setRetryHeaderHTTP(call))
|
||||
var e *googleapi.Error
|
||||
if errors.As(err, &e) && e.Code == http.StatusNotFound {
|
||||
return nil, ErrObjectNotExist
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newObject(obj), nil
|
||||
opts := makeStorageOpts(true, o.retry, o.userProject)
|
||||
return o.c.tc.GetObject(ctx, o.bucket, o.object, o.gen, o.encryptionKey, o.conds, opts...)
|
||||
}

// Update updates an object with the provided attributes. See

@@ -935,99 +931,9 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
if err := o.validate(); err != nil {
return nil, err
}
var attrs ObjectAttrs
// Lists of fields to send, and set to null, in the JSON.
var forceSendFields, nullFields []string
if uattrs.ContentType != nil {
attrs.ContentType = optional.ToString(uattrs.ContentType)
// For ContentType, sending the empty string is a no-op.
// Instead we send a null.
if attrs.ContentType == "" {
nullFields = append(nullFields, "ContentType")
} else {
forceSendFields = append(forceSendFields, "ContentType")
}
}
if uattrs.ContentLanguage != nil {
attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage)
// For ContentLanguage it's an error to send the empty string.
// Instead we send a null.
if attrs.ContentLanguage == "" {
nullFields = append(nullFields, "ContentLanguage")
} else {
forceSendFields = append(forceSendFields, "ContentLanguage")
}
}
if uattrs.ContentEncoding != nil {
attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding)
forceSendFields = append(forceSendFields, "ContentEncoding")
}
if uattrs.ContentDisposition != nil {
attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition)
forceSendFields = append(forceSendFields, "ContentDisposition")
}
if uattrs.CacheControl != nil {
attrs.CacheControl = optional.ToString(uattrs.CacheControl)
forceSendFields = append(forceSendFields, "CacheControl")
}
if uattrs.EventBasedHold != nil {
attrs.EventBasedHold = optional.ToBool(uattrs.EventBasedHold)
forceSendFields = append(forceSendFields, "EventBasedHold")
}
if uattrs.TemporaryHold != nil {
attrs.TemporaryHold = optional.ToBool(uattrs.TemporaryHold)
forceSendFields = append(forceSendFields, "TemporaryHold")
}
if !uattrs.CustomTime.IsZero() {
attrs.CustomTime = uattrs.CustomTime
forceSendFields = append(forceSendFields, "CustomTime")
}
if uattrs.Metadata != nil {
attrs.Metadata = uattrs.Metadata
if len(attrs.Metadata) == 0 {
// Sending the empty map is a no-op. We send null instead.
nullFields = append(nullFields, "Metadata")
} else {
forceSendFields = append(forceSendFields, "Metadata")
}
}
if uattrs.ACL != nil {
attrs.ACL = uattrs.ACL
// It's an error to attempt to delete the ACL, so
// we don't append to nullFields here.
forceSendFields = append(forceSendFields, "Acl")
}
rawObj := attrs.toRawObject(o.bucket)
rawObj.ForceSendFields = forceSendFields
rawObj.NullFields = nullFields
call := o.c.raw.Objects.Patch(o.bucket, o.object, rawObj).Projection("full").Context(ctx)
if err := applyConds("Update", o.gen, o.conds, call); err != nil {
return nil, err
}
if o.userProject != "" {
call.UserProject(o.userProject)
}
if uattrs.PredefinedACL != "" {
call.PredefinedAcl(uattrs.PredefinedACL)
}
if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil {
return nil, err
}
var obj *raw.Object
setClientHeader(call.Header())
var isIdempotent bool
if o.conds != nil && o.conds.MetagenerationMatch != 0 {
isIdempotent = true
}
err = run(ctx, func() error { obj, err = call.Do(); return err }, o.retry, isIdempotent, setRetryHeaderHTTP(call))
var e *googleapi.Error
if errors.As(err, &e) && e.Code == http.StatusNotFound {
return nil, ErrObjectNotExist
}
if err != nil {
return nil, err
}
return newObject(obj), nil
isIdempotent := o.conds != nil && o.conds.MetagenerationMatch != 0
opts := makeStorageOpts(isIdempotent, o.retry, o.userProject)
return o.c.tc.UpdateObject(ctx, o.bucket, o.object, &uattrs, o.gen, o.encryptionKey, o.conds, opts...)
}
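As with Attrs, Update now delegates to tc.UpdateObject while keeping the ObjectAttrsToUpdate semantics (an empty string or empty map clears a field). A hedged sketch mirroring the doc-comment example further down; it assumes the same imports as the earlier sketch and a handle obtained from a client.

func objectUpdateExample(ctx context.Context, obj *storage.ObjectHandle) error {
    updated, err := obj.Update(ctx, storage.ObjectAttrsToUpdate{
        ContentType:     "text/html",         // force-sent
        ContentEncoding: "",                  // cleared (sent as null)
        Metadata:        map[string]string{}, // cleared (sent as null)
    })
    if err != nil {
        return err
    }
    _ = updated // updated attributes after the patch
    return nil
}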

// BucketName returns the name of the bucket.

@@ -1047,11 +953,12 @@ func (o *ObjectHandle) ObjectName() string {
//
// For example, to change ContentType and delete ContentEncoding and
// Metadata, use
// ObjectAttrsToUpdate{
// ContentType: "text/html",
// ContentEncoding: "",
// Metadata: map[string]string{},
// }
//
// ObjectAttrsToUpdate{
// ContentType: "text/html",
// ContentEncoding: "",
// Metadata: map[string]string{},
// }
type ObjectAttrsToUpdate struct {
EventBasedHold optional.Bool
TemporaryHold optional.Bool

@@ -1074,27 +981,11 @@ func (o *ObjectHandle) Delete(ctx context.Context) error {
if err := o.validate(); err != nil {
return err
}
call := o.c.raw.Objects.Delete(o.bucket, o.object).Context(ctx)
if err := applyConds("Delete", o.gen, o.conds, call); err != nil {
return err
}
if o.userProject != "" {
call.UserProject(o.userProject)
}
// Encryption doesn't apply to Delete.
setClientHeader(call.Header())
var isIdempotent bool
// Delete is idempotent if GenerationMatch or Generation have been passed in.
// The default generation is negative to get the latest version of the object.
if (o.conds != nil && o.conds.GenerationMatch != 0) || o.gen >= 0 {
isIdempotent = true
}
err := run(ctx, func() error { return call.Do() }, o.retry, isIdempotent, setRetryHeaderHTTP(call))
var e *googleapi.Error
if errors.As(err, &e) && e.Code == http.StatusNotFound {
return ErrObjectNotExist
}
return err
isIdempotent := (o.conds != nil && o.conds.GenerationMatch != 0) || o.gen >= 0
opts := makeStorageOpts(isIdempotent, o.retry, o.userProject)
return o.c.tc.DeleteObject(ctx, o.bucket, o.object, o.gen, o.conds, opts...)
}
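Delete keeps the same idempotency rule after the refactor: the call is treated as retryable only when a specific generation or a GenerationMatch precondition is present. A sketch, assuming a handle obtained as in the earlier examples:

func objectDeleteExample(ctx context.Context, obj *storage.ObjectHandle) error {
    // Read the current generation, then delete exactly that generation so the
    // operation is safe to retry.
    attrs, err := obj.Attrs(ctx)
    if err != nil {
        return err
    }
    return obj.If(storage.Conditions{GenerationMatch: attrs.Generation}).Delete(ctx)
}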

// ReadCompressed when true causes the read to happen without decompressing.

@@ -1203,8 +1094,11 @@ func (o *ObjectAttrs) toProtoObject(b string) *storagepb.Object {
}

// For now, there are only globally unique buckets, and "_" is the alias
// project ID for such buckets.
b = bucketResourceName("_", b)
// project ID for such buckets. If the bucket is not provided, like in the
// destination ObjectAttrs of a Copy, do not attempt to format it.
if b != "" {
b = bucketResourceName(globalProjectAlias, b)
}

return &storagepb.Object{
Bucket: b,

@@ -1231,6 +1125,49 @@ func (o *ObjectAttrs) toProtoObject(b string) *storagepb.Object {
}
}

// toProtoObject copies the attributes to update from uattrs to the proto library's Object type.
func (uattrs *ObjectAttrsToUpdate) toProtoObject(bucket, object string) *storagepb.Object {
o := &storagepb.Object{
Name: object,
Bucket: bucket,
}
if uattrs == nil {
return o
}

if uattrs.EventBasedHold != nil {
o.EventBasedHold = proto.Bool(optional.ToBool(uattrs.EventBasedHold))
}
if uattrs.TemporaryHold != nil {
o.TemporaryHold = optional.ToBool(uattrs.TemporaryHold)
}
if uattrs.ContentType != nil {
o.ContentType = optional.ToString(uattrs.ContentType)
}
if uattrs.ContentLanguage != nil {
o.ContentLanguage = optional.ToString(uattrs.ContentLanguage)
}
if uattrs.ContentEncoding != nil {
o.ContentEncoding = optional.ToString(uattrs.ContentEncoding)
}
if uattrs.ContentDisposition != nil {
o.ContentDisposition = optional.ToString(uattrs.ContentDisposition)
}
if uattrs.CacheControl != nil {
o.CacheControl = optional.ToString(uattrs.CacheControl)
}
if !uattrs.CustomTime.IsZero() {
o.CustomTime = toProtoTimestamp(uattrs.CustomTime)
}
if uattrs.ACL != nil {
o.Acl = toProtoObjectACL(uattrs.ACL)
}

// TODO(cathyo): Handle metadata. Pending b/230510191.

return o
}

// ObjectAttrs represents the metadata for a Google Cloud Storage (GCS) object.
type ObjectAttrs struct {
// Bucket is the name of the bucket containing this GCS object.
@@ -1312,6 +1249,10 @@ type ObjectAttrs struct {

// Metadata represents user-provided metadata, in key/value pairs.
// It can be nil if no metadata is provided.
//
// For object downloads using Reader, metadata keys are sent as headers.
// Therefore, avoid setting metadata keys using characters that are not valid
// for headers. See https://www.rfc-editor.org/rfc/rfc7230#section-3.2.6.
Metadata map[string]string

// Generation is the generation number of the object's content.

@@ -1786,6 +1727,33 @@ func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall
return nil
}

func applySourceCondsProto(gen int64, conds *Conditions, call *storagepb.RewriteObjectRequest) error {
if gen >= 0 {
call.SourceGeneration = gen
}
if conds == nil {
return nil
}
if err := conds.validate("CopyTo source"); err != nil {
return err
}
switch {
case conds.GenerationMatch != 0:
call.IfSourceGenerationMatch = proto.Int64(conds.GenerationMatch)
case conds.GenerationNotMatch != 0:
call.IfSourceGenerationNotMatch = proto.Int64(conds.GenerationNotMatch)
case conds.DoesNotExist:
call.IfSourceGenerationMatch = proto.Int64(0)
}
switch {
case conds.MetagenerationMatch != 0:
call.IfSourceMetagenerationMatch = proto.Int64(conds.MetagenerationMatch)
case conds.MetagenerationNotMatch != 0:
call.IfSourceMetagenerationNotMatch = proto.Int64(conds.MetagenerationNotMatch)
}
return nil
}
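The new applySourceCondsProto mirrors the existing applySourceConds for the gRPC transport; both are driven by the conditions set on the source handle of a copy. A hedged sketch of how source conditions reach this code through the public Copier API (bucket and object names are placeholders):

func copyWithSourceCondsExample(ctx context.Context, client *storage.Client) error {
    src := client.Bucket("src-bucket").Object("src.txt")
    dst := client.Bucket("dst-bucket").Object("dst.txt")
    // Only copy if the source metageneration has not changed since we last read it.
    srcAttrs, err := src.Attrs(ctx)
    if err != nil {
        return err
    }
    copier := dst.CopierFrom(src.If(storage.Conditions{
        MetagenerationMatch: srcAttrs.Metageneration,
    }))
    _, err = copier.Run(ctx)
    return err
}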

// setConditionField sets a field on a *raw.WhateverCall.
// We can't use anonymous interfaces because the return type is
// different, since the field setters are builders.

@@ -1907,8 +1875,8 @@ func (ws *withPolicy) apply(config *retryConfig) {

// WithErrorFunc allows users to pass a custom function to the retryer. Errors
// will be retried if and only if `shouldRetry(err)` returns true.
// By default, the following errors are retried (see invoke.go for the default
// shouldRetry function):
// By default, the following errors are retried (see ShouldRetry for the default
// function):
//
// - HTTP responses with codes 408, 429, 502, 503, and 504.
//

@@ -1919,7 +1887,8 @@ func (ws *withPolicy) apply(config *retryConfig) {
// - Wrapped versions of these errors.
//
// This option can be used to retry on a different set of errors than the
// default.
// default. Users can use the default ShouldRetry function inside their custom
// function if they only want to make minor modifications to default behavior.
func WithErrorFunc(shouldRetry func(err error) bool) RetryOption {
return &withErrorFunc{
shouldRetry: shouldRetry,
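This doc change tracks the 1.26.0 feature that exports ShouldRetry. A hedged sketch of wrapping the default predicate with an extra rule via Retryer; the sentinel error here is hypothetical and only for illustration.

// errTransientBackend is a hypothetical app-specific sentinel error.
var errTransientBackend = errors.New("transient backend error")

func customRetryExample(obj *storage.ObjectHandle) *storage.ObjectHandle {
    return obj.Retryer(
        storage.WithErrorFunc(func(err error) bool {
            // Keep the library's default classification, but also retry the
            // hypothetical sentinel error above.
            return storage.ShouldRetry(err) || errors.Is(err, errTransientBackend)
        }),
        storage.WithPolicy(storage.RetryAlways),
    )
}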
@@ -1992,26 +1961,31 @@ func setEncryptionHeaders(headers http.Header, key []byte, copySource bool) erro
if copySource {
cs = "copy-source-"
}
headers.Set("x-goog-"+cs+"encryption-algorithm", "AES256")
headers.Set("x-goog-"+cs+"encryption-algorithm", aes256Algorithm)
headers.Set("x-goog-"+cs+"encryption-key", base64.StdEncoding.EncodeToString(key))
keyHash := sha256.Sum256(key)
headers.Set("x-goog-"+cs+"encryption-key-sha256", base64.StdEncoding.EncodeToString(keyHash[:]))
return nil
}

// toProtoCommonObjectRequestParams sets customer-supplied encryption to the proto library's CommonObjectRequestParams.
func toProtoCommonObjectRequestParams(key []byte) *storagepb.CommonObjectRequestParams {
if key == nil {
return nil
}
keyHash := sha256.Sum256(key)
return &storagepb.CommonObjectRequestParams{
EncryptionAlgorithm: aes256Algorithm,
EncryptionKeyBytes: key,
EncryptionKeySha256Bytes: keyHash[:],
}
}
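Both the header-based helper and the new proto helper carry the same customer-supplied AES-256 key material; callers never set these headers or proto fields directly. A sketch of attaching a CSEK through the public API (the key is supplied by the caller and must be 32 bytes; this assumes the io import in addition to the earlier ones):

func csekReadExample(ctx context.Context, obj *storage.ObjectHandle, key []byte) error {
    // key must be exactly 32 bytes (AES-256), managed by the caller.
    r, err := obj.Key(key).NewReader(ctx)
    if err != nil {
        return err
    }
    defer r.Close()
    _, err = io.Copy(io.Discard, r)
    return err
}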

// ServiceAccount fetches the email address of the given project's Google Cloud Storage service account.
func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string, error) {
r := c.raw.Projects.ServiceAccount.Get(projectID)
var res *raw.ServiceAccount
var err error
err = run(ctx, func() error {
res, err = r.Context(ctx).Do()
return err
}, c.retry, true, setRetryHeaderHTTP(r))
if err != nil {
return "", err
}
return res.EmailAddress, nil
o := makeStorageOpts(true, c.retry, "")
return c.tc.GetServiceAccount(ctx, projectID, o...)

}
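ServiceAccount remains a one-call wrapper; it now goes through tc.GetServiceAccount and is always treated as idempotent for retries. A sketch with a placeholder project ID:

func serviceAccountExample(ctx context.Context, client *storage.Client) error {
    email, err := client.ServiceAccount(ctx, "my-project-id") // placeholder project
    if err != nil {
        return err
    }
    fmt.Println("GCS service agent:", email)
    return nil
}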

// bucketResourceName formats the given project ID and bucketResourceName ID

495
vendor/cloud.google.com/go/storage/writer.go
generated
vendored
@@ -16,28 +16,12 @@ package storage

import (
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"sync"
"time"
"unicode/utf8"

"github.com/golang/protobuf/proto"
"google.golang.org/api/googleapi"
raw "google.golang.org/api/storage/v1"
storagepb "google.golang.org/genproto/googleapis/storage/v2"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)

const (
// Maximum amount of content that can be sent per WriteObjectRequest message.
// A buffer reaching this amount will precipitate a flush of the buffer.
//
// This is only used for the gRPC-based Writer.
maxPerMessageWriteSize int = int(storagepb.ServiceConstants_MAX_WRITE_CHUNK_BYTES)
)

// A Writer writes a Cloud Storage object.
@@ -123,112 +107,6 @@ type Writer struct {

mu sync.Mutex
err error

// The gRPC client-stream used for sending buffers.
//
// This is an experimental API and not intended for public use.
stream storagepb.Storage_WriteObjectClient

// The Resumable Upload ID started by a gRPC-based Writer.
//
// This is an experimental API and not intended for public use.
upid string
}

func (w *Writer) open() error {
if err := w.validateWriteAttrs(); err != nil {
return err
}

pr, pw := io.Pipe()
w.pw = pw
w.opened = true

go w.monitorCancel()

attrs := w.ObjectAttrs
mediaOpts := []googleapi.MediaOption{
googleapi.ChunkSize(w.ChunkSize),
}
if c := attrs.ContentType; c != "" {
mediaOpts = append(mediaOpts, googleapi.ContentType(c))
}
if w.ChunkRetryDeadline != 0 {
mediaOpts = append(mediaOpts, googleapi.ChunkRetryDeadline(w.ChunkRetryDeadline))
}

go func() {
defer close(w.donec)

rawObj := attrs.toRawObject(w.o.bucket)
if w.SendCRC32C {
rawObj.Crc32c = encodeUint32(attrs.CRC32C)
}
if w.MD5 != nil {
rawObj.Md5Hash = base64.StdEncoding.EncodeToString(w.MD5)
}
call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj).
Media(pr, mediaOpts...).
Projection("full").
Context(w.ctx).
Name(w.o.object)

if w.ProgressFunc != nil {
call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) })
}
if attrs.KMSKeyName != "" {
call.KmsKeyName(attrs.KMSKeyName)
}
if attrs.PredefinedACL != "" {
call.PredefinedAcl(attrs.PredefinedACL)
}
if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil {
w.mu.Lock()
w.err = err
w.mu.Unlock()
pr.CloseWithError(err)
return
}
var resp *raw.Object
err := applyConds("NewWriter", w.o.gen, w.o.conds, call)
if err == nil {
if w.o.userProject != "" {
call.UserProject(w.o.userProject)
}
setClientHeader(call.Header())

// The internals that perform call.Do automatically retry both the initial
// call to set up the upload as well as calls to upload individual chunks
// for a resumable upload (as long as the chunk size is non-zero). Hence
// there is no need to add retries here.

// Retry only when the operation is idempotent or the retry policy is RetryAlways.
isIdempotent := w.o.conds != nil && (w.o.conds.GenerationMatch >= 0 || w.o.conds.DoesNotExist == true)
var useRetry bool
if (w.o.retry == nil || w.o.retry.policy == RetryIdempotent) && isIdempotent {
useRetry = true
} else if w.o.retry != nil && w.o.retry.policy == RetryAlways {
useRetry = true
}
if useRetry {
if w.o.retry != nil {
call.WithRetry(w.o.retry.backoff, w.o.retry.shouldRetry)
} else {
call.WithRetry(nil, nil)
}
}
resp, err = call.Do()
}
if err != nil {
w.mu.Lock()
w.err = err
w.mu.Unlock()
pr.CloseWithError(err)
return
}
w.obj = newObject(resp)
}()
return nil
}

// Write appends to w. It implements the io.Writer interface.

@@ -248,12 +126,7 @@ func (w *Writer) Write(p []byte) (n int, err error) {
return 0, werr
}
if !w.opened {
// gRPC client has been initialized - use gRPC to upload.
if w.o.c.gc != nil {
if err := w.openGRPC(); err != nil {
return 0, err
}
} else if err := w.open(); err != nil {
if err := w.openWriter(); err != nil {
return 0, err
}
}

@@ -277,7 +150,7 @@ func (w *Writer) Write(p []byte) (n int, err error) {
// can be retrieved by calling Attrs.
func (w *Writer) Close() error {
if !w.opened {
if err := w.open(); err != nil {
if err := w.openWriter(); err != nil {
return err
}
}

@@ -293,6 +166,40 @@ func (w *Writer) Close() error {
return w.err
}
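Write and Close now funnel into a single openWriter path regardless of transport. Uploads are only retried when they carry a precondition (or RetryAlways is configured), so a common pattern is to write with a DoesNotExist condition, as sketched below with placeholder names and the same assumed imports as earlier:

func writerExample(ctx context.Context, client *storage.Client, data []byte) error {
    w := client.Bucket("my-bucket").Object("report.txt").
        If(storage.Conditions{DoesNotExist: true}). // makes the upload retryable
        NewWriter(ctx)
    w.ContentType = "text/plain"
    w.ChunkSize = 16 * 1024 * 1024 // buffer size used for resumable uploads
    if _, err := w.Write(data); err != nil {
        _ = w.Close()
        return err
    }
    return w.Close()
}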

func (w *Writer) openWriter() (err error) {
if err := w.validateWriteAttrs(); err != nil {
return err
}
if w.o.gen != defaultGen {
return fmt.Errorf("storage: generation not supported on Writer, got %v", w.o.gen)
}

isIdempotent := w.o.conds != nil && (w.o.conds.GenerationMatch >= 0 || w.o.conds.DoesNotExist == true)
opts := makeStorageOpts(isIdempotent, w.o.retry, w.o.userProject)
go w.monitorCancel()
params := &openWriterParams{
ctx: w.ctx,
chunkSize: w.ChunkSize,
chunkRetryDeadline: w.ChunkRetryDeadline,
bucket: w.o.bucket,
attrs: &w.ObjectAttrs,
conds: w.o.conds,
encryptionKey: w.o.encryptionKey,
sendCRC32C: w.SendCRC32C,
donec: w.donec,
setError: w.error,
progress: w.progress,
setObj: func(o *ObjectAttrs) { w.obj = o },
}
w.pw, err = w.o.c.tc.OpenWriter(params, opts...)
if err != nil {
return err
}
w.opened = true

return nil
}

// monitorCancel is intended to be used as a background goroutine. It monitors the
// context, and when it observes that the context has been canceled, it manually
// closes things that do not take a context.

@@ -361,333 +268,3 @@ func (w *Writer) error(err error) {
w.err = err
w.mu.Unlock()
}

// openGRPC initializes a pipe for the user to write data to, and a routine to
// read from that pipe and upload the data to GCS via gRPC.
//
// This is an experimental API and not intended for public use.
func (w *Writer) openGRPC() error {
if err := w.validateWriteAttrs(); err != nil {
return err
}

pr, pw := io.Pipe()
w.pw = pw
w.opened = true

go w.monitorCancel()

bufSize := w.ChunkSize
if w.ChunkSize == 0 {
// TODO: Should we actually use the minimum of 256 KB here when the user
// indicates they want minimal memory usage? We cannot do a zero-copy,
// bufferless upload like HTTP/JSON can.
// TODO: We need to determine if we can avoid starting a
// resumable upload when the user *plans* to send more than bufSize but
// with a bufferless upload.
bufSize = maxPerMessageWriteSize
}
buf := make([]byte, bufSize)

var offset int64

// This function reads the data sent to the pipe and sends sets of messages
// on the gRPC client-stream as the buffer is filled.
go func() {
defer close(w.donec)

// Loop until there is an error or the Object has been finalized.
for {
// Note: This blocks until either the buffer is full or EOF is read.
recvd, doneReading, err := read(pr, buf)
if err != nil {
err = checkCanceled(err)
w.error(err)
pr.CloseWithError(err)
return
}
toWrite := buf[:recvd]

// TODO: Figure out how to set up encryption via CommonObjectRequestParams.

// The chunk buffer is full, but there is no end in sight. This
// means that a resumable upload will need to be used to send
// multiple chunks, until we are done reading data. Start a
// resumable upload if it has not already been started.
// Otherwise, all data will be sent over a single gRPC stream.
if !doneReading && w.upid == "" {
err = w.startResumableUpload()
if err != nil {
err = checkCanceled(err)
w.error(err)
pr.CloseWithError(err)
return
}
}

o, off, finalized, err := w.uploadBuffer(toWrite, recvd, offset, doneReading)
if err != nil {
err = checkCanceled(err)
w.error(err)
pr.CloseWithError(err)
return
}
// At this point, the current buffer has been uploaded. Capture the
// committed offset here in case the upload was not finalized and
// another chunk is to be uploaded.
offset = off
w.progress(offset)

// When we are done reading data and the chunk has been finalized,
// we are done.
if doneReading && finalized {
// Build Object from server's response.
w.obj = newObjectFromProto(o)
return
}
}
}()

return nil
}

// startResumableUpload initializes a Resumable Upload with gRPC and sets the
// upload ID on the Writer.
//
// This is an experimental API and not intended for public use.
func (w *Writer) startResumableUpload() error {
spec, err := w.writeObjectSpec()
if err != nil {
return err
}
upres, err := w.o.c.gc.StartResumableWrite(w.ctx, &storagepb.StartResumableWriteRequest{
WriteObjectSpec: spec,
})

w.upid = upres.GetUploadId()
return err
}

// queryProgress is a helper that queries the status of the resumable upload
// associated with the given upload ID.
//
// This is an experimental API and not intended for public use.
func (w *Writer) queryProgress() (int64, error) {
q, err := w.o.c.gc.QueryWriteStatus(w.ctx, &storagepb.QueryWriteStatusRequest{UploadId: w.upid})

// q.GetCommittedSize() will return 0 if q is nil.
return q.GetPersistedSize(), err
}

// uploadBuffer opens a Write stream and uploads the buffer at the given offset (if
// uploading a chunk for a resumable uploadBuffer), and will mark the write as
// finished if we are done receiving data from the user. The resulting write
// offset after uploading the buffer is returned, as well as a boolean
// indicating if the Object has been finalized. If it has been finalized, the
// final Object will be returned as well. Finalizing the upload is primarily
// important for Resumable Uploads. A simple or multi-part upload will always
// be finalized once the entire buffer has been written.
//
// This is an experimental API and not intended for public use.
func (w *Writer) uploadBuffer(buf []byte, recvd int, start int64, doneReading bool) (*storagepb.Object, int64, bool, error) {
var err error
var finishWrite bool
var sent, limit int = 0, maxPerMessageWriteSize
offset := start
for {
first := sent == 0
// This indicates that this is the last message and the remaining
// data fits in one message.
belowLimit := recvd-sent <= limit
if belowLimit {
limit = recvd - sent
}
if belowLimit && doneReading {
finishWrite = true
}

// Prepare chunk section for upload.
data := buf[sent : sent+limit]
req := &storagepb.WriteObjectRequest{
Data: &storagepb.WriteObjectRequest_ChecksummedData{
ChecksummedData: &storagepb.ChecksummedData{
Content: data,
},
},
WriteOffset: offset,
FinishWrite: finishWrite,
}

// Open a new stream and set the first_message field on the request.
// The first message on the WriteObject stream must either be the
// Object or the Resumable Upload ID.
if first {
w.stream, err = w.o.c.gc.WriteObject(w.ctx)
if err != nil {
return nil, 0, false, err
}

if w.upid != "" {
req.FirstMessage = &storagepb.WriteObjectRequest_UploadId{UploadId: w.upid}
} else {
spec, err := w.writeObjectSpec()
if err != nil {
return nil, 0, false, err
}
req.FirstMessage = &storagepb.WriteObjectRequest_WriteObjectSpec{
WriteObjectSpec: spec,
}
}

// TODO: Currently the checksums are only sent on the first message
// of the stream, but in the future, we must also support sending it
// on the *last* message of the stream (instead of the first).
if w.SendCRC32C {
req.ObjectChecksums = &storagepb.ObjectChecksums{
Crc32C: proto.Uint32(w.CRC32C),
Md5Hash: w.MD5,
}
}
}

err = w.stream.Send(req)
if err == io.EOF {
// err was io.EOF. The client-side of a stream only gets an EOF on Send
// when the backend closes the stream and wants to return an error
// status. Closing the stream receives the status as an error.
_, err = w.stream.CloseAndRecv()

// Retriable errors mean we should start over and attempt to
// resend the entire buffer via a new stream.
// If not retriable, falling through will return the error received
// from closing the stream.
if shouldRetry(err) {
sent = 0
finishWrite = false
// TODO: Add test case for failure modes of querying progress.
offset, err = w.determineOffset(start)
if err == nil {
continue
}
}
}
if err != nil {
return nil, 0, false, err
}

// Update the immediate stream's sent total and the upload offset with
// the data sent.
sent += len(data)
offset += int64(len(data))

// Not done sending data, do not attempt to commit it yet, loop around
// and send more data.
if recvd-sent > 0 {
continue
}

// Done sending data. Close the stream to "commit" the data sent.
resp, finalized, err := w.commit()
// Retriable errors mean we should start over and attempt to
// resend the entire buffer via a new stream.
// If not retriable, falling through will return the error received
// from closing the stream.
if shouldRetry(err) {
sent = 0
finishWrite = false
offset, err = w.determineOffset(start)
if err == nil {
continue
}
}
if err != nil {
return nil, 0, false, err
}

return resp.GetResource(), offset, finalized, nil
}
}

// determineOffset either returns the offset given to it in the case of a simple
// upload, or queries the write status in the case a resumable upload is being
// used.
//
// This is an experimental API and not intended for public use.
func (w *Writer) determineOffset(offset int64) (int64, error) {
// For a Resumable Upload, we must start from however much data
// was committed.
if w.upid != "" {
committed, err := w.queryProgress()
if err != nil {
return 0, err
}
offset = committed
}
return offset, nil
}

// commit closes the stream to commit the data sent and potentially receive
// the finalized object if finished uploading. If the last request sent
// indicated that writing was finished, the Object will be finalized and
// returned. If not, then the Object will be nil, and the boolean returned will
// be false.
//
// This is an experimental API and not intended for public use.
func (w *Writer) commit() (*storagepb.WriteObjectResponse, bool, error) {
finalized := true
resp, err := w.stream.CloseAndRecv()
if err == io.EOF {
// Closing a stream for a resumable upload finish_write = false results
// in an EOF which can be ignored, as we aren't done uploading yet.
finalized = false
err = nil
}
// Drop the stream reference as it has been closed.
w.stream = nil

return resp, finalized, err
}

// writeObjectSpec constructs a WriteObjectSpec proto using the Writer's
// ObjectAttrs and applies its Conditions. This is only used for gRPC.
//
// This is an experimental API and not intended for public use.
func (w *Writer) writeObjectSpec() (*storagepb.WriteObjectSpec, error) {
spec := &storagepb.WriteObjectSpec{
Resource: w.ObjectAttrs.toProtoObject(w.o.bucket),
}
// WriteObject doesn't support the generation condition, so use -1.
if err := applyCondsProto("WriteObject", -1, w.o.conds, spec); err != nil {
return nil, err
}
return spec, nil
}

// read copies the data in the reader to the given buffer and reports how much
// data was read into the buffer and if there is no more data to read (EOF).
//
// This is an experimental API and not intended for public use.
func read(r io.Reader, buf []byte) (int, bool, error) {
// Set n to -1 to start the Read loop.
var n, recvd int = -1, 0
var err error
for err == nil && n != 0 {
// The routine blocks here until data is received.
n, err = r.Read(buf[recvd:])
recvd += n
}
var done bool
if err == io.EOF {
done = true
err = nil
}
return recvd, done, err
}

func checkCanceled(err error) error {
if status.Code(err) == codes.Canceled {
return context.Canceled
}

return err
}