build(deps): bump cloud.google.com/go/storage from 1.18.2 to 1.22.0
Bumps [cloud.google.com/go/storage](https://github.com/googleapis/google-cloud-go) from 1.18.2 to 1.22.0.
- [Release notes](https://github.com/googleapis/google-cloud-go/releases)
- [Changelog](https://github.com/googleapis/google-cloud-go/blob/main/CHANGES.md)
- [Commits](https://github.com/googleapis/google-cloud-go/compare/storage/v1.18.2...storage/v1.22.0)

---
updated-dependencies:
- dependency-name: cloud.google.com/go/storage
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
This commit is contained in:
parent 2ad93b28db
commit 9336d66c32

809 changed files with 84313 additions and 268932 deletions
vendor/cloud.google.com/go/storage/bucket.go (generated, vendored): 547 changes
@@ -27,12 +27,16 @@ import (
 	"cloud.google.com/go/compute/metadata"
 	"cloud.google.com/go/internal/optional"
 	"cloud.google.com/go/internal/trace"
+	"github.com/googleapis/go-type-adapters/adapters"
 	"golang.org/x/xerrors"
 	"google.golang.org/api/googleapi"
 	"google.golang.org/api/iamcredentials/v1"
 	"google.golang.org/api/iterator"
 	"google.golang.org/api/option"
 	raw "google.golang.org/api/storage/v1"
+	"google.golang.org/genproto/googleapis/storage/v2"
+	storagepb "google.golang.org/genproto/googleapis/storage/v2"
+	"google.golang.org/protobuf/proto"
 )
 
 // BucketHandle provides operations on a Google Cloud Storage bucket.
@@ -44,6 +48,7 @@ type BucketHandle struct {
 	defaultObjectACL ACLHandle
 	conds            *BucketConditions
 	userProject      string // project for Requester Pays buckets
+	retry            *retryConfig
 }
 
 // Bucket returns a BucketHandle, which provides operations on the named bucket.
@@ -54,18 +59,22 @@ type BucketHandle struct {
 // found at:
 // https://cloud.google.com/storage/docs/bucket-naming
 func (c *Client) Bucket(name string) *BucketHandle {
+	retry := c.retry.clone()
 	return &BucketHandle{
 		c:    c,
 		name: name,
 		acl: ACLHandle{
 			c:      c,
 			bucket: name,
+			retry:  retry,
 		},
 		defaultObjectACL: ACLHandle{
 			c:         c,
 			bucket:    name,
 			isDefault: true,
+			retry:     retry,
 		},
+		retry: retry,
 	}
 }
 
@@ -95,7 +104,7 @@ func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *Buck
 	if attrs != nil && attrs.PredefinedDefaultObjectACL != "" {
 		req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL)
 	}
-	return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err })
+	return run(ctx, func() error { _, err := req.Context(ctx).Do(); return err }, b.retry, true)
 }
 
 // Delete deletes the Bucket.
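Note: every call site in this file swaps runWithRetry(ctx, call) for run(ctx, call, retry, isIdempotent). The run helper itself lives in invoke.go, outside this diff; the sketch below is a simplified stand-in inferred from the call sites — retryConfig's fields and the fixed-pause loop are assumptions, not the vendored implementation:

    package main

    import (
    	"context"
    	"errors"
    	"fmt"
    	"time"
    )

    // Hypothetical stand-in for the package's internal retry settings.
    type retryConfig struct {
    	maxAttempts int
    	pause       time.Duration
    }

    // run retries call only when the operation is idempotent, matching the
    // shape of the run(ctx, call, retry, isIdempotent) calls in this diff.
    func run(ctx context.Context, call func() error, retry *retryConfig, isIdempotent bool) error {
    	if retry == nil || !isIdempotent {
    		return call()
    	}
    	var err error
    	for attempt := 0; attempt < retry.maxAttempts; attempt++ {
    		if err = call(); err == nil {
    			return nil
    		}
    		select {
    		case <-ctx.Done():
    			return ctx.Err()
    		case <-time.After(retry.pause):
    		}
    	}
    	return fmt.Errorf("retries exhausted: %w", err)
    }

    func main() {
    	calls := 0
    	err := run(context.Background(), func() error {
    		calls++
    		if calls < 3 {
    			return errors.New("transient")
    		}
    		return nil
    	}, &retryConfig{maxAttempts: 5, pause: 10 * time.Millisecond}, true)
    	fmt.Println(calls, err) // prints: 3 <nil>
    }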
@@ -107,7 +116,8 @@ func (b *BucketHandle) Delete(ctx context.Context) (err error) {
 	if err != nil {
 		return err
 	}
-	return runWithRetry(ctx, func() error { return req.Context(ctx).Do() })
+
+	return run(ctx, func() error { return req.Context(ctx).Do() }, b.retry, true)
 }
 
 func (b *BucketHandle) newDeleteCall() (*raw.BucketsDeleteCall, error) {
@@ -144,6 +154,7 @@ func (b *BucketHandle) DefaultObjectACL() *ACLHandle {
 // for valid object names can be found at:
 // https://cloud.google.com/storage/docs/naming-objects
 func (b *BucketHandle) Object(name string) *ObjectHandle {
+	retry := b.retry.clone()
 	return &ObjectHandle{
 		c:      b.c,
 		bucket: b.name,
@@ -153,9 +164,11 @@ func (b *BucketHandle) Object(name string) *ObjectHandle {
 			bucket:      b.name,
 			object:      name,
 			userProject: b.userProject,
+			retry:       retry,
 		},
 		gen:         -1,
 		userProject: b.userProject,
+		retry:       retry,
 	}
 }
 
@@ -169,10 +182,10 @@ func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error
 		return nil, err
 	}
 	var resp *raw.Bucket
-	err = runWithRetry(ctx, func() error {
+	err = run(ctx, func() error {
 		resp, err = req.Context(ctx).Do()
 		return err
-	})
+	}, b.retry, true)
 	var e *googleapi.Error
 	if ok := xerrors.As(err, &e); ok && e.Code == http.StatusNotFound {
 		return nil, ErrBucketNotExist
@@ -210,12 +223,20 @@ func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (
 	if uattrs.PredefinedDefaultObjectACL != "" {
 		req.PredefinedDefaultObjectAcl(uattrs.PredefinedDefaultObjectACL)
 	}
-	// TODO(jba): retry iff metagen is set?
-	rb, err := req.Context(ctx).Do()
-	if err != nil {
+
+	isIdempotent := b.conds != nil && b.conds.MetagenerationMatch != 0
+
+	var rawBucket *raw.Bucket
+	call := func() error {
+		rb, err := req.Context(ctx).Do()
+		rawBucket = rb
+		return err
+	}
+
+	if err := run(ctx, call, b.retry, isIdempotent); err != nil {
 		return nil, err
 	}
-	return newBucket(rb)
+	return newBucket(rawBucket)
 }
 
 func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPatchCall, error) {
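Note: Update is now retried only when a MetagenerationMatch precondition makes a replayed patch idempotent. A usage sketch under that assumption (bucket name and the VersioningEnabled change are placeholders):

    import (
    	"context"

    	"cloud.google.com/go/storage"
    )

    // enableVersioning is a hypothetical caller exercising the retriable path.
    func enableVersioning(ctx context.Context, client *storage.Client) error {
    	b := client.Bucket("my-bucket") // placeholder bucket name
    	cur, err := b.Attrs(ctx)
    	if err != nil {
    		return err
    	}
    	// With MetagenerationMatch set, isIdempotent above is true and the
    	// patch may be retried; without a condition it runs at most once.
    	_, err = b.If(storage.BucketConditions{MetagenerationMatch: cur.MetaGeneration}).
    		Update(ctx, storage.BucketAttrsToUpdate{VersioningEnabled: true})
    	return err
    }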
@@ -282,8 +303,54 @@ func (b *BucketHandle) SignedURL(object string, opts *SignedURLOptions) (string,
 	return SignedURL(b.name, object, newopts)
 }
 
-// TODO: Add a similar wrapper for GenerateSignedPostPolicyV4 allowing users to
-// omit PrivateKey/SignBytes
+// GenerateSignedPostPolicyV4 generates a PostPolicyV4 value from bucket, object and opts.
+// The generated URL and fields will then allow an unauthenticated client to perform multipart uploads.
+//
+// This method only requires the Expires field in the specified PostPolicyV4Options
+// to be non-nil. If not provided, it attempts to fill the GoogleAccessID and PrivateKey
+// from the GOOGLE_APPLICATION_CREDENTIALS environment variable.
+// If you are authenticating with a custom HTTP client, Service Account based
+// auto-detection will be hindered.
+//
+// If no private key is found, it attempts to use the GoogleAccessID to sign the URL.
+// This requires the IAM Service Account Credentials API to be enabled
+// (https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview)
+// and iam.serviceAccounts.signBlob permissions on the GoogleAccessID service account.
+// If you do not want these fields set for you, you may pass them in through opts or use
+// GenerateSignedPostPolicyV4(bucket, name string, opts *PostPolicyV4Options) instead.
+func (b *BucketHandle) GenerateSignedPostPolicyV4(object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) {
+	if opts.GoogleAccessID != "" && (opts.SignRawBytes != nil || opts.SignBytes != nil || len(opts.PrivateKey) > 0) {
+		return GenerateSignedPostPolicyV4(b.name, object, opts)
+	}
+	// Make a copy of opts so we don't modify the pointer parameter.
+	newopts := opts.clone()
+
+	if newopts.GoogleAccessID == "" {
+		id, err := b.detectDefaultGoogleAccessID()
+		if err != nil {
+			return nil, err
+		}
+		newopts.GoogleAccessID = id
+	}
+	if newopts.SignBytes == nil && newopts.SignRawBytes == nil && len(newopts.PrivateKey) == 0 {
+		if b.c.creds != nil && len(b.c.creds.JSON) > 0 {
+			var sa struct {
+				PrivateKey string `json:"private_key"`
+			}
+			err := json.Unmarshal(b.c.creds.JSON, &sa)
+			if err == nil && sa.PrivateKey != "" {
+				newopts.PrivateKey = []byte(sa.PrivateKey)
+			}
+		}
+
+		// Don't error out if we can't unmarshal the private key from the client,
+		// fallback to the default sign function for the service account.
+		if len(newopts.PrivateKey) == 0 {
+			newopts.SignRawBytes = b.defaultSignBytesFunc(newopts.GoogleAccessID)
+		}
+	}
+	return GenerateSignedPostPolicyV4(b.name, object, newopts)
+}
 
 func (b *BucketHandle) detectDefaultGoogleAccessID() (string, error) {
 	returnErr := errors.New("no credentials found on client and not on GCE (Google Compute Engine)")
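Note: the new handle-based wrapper lets callers omit GoogleAccessID/PrivateKey when the client already carries credentials. A minimal usage sketch (bucket and object names are placeholders):

    import (
    	"time"

    	"cloud.google.com/go/storage"
    )

    // newUploadPolicy is a hypothetical caller of the new wrapper. The
    // returned policy's URL is the POST target, and its Fields carries the
    // signed form fields an unauthenticated client must include.
    func newUploadPolicy(client *storage.Client) (*storage.PostPolicyV4, error) {
    	return client.Bucket("my-bucket").GenerateSignedPostPolicyV4(
    		"uploads/avatar.png", // placeholder object name
    		&storage.PostPolicyV4Options{
    			Expires: time.Now().Add(15 * time.Minute),
    		},
    	)
    }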
@@ -461,6 +528,12 @@ type BucketAttrs struct {
 	// The project number of the project the bucket belongs to.
 	// This field is read-only.
 	ProjectNumber uint64
+
+	// RPO configures the Recovery Point Objective (RPO) policy of the bucket.
+	// Set to RPOAsyncTurbo to turn on Turbo Replication for a bucket.
+	// See https://cloud.google.com/storage/docs/managing-turbo-replication for
+	// more information.
+	RPO RPO
 }
 
 // BucketPolicyOnly is an alias for UniformBucketLevelAccess.
@@ -728,9 +801,40 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
 		Etag:          b.Etag,
 		LocationType:  b.LocationType,
 		ProjectNumber: b.ProjectNumber,
+		RPO:           toRPO(b),
 	}, nil
 }
 
+func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs {
+	if b == nil {
+		return nil
+	}
+	return &BucketAttrs{
+		Name:                     parseBucketName(b.GetName()),
+		Location:                 b.GetLocation(),
+		MetaGeneration:           b.GetMetageneration(),
+		DefaultEventBasedHold:    b.GetDefaultEventBasedHold(),
+		StorageClass:             b.GetStorageClass(),
+		Created:                  b.GetCreateTime().AsTime(),
+		VersioningEnabled:        b.GetVersioning().GetEnabled(),
+		ACL:                      toBucketACLRulesFromProto(b.GetAcl()),
+		DefaultObjectACL:         toObjectACLRulesFromProto(b.GetDefaultObjectAcl()),
+		Labels:                   b.GetLabels(),
+		RequesterPays:            b.GetBilling().GetRequesterPays(),
+		Lifecycle:                toLifecycleFromProto(b.GetLifecycle()),
+		RetentionPolicy:          toRetentionPolicyFromProto(b.GetRetentionPolicy()),
+		CORS:                     toCORSFromProto(b.GetCors()),
+		Encryption:               toBucketEncryptionFromProto(b.GetEncryption()),
+		Logging:                  toBucketLoggingFromProto(b.GetLogging()),
+		Website:                  toBucketWebsiteFromProto(b.GetWebsite()),
+		BucketPolicyOnly:         toBucketPolicyOnlyFromProto(b.GetIamConfig()),
+		UniformBucketLevelAccess: toUniformBucketLevelAccessFromProto(b.GetIamConfig()),
+		PublicAccessPrevention:   toPublicAccessPreventionFromProto(b.GetIamConfig()),
+		LocationType:             b.GetLocationType(),
+		RPO:                      toRPOFromProto(b),
+	}
+}
+
 // toRawBucket copies the editable attribute from b to the raw library's Bucket type.
 func (b *BucketAttrs) toRawBucket() *raw.Bucket {
 	// Copy label map.
@@ -780,6 +884,67 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket {
 		Logging:          b.Logging.toRawBucketLogging(),
 		Website:          b.Website.toRawBucketWebsite(),
 		IamConfiguration: bktIAM,
+		Rpo:              b.RPO.String(),
 	}
 }
 
+func (b *BucketAttrs) toProtoBucket() *storagepb.Bucket {
+	if b == nil {
+		return &storagepb.Bucket{}
+	}
+
+	// Copy label map.
+	var labels map[string]string
+	if len(b.Labels) > 0 {
+		labels = make(map[string]string, len(b.Labels))
+		for k, v := range b.Labels {
+			labels[k] = v
+		}
+	}
+
+	// Ignore VersioningEnabled if it is false. This is OK because
+	// we only call this method when creating a bucket, and by default
+	// new buckets have versioning off.
+	var v *storagepb.Bucket_Versioning
+	if b.VersioningEnabled {
+		v = &storagepb.Bucket_Versioning{Enabled: true}
+	}
+	var bb *storagepb.Bucket_Billing
+	if b.RequesterPays {
+		bb = &storage.Bucket_Billing{RequesterPays: true}
+	}
+	var bktIAM *storagepb.Bucket_IamConfig
+	if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled || b.PublicAccessPrevention != PublicAccessPreventionUnknown {
+		bktIAM = &storagepb.Bucket_IamConfig{}
+		if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled {
+			bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{
+				Enabled: true,
+			}
+		}
+		// TODO(noahdietz): This will be switched to a string.
+		//
+		// if b.PublicAccessPrevention != PublicAccessPreventionUnknown {
+		// 	bktIAM.PublicAccessPrevention = b.PublicAccessPrevention.String()
+		// }
+	}
+
+	return &storagepb.Bucket{
+		Name:             b.Name,
+		Location:         b.Location,
+		StorageClass:     b.StorageClass,
+		Acl:              toProtoBucketACL(b.ACL),
+		DefaultObjectAcl: toProtoObjectACL(b.DefaultObjectACL),
+		Versioning:       v,
+		Labels:           labels,
+		Billing:          bb,
+		Lifecycle:        toProtoLifecycle(b.Lifecycle),
+		RetentionPolicy:  b.RetentionPolicy.toProtoRetentionPolicy(),
+		Cors:             toProtoCORS(b.CORS),
+		Encryption:       b.Encryption.toProtoBucketEncryption(),
+		Logging:          b.Logging.toProtoBucketLogging(),
+		Website:          b.Website.toProtoBucketWebsite(),
+		IamConfig:        bktIAM,
+		Rpo:              b.RPO.String(),
+	}
+}
+
@@ -889,6 +1054,12 @@ type BucketAttrsToUpdate struct {
 	// See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch.
 	PredefinedDefaultObjectACL string
 
+	// RPO configures the Recovery Point Objective (RPO) policy of the bucket.
+	// Set to RPOAsyncTurbo to turn on Turbo Replication for a bucket.
+	// See https://cloud.google.com/storage/docs/managing-turbo-replication for
+	// more information.
+	RPO RPO
+
 	setLabels    map[string]string
 	deleteLabels map[string]bool
 }
@@ -1001,7 +1172,10 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
 		rb.DefaultObjectAcl = nil
 		rb.ForceSendFields = append(rb.ForceSendFields, "DefaultObjectAcl")
 	}
+
 	rb.StorageClass = ua.StorageClass
+	rb.Rpo = ua.RPO.String()
+
 	if ua.setLabels != nil || ua.deleteLabels != nil {
 		rb.Labels = map[string]string{}
 		for k, v := range ua.setLabels {
@@ -1081,10 +1255,10 @@ func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error {
 		metageneration = b.conds.MetagenerationMatch
 	}
 	req := b.c.raw.Buckets.LockRetentionPolicy(b.name, metageneration)
-	return runWithRetry(ctx, func() error {
+	return run(ctx, func() error {
 		_, err := req.Context(ctx).Do()
 		return err
-	})
+	}, b.retry, true)
 }
 
 // applyBucketConds modifies the provided call using the conditions in conds.
@@ -1110,6 +1284,32 @@ func applyBucketConds(method string, conds *BucketConditions, call interface{})
 	return nil
 }
 
+// applyBucketCondsProto modifies the provided request message using the conditions
+// in conds. msg is a protobuf Message that has fields if_metageneration_match
+// and if_metageneration_not_match.
+func applyBucketCondsProto(method string, conds *BucketConditions, msg proto.Message) error {
+	rmsg := msg.ProtoReflect()
+
+	if conds == nil {
+		return nil
+	}
+	if err := conds.validate(method); err != nil {
+		return err
+	}
+
+	switch {
+	case conds.MetagenerationMatch != 0:
+		if !setConditionProtoField(rmsg, "if_metageneration_match", conds.MetagenerationMatch) {
+			return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method)
+		}
+	case conds.MetagenerationNotMatch != 0:
+		if !setConditionProtoField(rmsg, "if_metageneration_not_match", conds.MetagenerationNotMatch) {
+			return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method)
+		}
+	}
+	return nil
+}
+
 func (rp *RetentionPolicy) toRawRetentionPolicy() *raw.BucketRetentionPolicy {
 	if rp == nil {
 		return nil
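Note: setConditionProtoField is defined elsewhere in the package and does not appear in this diff. A sketch of what such a helper plausibly does with protoreflect — this body is inferred from the call sites above, not copied from the vendored code:

    import "google.golang.org/protobuf/reflect/protoreflect"

    // setConditionProtoField (sketch): set an int64 field such as
    // "if_metageneration_match" by name, reporting whether the message
    // declares that field at all.
    func setConditionProtoField(m protoreflect.Message, field string, value int64) bool {
    	if fd := m.Descriptor().Fields().ByName(protoreflect.Name(field)); fd != nil {
    		m.Set(fd, protoreflect.ValueOfInt64(value))
    		return true
    	}
    	return false
    }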
@@ -1119,6 +1319,15 @@ func (rp *RetentionPolicy) toRawRetentionPolicy() *raw.BucketRetentionPolicy {
 	}
 }
 
+func (rp *RetentionPolicy) toProtoRetentionPolicy() *storagepb.Bucket_RetentionPolicy {
+	if rp == nil {
+		return nil
+	}
+	return &storagepb.Bucket_RetentionPolicy{
+		RetentionPeriod: int64(rp.RetentionPeriod / time.Second),
+	}
+}
+
 func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error) {
 	if rp == nil {
 		return nil, nil
@@ -1134,6 +1343,17 @@ func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error)
 	}, nil
 }
 
+func toRetentionPolicyFromProto(rp *storagepb.Bucket_RetentionPolicy) *RetentionPolicy {
+	if rp == nil {
+		return nil
+	}
+	return &RetentionPolicy{
+		RetentionPeriod: time.Duration(rp.GetRetentionPeriod()) * time.Second,
+		EffectiveTime:   rp.GetEffectiveTime().AsTime(),
+		IsLocked:        rp.GetIsLocked(),
+	}
+}
+
 func toRawCORS(c []CORS) []*raw.BucketCors {
 	var out []*raw.BucketCors
 	for _, v := range c {
@@ -1147,6 +1367,19 @@ func toRawCORS(c []CORS) []*raw.BucketCors {
 	return out
 }
 
+func toProtoCORS(c []CORS) []*storagepb.Bucket_Cors {
+	var out []*storagepb.Bucket_Cors
+	for _, v := range c {
+		out = append(out, &storagepb.Bucket_Cors{
+			MaxAgeSeconds:  int32(v.MaxAge / time.Second),
+			Method:         v.Methods,
+			Origin:         v.Origins,
+			ResponseHeader: v.ResponseHeaders,
+		})
+	}
+	return out
+}
+
 func toCORS(rc []*raw.BucketCors) []CORS {
 	var out []CORS
 	for _, v := range rc {
@@ -1160,6 +1393,19 @@ func toCORS(rc []*raw.BucketCors) []CORS {
 	return out
 }
 
+func toCORSFromProto(rc []*storagepb.Bucket_Cors) []CORS {
+	var out []CORS
+	for _, v := range rc {
+		out = append(out, CORS{
+			MaxAge:          time.Duration(v.GetMaxAgeSeconds()) * time.Second,
+			Methods:         v.GetMethod(),
+			Origins:         v.GetOrigin(),
+			ResponseHeaders: v.GetResponseHeader(),
+		})
+	}
+	return out
+}
+
 func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle {
 	var rl raw.BucketLifecycle
 	if len(l.Rules) == 0 {
@@ -1203,6 +1449,51 @@ func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle {
 	return &rl
 }
 
+func toProtoLifecycle(l Lifecycle) *storagepb.Bucket_Lifecycle {
+	var rl storagepb.Bucket_Lifecycle
+
+	for _, r := range l.Rules {
+		rr := &storagepb.Bucket_Lifecycle_Rule{
+			Action: &storagepb.Bucket_Lifecycle_Rule_Action{
+				Type:         r.Action.Type,
+				StorageClass: r.Action.StorageClass,
+			},
+			Condition: &storagepb.Bucket_Lifecycle_Rule_Condition{
+				// Note: The Apiary types use int64 (even though the Discovery
+				// doc states "format: int32"), so the client types used int64,
+				// but the proto uses int32 so we have a potentially lossy
+				// conversion.
+				AgeDays:                 proto.Int32(int32(r.Condition.AgeInDays)),
+				DaysSinceCustomTime:     proto.Int32(int32(r.Condition.DaysSinceCustomTime)),
+				DaysSinceNoncurrentTime: proto.Int32(int32(r.Condition.DaysSinceNoncurrentTime)),
+				MatchesStorageClass:     r.Condition.MatchesStorageClasses,
+				NumNewerVersions:        proto.Int32(int32(r.Condition.NumNewerVersions)),
+			},
+		}
+
+		switch r.Condition.Liveness {
+		case LiveAndArchived:
+			rr.Condition.IsLive = nil
+		case Live:
+			rr.Condition.IsLive = proto.Bool(true)
+		case Archived:
+			rr.Condition.IsLive = proto.Bool(false)
+		}
+
+		if !r.Condition.CreatedBefore.IsZero() {
+			rr.Condition.CreatedBefore = adapters.TimeToProtoDate(r.Condition.CreatedBefore)
+		}
+		if !r.Condition.CustomTimeBefore.IsZero() {
+			rr.Condition.CustomTimeBefore = adapters.TimeToProtoDate(r.Condition.CustomTimeBefore)
+		}
+		if !r.Condition.NoncurrentTimeBefore.IsZero() {
+			rr.Condition.NoncurrentTimeBefore = adapters.TimeToProtoDate(r.Condition.NoncurrentTimeBefore)
+		}
+		rl.Rule = append(rl.Rule, rr)
+	}
+	return &rl
+}
+
 func toLifecycle(rl *raw.BucketLifecycle) Lifecycle {
 	var l Lifecycle
 	if rl == nil {
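Note: the "potentially lossy" comment above refers to plain Go integer conversion, which wraps silently rather than saturating. A self-contained illustration:

    package main

    import "fmt"

    func main() {
    	// AgeInDays is int64 in the client types but int32 in the proto,
    	// so out-of-range values wrap modulo 2^32.
    	days := int64(3_000_000_000) // exceeds math.MaxInt32
    	fmt.Println(int32(days))     // prints -1294967296, not 3000000000
    }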
@@ -1245,6 +1536,48 @@ func toLifecycle(rl *raw.BucketLifecycle) Lifecycle {
 	return l
 }
 
+func toLifecycleFromProto(rl *storagepb.Bucket_Lifecycle) Lifecycle {
+	var l Lifecycle
+	if rl == nil {
+		return l
+	}
+	for _, rr := range rl.GetRule() {
+		r := LifecycleRule{
+			Action: LifecycleAction{
+				Type:         rr.GetAction().GetType(),
+				StorageClass: rr.GetAction().GetStorageClass(),
+			},
+			Condition: LifecycleCondition{
+				AgeInDays:               int64(rr.GetCondition().GetAgeDays()),
+				DaysSinceCustomTime:     int64(rr.GetCondition().GetDaysSinceCustomTime()),
+				DaysSinceNoncurrentTime: int64(rr.GetCondition().GetDaysSinceNoncurrentTime()),
+				MatchesStorageClasses:   rr.GetCondition().GetMatchesStorageClass(),
+				NumNewerVersions:        int64(rr.GetCondition().GetNumNewerVersions()),
+			},
+		}
+
+		if rr.GetCondition().IsLive == nil {
+			r.Condition.Liveness = LiveAndArchived
+		} else if rr.GetCondition().GetIsLive() {
+			r.Condition.Liveness = Live
+		} else {
+			r.Condition.Liveness = Archived
+		}
+
+		if rr.GetCondition().GetCreatedBefore() != nil {
+			r.Condition.CreatedBefore = adapters.ProtoDateToUTCTime(rr.GetCondition().GetCreatedBefore())
+		}
+		if rr.GetCondition().GetCustomTimeBefore() != nil {
+			r.Condition.CustomTimeBefore = adapters.ProtoDateToUTCTime(rr.GetCondition().GetCustomTimeBefore())
+		}
+		if rr.GetCondition().GetNoncurrentTimeBefore() != nil {
+			r.Condition.NoncurrentTimeBefore = adapters.ProtoDateToUTCTime(rr.GetCondition().GetNoncurrentTimeBefore())
+		}
+		l.Rules = append(l.Rules, r)
+	}
+	return l
+}
+
 func (e *BucketEncryption) toRawBucketEncryption() *raw.BucketEncryption {
 	if e == nil {
 		return nil
|
@ -1254,6 +1587,15 @@ func (e *BucketEncryption) toRawBucketEncryption() *raw.BucketEncryption {
|
|||
}
|
||||
}
|
||||
|
||||
func (e *BucketEncryption) toProtoBucketEncryption() *storagepb.Bucket_Encryption {
|
||||
if e == nil {
|
||||
return nil
|
||||
}
|
||||
return &storagepb.Bucket_Encryption{
|
||||
DefaultKmsKey: e.DefaultKMSKeyName,
|
||||
}
|
||||
}
|
||||
|
||||
func toBucketEncryption(e *raw.BucketEncryption) *BucketEncryption {
|
||||
if e == nil {
|
||||
return nil
|
||||
|
|
@@ -1261,6 +1603,13 @@ func toBucketEncryption(e *raw.BucketEncryption) *BucketEncryption {
 	return &BucketEncryption{DefaultKMSKeyName: e.DefaultKmsKeyName}
 }
 
+func toBucketEncryptionFromProto(e *storagepb.Bucket_Encryption) *BucketEncryption {
+	if e == nil {
+		return nil
+	}
+	return &BucketEncryption{DefaultKMSKeyName: e.GetDefaultKmsKey()}
+}
+
 func (b *BucketLogging) toRawBucketLogging() *raw.BucketLogging {
 	if b == nil {
 		return nil
|
@ -1271,6 +1620,16 @@ func (b *BucketLogging) toRawBucketLogging() *raw.BucketLogging {
|
|||
}
|
||||
}
|
||||
|
||||
func (b *BucketLogging) toProtoBucketLogging() *storagepb.Bucket_Logging {
|
||||
if b == nil {
|
||||
return nil
|
||||
}
|
||||
return &storagepb.Bucket_Logging{
|
||||
LogBucket: b.LogBucket,
|
||||
LogObjectPrefix: b.LogObjectPrefix,
|
||||
}
|
||||
}
|
||||
|
||||
func toBucketLogging(b *raw.BucketLogging) *BucketLogging {
|
||||
if b == nil {
|
||||
return nil
|
||||
|
|
@ -1281,6 +1640,16 @@ func toBucketLogging(b *raw.BucketLogging) *BucketLogging {
|
|||
}
|
||||
}
|
||||
|
||||
func toBucketLoggingFromProto(b *storagepb.Bucket_Logging) *BucketLogging {
|
||||
if b == nil {
|
||||
return nil
|
||||
}
|
||||
return &BucketLogging{
|
||||
LogBucket: b.GetLogBucket(),
|
||||
LogObjectPrefix: b.GetLogObjectPrefix(),
|
||||
}
|
||||
}
|
||||
|
||||
func (w *BucketWebsite) toRawBucketWebsite() *raw.BucketWebsite {
|
||||
if w == nil {
|
||||
return nil
|
||||
|
|
@ -1291,6 +1660,16 @@ func (w *BucketWebsite) toRawBucketWebsite() *raw.BucketWebsite {
|
|||
}
|
||||
}
|
||||
|
||||
func (w *BucketWebsite) toProtoBucketWebsite() *storagepb.Bucket_Website {
|
||||
if w == nil {
|
||||
return nil
|
||||
}
|
||||
return &storagepb.Bucket_Website{
|
||||
MainPageSuffix: w.MainPageSuffix,
|
||||
NotFoundPage: w.NotFoundPage,
|
||||
}
|
||||
}
|
||||
|
||||
func toBucketWebsite(w *raw.BucketWebsite) *BucketWebsite {
|
||||
if w == nil {
|
||||
return nil
|
||||
|
|
@ -1301,6 +1680,16 @@ func toBucketWebsite(w *raw.BucketWebsite) *BucketWebsite {
|
|||
}
|
||||
}
|
||||
|
||||
func toBucketWebsiteFromProto(w *storagepb.Bucket_Website) *BucketWebsite {
|
||||
if w == nil {
|
||||
return nil
|
||||
}
|
||||
return &BucketWebsite{
|
||||
MainPageSuffix: w.GetMainPageSuffix(),
|
||||
NotFoundPage: w.GetNotFoundPage(),
|
||||
}
|
||||
}
|
||||
|
||||
func toBucketPolicyOnly(b *raw.BucketIamConfiguration) BucketPolicyOnly {
|
||||
if b == nil || b.BucketPolicyOnly == nil || !b.BucketPolicyOnly.Enabled {
|
||||
return BucketPolicyOnly{}
|
||||
|
|
@ -1317,6 +1706,16 @@ func toBucketPolicyOnly(b *raw.BucketIamConfiguration) BucketPolicyOnly {
|
|||
}
|
||||
}
|
||||
|
||||
func toBucketPolicyOnlyFromProto(b *storagepb.Bucket_IamConfig) BucketPolicyOnly {
|
||||
if b == nil || !b.GetUniformBucketLevelAccess().GetEnabled() {
|
||||
return BucketPolicyOnly{}
|
||||
}
|
||||
return BucketPolicyOnly{
|
||||
Enabled: true,
|
||||
LockedTime: b.GetUniformBucketLevelAccess().GetLockTime().AsTime(),
|
||||
}
|
||||
}
|
||||
|
||||
func toUniformBucketLevelAccess(b *raw.BucketIamConfiguration) UniformBucketLevelAccess {
|
||||
if b == nil || b.UniformBucketLevelAccess == nil || !b.UniformBucketLevelAccess.Enabled {
|
||||
return UniformBucketLevelAccess{}
|
||||
|
|
@ -1333,6 +1732,16 @@ func toUniformBucketLevelAccess(b *raw.BucketIamConfiguration) UniformBucketLeve
|
|||
}
|
||||
}
|
||||
|
||||
func toUniformBucketLevelAccessFromProto(b *storagepb.Bucket_IamConfig) UniformBucketLevelAccess {
|
||||
if b == nil || !b.GetUniformBucketLevelAccess().GetEnabled() {
|
||||
return UniformBucketLevelAccess{}
|
||||
}
|
||||
return UniformBucketLevelAccess{
|
||||
Enabled: true,
|
||||
LockedTime: b.GetUniformBucketLevelAccess().GetLockTime().AsTime(),
|
||||
}
|
||||
}
|
||||
|
||||
func toPublicAccessPrevention(b *raw.BucketIamConfiguration) PublicAccessPrevention {
|
||||
if b == nil {
|
||||
return PublicAccessPreventionUnknown
|
||||
|
|
@ -1347,6 +1756,48 @@ func toPublicAccessPrevention(b *raw.BucketIamConfiguration) PublicAccessPrevent
|
|||
}
|
||||
}
|
||||
|
||||
func toPublicAccessPreventionFromProto(b *storagepb.Bucket_IamConfig) PublicAccessPrevention {
|
||||
if b == nil {
|
||||
return PublicAccessPreventionUnknown
|
||||
}
|
||||
switch b.GetPublicAccessPrevention() {
|
||||
case publicAccessPreventionInherited, publicAccessPreventionUnspecified:
|
||||
return PublicAccessPreventionInherited
|
||||
case publicAccessPreventionEnforced:
|
||||
return PublicAccessPreventionEnforced
|
||||
default:
|
||||
return PublicAccessPreventionUnknown
|
||||
}
|
||||
}
|
||||
|
||||
func toRPO(b *raw.Bucket) RPO {
|
||||
if b == nil {
|
||||
return RPOUnknown
|
||||
}
|
||||
switch b.Rpo {
|
||||
case rpoDefault:
|
||||
return RPODefault
|
||||
case rpoAsyncTurbo:
|
||||
return RPOAsyncTurbo
|
||||
default:
|
||||
return RPOUnknown
|
||||
}
|
||||
}
|
||||
|
||||
func toRPOFromProto(b *storagepb.Bucket) RPO {
|
||||
if b == nil {
|
||||
return RPOUnknown
|
||||
}
|
||||
switch b.GetRpo() {
|
||||
case rpoDefault:
|
||||
return RPODefault
|
||||
case rpoAsyncTurbo:
|
||||
return RPOAsyncTurbo
|
||||
default:
|
||||
return RPOUnknown
|
||||
}
|
||||
}
|
||||
|
||||
// Objects returns an iterator over the objects in the bucket that match the
|
||||
// Query q. If q is nil, no filtering is done. Objects will be iterated over
|
||||
// lexicographically by name.
|
||||
|
|
@ -1367,6 +1818,33 @@ func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator {
|
|||
return it
|
||||
}
|
||||
|
||||
// Retryer returns a bucket handle that is configured with custom retry
|
||||
// behavior as specified by the options that are passed to it. All operations
|
||||
// on the new handle will use the customized retry configuration.
|
||||
// Retry options set on a object handle will take precedence over options set on
|
||||
// the bucket handle.
|
||||
// These retry options will merge with the client's retry configuration (if set)
|
||||
// for the returned handle. Options passed into this method will take precedence
|
||||
// over retry options on the client. Note that you must explicitly pass in each
|
||||
// option you want to override.
|
||||
func (b *BucketHandle) Retryer(opts ...RetryOption) *BucketHandle {
|
||||
b2 := *b
|
||||
var retry *retryConfig
|
||||
if b.retry != nil {
|
||||
// merge the options with the existing retry
|
||||
retry = b.retry
|
||||
} else {
|
||||
retry = &retryConfig{}
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt.apply(retry)
|
||||
}
|
||||
b2.retry = retry
|
||||
b2.acl.retry = retry
|
||||
b2.defaultObjectACL.retry = retry
|
||||
return &b2
|
||||
}
|
||||
|
||||
// An ObjectIterator is an iterator over ObjectAttrs.
|
||||
//
|
||||
// Note: This iterator is not safe for concurrent operations without explicit synchronization.
|
||||
|
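Note: a usage sketch for the new per-bucket Retryer, assuming the package's RetryOption helpers WithBackoff and WithPolicy (backoff values here are arbitrary placeholders):

    import (
    	"context"
    	"time"

    	"cloud.google.com/go/storage"
    	"github.com/googleapis/gax-go/v2"
    )

    // attrsWithCustomRetry is a hypothetical caller of the new Retryer.
    func attrsWithCustomRetry(ctx context.Context, client *storage.Client) (*storage.BucketAttrs, error) {
    	b := client.Bucket("my-bucket").Retryer(
    		storage.WithBackoff(gax.Backoff{
    			Initial: 2 * time.Second,
    			Max:     30 * time.Second,
    		}),
    		// RetryAlways opts non-idempotent calls into retries as well.
    		storage.WithPolicy(storage.RetryAlways),
    	)
    	return b.Attrs(ctx) // runs with the merged, customized retry config
    }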
|
@ -1422,6 +1900,7 @@ func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error)
|
|||
req.StartOffset(it.query.StartOffset)
|
||||
req.EndOffset(it.query.EndOffset)
|
||||
req.Versions(it.query.Versions)
|
||||
req.IncludeTrailingDelimiter(it.query.IncludeTrailingDelimiter)
|
||||
if len(it.query.fieldSelection) > 0 {
|
||||
req.Fields("nextPageToken", googleapi.Field(it.query.fieldSelection))
|
||||
}
|
||||
|
|
@ -1434,10 +1913,10 @@ func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error)
|
|||
}
|
||||
var resp *raw.Objects
|
||||
var err error
|
||||
err = runWithRetry(it.ctx, func() error {
|
||||
err = run(it.ctx, func() error {
|
||||
resp, err = req.Context(it.ctx).Do()
|
||||
return err
|
||||
})
|
||||
}, it.bucket.retry, true)
|
||||
if err != nil {
|
||||
var e *googleapi.Error
|
||||
if ok := xerrors.As(err, &e); ok && e.Code == http.StatusNotFound {
|
||||
|
|
@ -1518,10 +1997,10 @@ func (it *BucketIterator) fetch(pageSize int, pageToken string) (token string, e
|
|||
req.MaxResults(int64(pageSize))
|
||||
}
|
||||
var resp *raw.Buckets
|
||||
err = runWithRetry(it.ctx, func() error {
|
||||
err = run(it.ctx, func() error {
|
||||
resp, err = req.Context(it.ctx).Do()
|
||||
return err
|
||||
})
|
||||
}, it.client.retry, true)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
|
@ -1534,3 +2013,39 @@ func (it *BucketIterator) fetch(pageSize int, pageToken string) (token string, e
|
|||
}
|
||||
return resp.NextPageToken, nil
|
||||
}
|
||||
|
||||
// RPO (Recovery Point Objective) configures the turbo replication feature. See
|
||||
// https://cloud.google.com/storage/docs/managing-turbo-replication for more information.
|
||||
type RPO int
|
||||
|
||||
const (
|
||||
// RPOUnknown is a zero value. It may be returned from bucket.Attrs() if RPO
|
||||
// is not present in the bucket metadata, that is, the bucket is not dual-region.
|
||||
// This value is also used if the RPO field is not set in a call to GCS.
|
||||
RPOUnknown RPO = iota
|
||||
|
||||
// RPODefault represents default replication. It is used to reset RPO on an
|
||||
// existing bucket that has this field set to RPOAsyncTurbo. Otherwise it
|
||||
// is equivalent to RPOUnknown, and is always ignored. This value is valid
|
||||
// for dual- or multi-region buckets.
|
||||
RPODefault
|
||||
|
||||
// RPOAsyncTurbo represents turbo replication and is used to enable Turbo
|
||||
// Replication on a bucket. This value is only valid for dual-region buckets.
|
||||
RPOAsyncTurbo
|
||||
|
||||
rpoUnknown string = ""
|
||||
rpoDefault = "DEFAULT"
|
||||
rpoAsyncTurbo = "ASYNC_TURBO"
|
||||
)
|
||||
|
||||
func (rpo RPO) String() string {
|
||||
switch rpo {
|
||||
case RPODefault:
|
||||
return rpoDefault
|
||||
case RPOAsyncTurbo:
|
||||
return rpoAsyncTurbo
|
||||
default:
|
||||
return rpoUnknown
|
||||
}
|
||||
}
|
||||
|
|
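Note: tying the RPO additions together, a sketch of enabling and later resetting Turbo Replication (project, bucket, and dual-region values are placeholders):

    import (
    	"context"

    	"cloud.google.com/go/storage"
    )

    // turboThenDefault is a hypothetical round trip over the new RPO field.
    func turboThenDefault(ctx context.Context, client *storage.Client) error {
    	attrs := &storage.BucketAttrs{
    		Location: "NAM4", // placeholder dual-region; AsyncTurbo is only valid there
    		RPO:      storage.RPOAsyncTurbo,
    	}
    	if err := client.Bucket("my-bucket").Create(ctx, "my-project", attrs); err != nil {
    		return err
    	}
    	// RPODefault resets a bucket previously set to RPOAsyncTurbo.
    	_, err := client.Bucket("my-bucket").Update(ctx, storage.BucketAttrsToUpdate{
    		RPO: storage.RPODefault,
    	})
    	return err
    }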