vendor: Update osbuild/images to commit dd48a38be218

This is needed for the test_distro.NewTestDistro change.
Brian C. Lane 2023-09-15 08:18:06 -07:00 committed by Achilleas Koutsou
parent eab16830aa
commit 1b65f15449
345 changed files with 276130 additions and 14546 deletions


@ -1,5 +1,12 @@
# Release History
## 1.6.1 (2023-06-06)
### Bugs Fixed
* Retry policy always clones the underlying `*http.Request` before invoking the next policy.
* Added some non-standard error codes to the list of error codes for unregistered resource providers.
* Fixed an issue in `azcore.NewClient()` and `arm.NewClient()` that could cause an incorrect module name to be used in telemetry.
## 1.6.0 (2023-05-04)
### Features Added


@ -76,12 +76,13 @@ type Client struct {
}
// NewClient creates a new Client instance with the provided values.
// - clientName - the fully qualified name of the client ("package.Client"); this is used by the tracing provider when creating spans
// - clientName - the fully qualified name of the client ("module/package.Client"); this is used by the telemetry policy and tracing provider.
// if module and package are the same value, the "module/" prefix can be omitted.
// - moduleVersion - the semantic version of the containing module; used by the telemetry policy
// - plOpts - pipeline configuration options; can be the zero-value
// - options - optional client configurations; pass nil to accept the default values
func NewClient(clientName, moduleVersion string, plOpts runtime.PipelineOptions, options *ClientOptions) (*Client, error) {
pkg, err := shared.ExtractPackageName(clientName)
mod, client, err := shared.ExtractModuleName(clientName)
if err != nil {
return nil, err
}
@ -96,9 +97,9 @@ func NewClient(clientName, moduleVersion string, plOpts runtime.PipelineOptions,
}
}
pl := runtime.NewPipeline(pkg, moduleVersion, plOpts, options)
pl := runtime.NewPipeline(mod, moduleVersion, plOpts, options)
tr := options.TracingProvider.NewTracer(clientName, moduleVersion)
tr := options.TracingProvider.NewTracer(client, moduleVersion)
return &Client{pl: pl, tr: tr}, nil
}

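A hedged usage sketch, not part of this diff: with the new clientName form, the "module/" segment feeds the telemetry policy while the package-qualified client name is handed to the tracing provider. The module, client, and version names below are hypothetical; the imports assumed are the vendored github.com/Azure/azure-sdk-for-go/sdk/azcore and .../azcore/runtime packages.

func newWidgetsClient(options *azcore.ClientOptions) (*azcore.Client, error) {
	// "armfoo" is reported by telemetry; "armfoo.WidgetsClient" names the tracer.
	return azcore.NewClient("armfoo/armfoo.WidgetsClient", "v0.1.0", runtime.PipelineOptions{}, options)
}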

@ -32,5 +32,5 @@ const (
Module = "azcore"
// Version is the semantic version (see http://semver.org) of this module.
Version = "v1.6.0"
Version = "v1.6.1"
)


@ -13,7 +13,6 @@ import (
"reflect"
"regexp"
"strconv"
"strings"
"time"
)
@ -79,14 +78,26 @@ func ValidateModVer(moduleVersion string) error {
return nil
}
// ExtractPackageName returns "package" from "package.Client".
// ExtractModuleName returns "module", "package.Client" from "module/package.Client" or
// "package", "package.Client" from "package.Client" when there's no "module/" prefix.
// If clientName is malformed, an error is returned.
func ExtractPackageName(clientName string) (string, error) {
pkg, client, ok := strings.Cut(clientName, ".")
if !ok {
return "", fmt.Errorf("missing . in clientName %s", clientName)
} else if pkg == "" || client == "" {
return "", fmt.Errorf("malformed clientName %s", clientName)
func ExtractModuleName(clientName string) (string, string, error) {
// uses unnamed capturing for "module", "package.Client", and "package"
regex, err := regexp.Compile(`^(?:([a-z0-9]+)/)?(([a-z0-9]+)\.(?:[A-Za-z0-9]+))$`)
if err != nil {
return "", "", err
}
return pkg, nil
matches := regex.FindStringSubmatch(clientName)
if len(matches) < 4 {
return "", "", fmt.Errorf("malformed clientName %s", clientName)
}
// the first match is the entire string, the second is "module", the third is
// "package.Client" and the fourth is "package".
// if there was no "module/" prefix, the second match will be the empty string
if matches[1] != "" {
return matches[1], matches[2], nil
}
return matches[3], matches[2], nil
}

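A standalone illustration, not part of the diff, of how the new regular expression splits the two accepted clientName forms; the sample names are made up.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as ExtractModuleName: optional "module/" prefix, then "package.Client".
	re := regexp.MustCompile(`^(?:([a-z0-9]+)/)?(([a-z0-9]+)\.(?:[A-Za-z0-9]+))$`)

	for _, name := range []string{"armfoo/armfoo.WidgetsClient", "azfake.Client", "no-dot"} {
		m := re.FindStringSubmatch(name)
		if len(m) < 4 {
			fmt.Printf("%s: malformed\n", name)
			continue
		}
		module := m[3] // no "module/" prefix: fall back to the package name
		if m[1] != "" {
			module = m[1] // explicit "module/" prefix
		}
		fmt.Printf("%s -> module=%q client=%q\n", name, module, m[2])
	}
}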

@ -125,7 +125,8 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
}
if options.TryTimeout == 0 {
resp, err = req.Next()
clone := req.Clone(req.Raw().Context())
resp, err = clone.Next()
} else {
// Set the per-try time for this particular retry operation and then Do the operation.
tryCtx, tryCancel := context.WithTimeout(req.Raw().Context(), options.TryTimeout)

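For orientation, a hedged sketch (not part of this change) of the same cloning pattern in a user-defined azcore policy; the type name is hypothetical, and the imports assumed are net/http and github.com/Azure/azure-sdk-for-go/sdk/azcore/policy.

type isolatingPolicy struct{}

// Do forwards a clone, as the fixed retry policy now does, so mutations made by
// later policies do not leak back into the caller's *policy.Request.
func (isolatingPolicy) Do(req *policy.Request) (*http.Response, error) {
	clone := req.Clone(req.Raw().Context())
	return clone.Next()
}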

@ -0,0 +1,4 @@
// DO NOT EDIT
package corehandlers
const isAwsInternal = ""


@ -35,3 +35,13 @@ var AddHostExecEnvUserAgentHander = request.NamedHandler{
request.AddToUserAgent(r, execEnvUAKey+"/"+v)
},
}
var AddAwsInternal = request.NamedHandler{
Name: "core.AddAwsInternal",
Fn: func(r *request.Request) {
if len(isAwsInternal) == 0 {
return
}
request.AddToUserAgent(r, isAwsInternal)
},
}


@ -74,6 +74,7 @@ func Handlers() request.Handlers {
handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
handlers.Validate.AfterEachFn = request.HandlerListStopOnError
handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
handlers.Build.PushBackNamed(corehandlers.AddAwsInternal)
handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander)
handlers.Build.AfterEachFn = request.HandlerListStopOnError
handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)


@ -1058,6 +1058,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "il-central-1",
}: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@ -2928,6 +2931,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "appmesh.eu-west-3.api.aws",
},
endpointKey{
Region: "il-central-1",
}: endpoint{},
endpointKey{
Region: "il-central-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "appmesh.il-central-1.api.aws",
},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@ -5699,6 +5711,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "il-central-1",
}: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@ -6270,6 +6285,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
Region: "ap-northeast-3",
}: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@ -6336,6 +6354,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "il-central-1",
}: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@ -6388,6 +6409,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
Region: "ap-northeast-3",
}: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@ -6454,6 +6478,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "il-central-1",
}: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@ -7087,6 +7114,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
@ -7227,6 +7257,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "il-central-1",
}: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@ -8755,6 +8788,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "il-central-1",
}: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@ -11358,63 +11394,183 @@ var awsPartition = partition{
endpointKey{
Region: "af-south-1",
}: endpoint{},
endpointKey{
Region: "af-south-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.af-south-1.api.aws",
},
endpointKey{
Region: "ap-east-1",
}: endpoint{},
endpointKey{
Region: "ap-east-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.ap-east-1.api.aws",
},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.ap-northeast-1.api.aws",
},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
Region: "ap-northeast-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.ap-northeast-2.api.aws",
},
endpointKey{
Region: "ap-northeast-3",
}: endpoint{},
endpointKey{
Region: "ap-northeast-3",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.ap-northeast-3.api.aws",
},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "ap-south-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.ap-south-1.api.aws",
},
endpointKey{
Region: "ap-south-2",
}: endpoint{},
endpointKey{
Region: "ap-south-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.ap-south-2.api.aws",
},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.ap-southeast-1.api.aws",
},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.ap-southeast-2.api.aws",
},
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
endpointKey{
Region: "ap-southeast-3",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.ap-southeast-3.api.aws",
},
endpointKey{
Region: "ap-southeast-4",
}: endpoint{},
endpointKey{
Region: "ap-southeast-4",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.ap-southeast-4.api.aws",
},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.ca-central-1.api.aws",
},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.eu-central-1.api.aws",
},
endpointKey{
Region: "eu-central-2",
}: endpoint{},
endpointKey{
Region: "eu-central-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.eu-central-2.api.aws",
},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.eu-north-1.api.aws",
},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.eu-south-1.api.aws",
},
endpointKey{
Region: "eu-south-2",
}: endpoint{},
endpointKey{
Region: "eu-south-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.eu-south-2.api.aws",
},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.eu-west-1.api.aws",
},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.eu-west-2.api.aws",
},
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "eu-west-3",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.eu-west-3.api.aws",
},
endpointKey{
Region: "fips",
}: endpoint{
@ -11427,18 +11583,48 @@ var awsPartition = partition{
endpointKey{
Region: "il-central-1",
}: endpoint{},
endpointKey{
Region: "il-central-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.il-central-1.api.aws",
},
endpointKey{
Region: "me-central-1",
}: endpoint{},
endpointKey{
Region: "me-central-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.me-central-1.api.aws",
},
endpointKey{
Region: "me-south-1",
}: endpoint{},
endpointKey{
Region: "me-south-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.me-south-1.api.aws",
},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
endpointKey{
Region: "sa-east-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.sa-east-1.api.aws",
},
endpointKey{
Region: "us-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.us-east-1.api.aws",
},
endpointKey{
Region: "us-east-1",
Variant: fipsVariant,
@ -11457,6 +11643,12 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-2",
}: endpoint{},
endpointKey{
Region: "us-east-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.us-east-2.api.aws",
},
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
@ -11475,6 +11667,12 @@ var awsPartition = partition{
endpointKey{
Region: "us-west-1",
}: endpoint{},
endpointKey{
Region: "us-west-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.us-west-1.api.aws",
},
endpointKey{
Region: "us-west-1",
Variant: fipsVariant,
@ -11493,6 +11691,12 @@ var awsPartition = partition{
endpointKey{
Region: "us-west-2",
}: endpoint{},
endpointKey{
Region: "us-west-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.us-west-2.api.aws",
},
endpointKey{
Region: "us-west-2",
Variant: fipsVariant,
@ -13790,6 +13994,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-2",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
@ -13805,6 +14012,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "il-central-1",
}: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@ -14245,7 +14455,7 @@ var awsPartition = partition{
Region: "ca-central-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "internetmonitor-fips.ca-central-1.api.aws",
Hostname: "internetmonitor-fips.ca-central-1.amazonaws.com",
},
endpointKey{
Region: "eu-central-1",
@ -14316,7 +14526,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "internetmonitor-fips.us-east-1.api.aws",
Hostname: "internetmonitor-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@ -14327,7 +14537,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "internetmonitor-fips.us-east-2.api.aws",
Hostname: "internetmonitor-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
@ -14338,7 +14548,7 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "internetmonitor-fips.us-west-1.api.aws",
Hostname: "internetmonitor-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@ -14349,7 +14559,7 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "internetmonitor-fips.us-west-2.api.aws",
Hostname: "internetmonitor-fips.us-west-2.amazonaws.com",
},
},
},
@ -15478,6 +15688,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "il-central-1",
}: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@ -17401,6 +17614,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@ -17458,6 +17674,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "il-central-1",
}: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@ -17997,6 +18216,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "il-central-1",
}: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@ -18063,6 +18285,13 @@ var awsPartition = partition{
}: endpoint{},
},
},
"managedblockchain-query": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-east-1",
}: endpoint{},
},
},
"marketplacecommerceanalytics": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -18599,6 +18828,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "il-central-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@ -20157,6 +20389,14 @@ var awsPartition = partition{
Region: "eu-west-3",
},
},
endpointKey{
Region: "il-central-1",
}: endpoint{
Hostname: "oidc.il-central-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "il-central-1",
},
},
endpointKey{
Region: "me-south-1",
}: endpoint{
@ -21247,6 +21487,14 @@ var awsPartition = partition{
Region: "eu-west-3",
},
},
endpointKey{
Region: "il-central-1",
}: endpoint{
Hostname: "portal.sso.il-central-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "il-central-1",
},
},
endpointKey{
Region: "me-south-1",
}: endpoint{
@ -27745,6 +27993,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "il-central-1",
}: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@ -29299,6 +29550,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "il-central-1",
}: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@ -32466,9 +32720,21 @@ var awscnPartition = partition{
endpointKey{
Region: "cn-north-1",
}: endpoint{},
endpointKey{
Region: "cn-north-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.cn-north-1.api.amazonwebservices.com.cn",
},
endpointKey{
Region: "cn-northwest-1",
}: endpoint{},
endpointKey{
Region: "cn-northwest-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.cn-northwest-1.api.amazonwebservices.com.cn",
},
},
},
"events": service{
@ -32626,6 +32892,16 @@ var awscnPartition = partition{
},
},
},
"identitystore": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "cn-north-1",
}: endpoint{},
endpointKey{
Region: "cn-northwest-1",
}: endpoint{},
},
},
"internetmonitor": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@ -35924,6 +36200,12 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
endpointKey{
Region: "us-gov-east-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.us-gov-east-1.api.aws",
},
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
@ -35942,6 +36224,12 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
endpointKey{
Region: "us-gov-west-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "aos.us-gov-west-1.api.aws",
},
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
@ -36178,6 +36466,28 @@ var awsusgovPartition = partition{
},
},
},
"geo": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "fips-us-gov-west-1",
}: endpoint{
Hostname: "geo-fips.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "geo-fips.us-gov-west-1.amazonaws.com",
},
},
},
"glacier": service{
Endpoints: serviceEndpoints{
endpointKey{
@ -40198,14 +40508,45 @@ var awsisoPartition = partition{
},
"elasticmapreduce": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "fips-us-iso-east-1",
}: endpoint{
Hostname: "elasticmapreduce.us-iso-east-1.c2s.ic.gov",
CredentialScope: credentialScope{
Region: "us-iso-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-us-iso-west-1",
}: endpoint{
Hostname: "elasticmapreduce.us-iso-west-1.c2s.ic.gov",
CredentialScope: credentialScope{
Region: "us-iso-west-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-iso-east-1",
}: endpoint{
Protocols: []string{"https"},
},
endpointKey{
Region: "us-iso-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "elasticmapreduce.us-iso-east-1.c2s.ic.gov",
Protocols: []string{"https"},
},
endpointKey{
Region: "us-iso-west-1",
}: endpoint{},
endpointKey{
Region: "us-iso-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "elasticmapreduce.us-iso-west-1.c2s.ic.gov",
},
},
},
"es": service{
@ -40991,9 +41332,24 @@ var awsisobPartition = partition{
},
"elasticmapreduce": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "fips-us-isob-east-1",
}: endpoint{
Hostname: "elasticmapreduce.us-isob-east-1.sc2s.sgov.gov",
CredentialScope: credentialScope{
Region: "us-isob-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-isob-east-1",
}: endpoint{},
endpointKey{
Region: "us-isob-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "elasticmapreduce.us-isob-east-1.sc2s.sgov.gov",
},
},
},
"es": service{


@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.44.332"
const SDKVersion = "1.45.10"


@ -35494,6 +35494,90 @@ func (c *EC2) DisableFastSnapshotRestoresWithContext(ctx aws.Context, input *Dis
return out, req.Send()
}
const opDisableImageBlockPublicAccess = "DisableImageBlockPublicAccess"
// DisableImageBlockPublicAccessRequest generates a "aws/request.Request" representing the
// client's request for the DisableImageBlockPublicAccess operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See DisableImageBlockPublicAccess for more information on using the DisableImageBlockPublicAccess
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
// // Example sending a request using the DisableImageBlockPublicAccessRequest method.
// req, resp := client.DisableImageBlockPublicAccessRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableImageBlockPublicAccess
func (c *EC2) DisableImageBlockPublicAccessRequest(input *DisableImageBlockPublicAccessInput) (req *request.Request, output *DisableImageBlockPublicAccessOutput) {
op := &request.Operation{
Name: opDisableImageBlockPublicAccess,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &DisableImageBlockPublicAccessInput{}
}
output = &DisableImageBlockPublicAccessOutput{}
req = c.newRequest(op, input, output)
return
}
// DisableImageBlockPublicAccess API operation for Amazon Elastic Compute Cloud.
//
// Disables block public access for AMIs at the account level in the specified
// Amazon Web Services Region. This removes the block public access restriction
// from your account. With the restriction removed, you can publicly share your
// AMIs in the specified Amazon Web Services Region.
//
// The API can take up to 10 minutes to configure this setting. During this
// time, if you run GetImageBlockPublicAccessState (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetImageBlockPublicAccessState.html),
// the response will be block-new-sharing. When the API has completed the configuration,
// the response will be unblocked.
//
// For more information, see Block public access to your AMIs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/sharingamis-intro.html#block-public-access-to-amis)
// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Elastic Compute Cloud's
// API operation DisableImageBlockPublicAccess for usage and error information.
// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DisableImageBlockPublicAccess
func (c *EC2) DisableImageBlockPublicAccess(input *DisableImageBlockPublicAccessInput) (*DisableImageBlockPublicAccessOutput, error) {
req, out := c.DisableImageBlockPublicAccessRequest(input)
return out, req.Send()
}
// DisableImageBlockPublicAccessWithContext is the same as DisableImageBlockPublicAccess with the addition of
// the ability to pass a context and additional request options.
//
// See DisableImageBlockPublicAccess for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *EC2) DisableImageBlockPublicAccessWithContext(ctx aws.Context, input *DisableImageBlockPublicAccessInput, opts ...request.Option) (*DisableImageBlockPublicAccessOutput, error) {
req, out := c.DisableImageBlockPublicAccessRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opDisableImageDeprecation = "DisableImageDeprecation"
// DisableImageDeprecationRequest generates a "aws/request.Request" representing the
@ -37504,6 +37588,89 @@ func (c *EC2) EnableFastSnapshotRestoresWithContext(ctx aws.Context, input *Enab
return out, req.Send()
}
const opEnableImageBlockPublicAccess = "EnableImageBlockPublicAccess"
// EnableImageBlockPublicAccessRequest generates a "aws/request.Request" representing the
// client's request for the EnableImageBlockPublicAccess operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See EnableImageBlockPublicAccess for more information on using the EnableImageBlockPublicAccess
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
// // Example sending a request using the EnableImageBlockPublicAccessRequest method.
// req, resp := client.EnableImageBlockPublicAccessRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableImageBlockPublicAccess
func (c *EC2) EnableImageBlockPublicAccessRequest(input *EnableImageBlockPublicAccessInput) (req *request.Request, output *EnableImageBlockPublicAccessOutput) {
op := &request.Operation{
Name: opEnableImageBlockPublicAccess,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &EnableImageBlockPublicAccessInput{}
}
output = &EnableImageBlockPublicAccessOutput{}
req = c.newRequest(op, input, output)
return
}
// EnableImageBlockPublicAccess API operation for Amazon Elastic Compute Cloud.
//
// Enables block public access for AMIs at the account level in the specified
// Amazon Web Services Region. This prevents the public sharing of your AMIs.
// However, if you already have public AMIs, they will remain publicly available.
//
// The API can take up to 10 minutes to configure this setting. During this
// time, if you run GetImageBlockPublicAccessState (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetImageBlockPublicAccessState.html),
// the response will be unblocked. When the API has completed the configuration,
// the response will be block-new-sharing.
//
// For more information, see Block public access to your AMIs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/sharingamis-intro.html#block-public-access-to-amis)
// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Elastic Compute Cloud's
// API operation EnableImageBlockPublicAccess for usage and error information.
// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EnableImageBlockPublicAccess
func (c *EC2) EnableImageBlockPublicAccess(input *EnableImageBlockPublicAccessInput) (*EnableImageBlockPublicAccessOutput, error) {
req, out := c.EnableImageBlockPublicAccessRequest(input)
return out, req.Send()
}
// EnableImageBlockPublicAccessWithContext is the same as EnableImageBlockPublicAccess with the addition of
// the ability to pass a context and additional request options.
//
// See EnableImageBlockPublicAccess for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *EC2) EnableImageBlockPublicAccessWithContext(ctx aws.Context, input *EnableImageBlockPublicAccessInput, opts ...request.Option) (*EnableImageBlockPublicAccessOutput, error) {
req, out := c.EnableImageBlockPublicAccessRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opEnableImageDeprecation = "EnableImageDeprecation"
// EnableImageDeprecationRequest generates a "aws/request.Request" representing the
@ -39681,6 +39848,83 @@ func (c *EC2) GetHostReservationPurchasePreviewWithContext(ctx aws.Context, inpu
return out, req.Send()
}
const opGetImageBlockPublicAccessState = "GetImageBlockPublicAccessState"
// GetImageBlockPublicAccessStateRequest generates a "aws/request.Request" representing the
// client's request for the GetImageBlockPublicAccessState operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
//
// See GetImageBlockPublicAccessState for more information on using the GetImageBlockPublicAccessState
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
// // Example sending a request using the GetImageBlockPublicAccessStateRequest method.
// req, resp := client.GetImageBlockPublicAccessStateRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetImageBlockPublicAccessState
func (c *EC2) GetImageBlockPublicAccessStateRequest(input *GetImageBlockPublicAccessStateInput) (req *request.Request, output *GetImageBlockPublicAccessStateOutput) {
op := &request.Operation{
Name: opGetImageBlockPublicAccessState,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &GetImageBlockPublicAccessStateInput{}
}
output = &GetImageBlockPublicAccessStateOutput{}
req = c.newRequest(op, input, output)
return
}
// GetImageBlockPublicAccessState API operation for Amazon Elastic Compute Cloud.
//
// Gets the current state of block public access for AMIs at the account level
// in the specified Amazon Web Services Region.
//
// For more information, see Block public access to your AMIs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/sharingamis-intro.html#block-public-access-to-amis)
// in the Amazon EC2 User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Elastic Compute Cloud's
// API operation GetImageBlockPublicAccessState for usage and error information.
// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetImageBlockPublicAccessState
func (c *EC2) GetImageBlockPublicAccessState(input *GetImageBlockPublicAccessStateInput) (*GetImageBlockPublicAccessStateOutput, error) {
req, out := c.GetImageBlockPublicAccessStateRequest(input)
return out, req.Send()
}
// GetImageBlockPublicAccessStateWithContext is the same as GetImageBlockPublicAccessState with the addition of
// the ability to pass a context and additional request options.
//
// See GetImageBlockPublicAccessState for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *EC2) GetImageBlockPublicAccessStateWithContext(ctx aws.Context, input *GetImageBlockPublicAccessStateInput, opts ...request.Option) (*GetImageBlockPublicAccessStateOutput, error) {
req, out := c.GetImageBlockPublicAccessStateRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opGetInstanceTypesFromInstanceRequirements = "GetInstanceTypesFromInstanceRequirements"
// GetInstanceTypesFromInstanceRequirementsRequest generates a "aws/request.Request" representing the
@ -110469,6 +110713,71 @@ func (s *DisableFastSnapshotRestoresOutput) SetUnsuccessful(v []*DisableFastSnap
return s
}
type DisableImageBlockPublicAccessInput struct {
_ struct{} `type:"structure"`
// Checks whether you have the required permissions for the action, without
// actually making the request, and provides an error response. If you have
// the required permissions, the error response is DryRunOperation. Otherwise,
// it is UnauthorizedOperation.
DryRun *bool `type:"boolean"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DisableImageBlockPublicAccessInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DisableImageBlockPublicAccessInput) GoString() string {
return s.String()
}
// SetDryRun sets the DryRun field's value.
func (s *DisableImageBlockPublicAccessInput) SetDryRun(v bool) *DisableImageBlockPublicAccessInput {
s.DryRun = &v
return s
}
type DisableImageBlockPublicAccessOutput struct {
_ struct{} `type:"structure"`
// Returns unblocked if the request succeeds; otherwise, it returns an error.
ImageBlockPublicAccessState *string `locationName:"imageBlockPublicAccessState" type:"string" enum:"ImageBlockPublicAccessDisabledState"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DisableImageBlockPublicAccessOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DisableImageBlockPublicAccessOutput) GoString() string {
return s.String()
}
// SetImageBlockPublicAccessState sets the ImageBlockPublicAccessState field's value.
func (s *DisableImageBlockPublicAccessOutput) SetImageBlockPublicAccessState(v string) *DisableImageBlockPublicAccessOutput {
s.ImageBlockPublicAccessState = &v
return s
}
type DisableImageDeprecationInput struct {
_ struct{} `type:"structure"`
@ -114949,6 +115258,98 @@ func (s *EnableFastSnapshotRestoresOutput) SetUnsuccessful(v []*EnableFastSnapsh
return s
}
type EnableImageBlockPublicAccessInput struct {
_ struct{} `type:"structure"`
// Checks whether you have the required permissions for the action, without
// actually making the request, and provides an error response. If you have
// the required permissions, the error response is DryRunOperation. Otherwise,
// it is UnauthorizedOperation.
DryRun *bool `type:"boolean"`
// Specify block-new-sharing to enable block public access for AMIs at the account
// level in the specified Region. This will block any attempt to publicly share
// your AMIs in the specified Region.
//
// ImageBlockPublicAccessState is a required field
ImageBlockPublicAccessState *string `type:"string" required:"true" enum:"ImageBlockPublicAccessEnabledState"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s EnableImageBlockPublicAccessInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s EnableImageBlockPublicAccessInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *EnableImageBlockPublicAccessInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "EnableImageBlockPublicAccessInput"}
if s.ImageBlockPublicAccessState == nil {
invalidParams.Add(request.NewErrParamRequired("ImageBlockPublicAccessState"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetDryRun sets the DryRun field's value.
func (s *EnableImageBlockPublicAccessInput) SetDryRun(v bool) *EnableImageBlockPublicAccessInput {
s.DryRun = &v
return s
}
// SetImageBlockPublicAccessState sets the ImageBlockPublicAccessState field's value.
func (s *EnableImageBlockPublicAccessInput) SetImageBlockPublicAccessState(v string) *EnableImageBlockPublicAccessInput {
s.ImageBlockPublicAccessState = &v
return s
}
type EnableImageBlockPublicAccessOutput struct {
_ struct{} `type:"structure"`
// Returns block-new-sharing if the request succeeds; otherwise, it returns
// an error.
ImageBlockPublicAccessState *string `locationName:"imageBlockPublicAccessState" type:"string" enum:"ImageBlockPublicAccessEnabledState"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s EnableImageBlockPublicAccessOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s EnableImageBlockPublicAccessOutput) GoString() string {
return s.String()
}
// SetImageBlockPublicAccessState sets the ImageBlockPublicAccessState field's value.
func (s *EnableImageBlockPublicAccessOutput) SetImageBlockPublicAccessState(v string) *EnableImageBlockPublicAccessOutput {
s.ImageBlockPublicAccessState = &v
return s
}
type EnableImageDeprecationInput struct {
_ struct{} `type:"structure"`
@ -121252,6 +121653,79 @@ func (s *GetHostReservationPurchasePreviewOutput) SetTotalUpfrontPrice(v string)
return s
}
type GetImageBlockPublicAccessStateInput struct {
_ struct{} `type:"structure"`
// Checks whether you have the required permissions for the action, without
// actually making the request, and provides an error response. If you have
// the required permissions, the error response is DryRunOperation. Otherwise,
// it is UnauthorizedOperation.
DryRun *bool `type:"boolean"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetImageBlockPublicAccessStateInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetImageBlockPublicAccessStateInput) GoString() string {
return s.String()
}
// SetDryRun sets the DryRun field's value.
func (s *GetImageBlockPublicAccessStateInput) SetDryRun(v bool) *GetImageBlockPublicAccessStateInput {
s.DryRun = &v
return s
}
type GetImageBlockPublicAccessStateOutput struct {
_ struct{} `type:"structure"`
// The current state of block public access for AMIs at the account level in
// the specified Amazon Web Services Region.
//
// Possible values:
//
// * block-new-sharing - Any attempt to publicly share your AMIs in the specified
// Region is blocked.
//
// * unblocked - Your AMIs in the specified Region can be publicly shared.
ImageBlockPublicAccessState *string `locationName:"imageBlockPublicAccessState" type:"string"`
}
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetImageBlockPublicAccessStateOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetImageBlockPublicAccessStateOutput) GoString() string {
return s.String()
}
// SetImageBlockPublicAccessState sets the ImageBlockPublicAccessState field's value.
func (s *GetImageBlockPublicAccessStateOutput) SetImageBlockPublicAccessState(v string) *GetImageBlockPublicAccessStateOutput {
s.ImageBlockPublicAccessState = &v
return s
}
type GetInstanceTypesFromInstanceRequirementsInput struct {
_ struct{} `type:"structure"`
@ -184799,6 +185273,30 @@ func ImageAttributeName_Values() []string {
}
}
const (
// ImageBlockPublicAccessDisabledStateUnblocked is a ImageBlockPublicAccessDisabledState enum value
ImageBlockPublicAccessDisabledStateUnblocked = "unblocked"
)
// ImageBlockPublicAccessDisabledState_Values returns all elements of the ImageBlockPublicAccessDisabledState enum
func ImageBlockPublicAccessDisabledState_Values() []string {
return []string{
ImageBlockPublicAccessDisabledStateUnblocked,
}
}
const (
// ImageBlockPublicAccessEnabledStateBlockNewSharing is a ImageBlockPublicAccessEnabledState enum value
ImageBlockPublicAccessEnabledStateBlockNewSharing = "block-new-sharing"
)
// ImageBlockPublicAccessEnabledState_Values returns all elements of the ImageBlockPublicAccessEnabledState enum
func ImageBlockPublicAccessEnabledState_Values() []string {
return []string{
ImageBlockPublicAccessEnabledStateBlockNewSharing,
}
}
const (
// ImageStatePending is a ImageState enum value
ImageStatePending = "pending"
@ -187295,6 +187793,78 @@ const (
// InstanceTypeHpc7a96xlarge is a InstanceType enum value
InstanceTypeHpc7a96xlarge = "hpc7a.96xlarge"
// InstanceTypeC7gdMedium is a InstanceType enum value
InstanceTypeC7gdMedium = "c7gd.medium"
// InstanceTypeC7gdLarge is a InstanceType enum value
InstanceTypeC7gdLarge = "c7gd.large"
// InstanceTypeC7gdXlarge is a InstanceType enum value
InstanceTypeC7gdXlarge = "c7gd.xlarge"
// InstanceTypeC7gd2xlarge is a InstanceType enum value
InstanceTypeC7gd2xlarge = "c7gd.2xlarge"
// InstanceTypeC7gd4xlarge is a InstanceType enum value
InstanceTypeC7gd4xlarge = "c7gd.4xlarge"
// InstanceTypeC7gd8xlarge is a InstanceType enum value
InstanceTypeC7gd8xlarge = "c7gd.8xlarge"
// InstanceTypeC7gd12xlarge is a InstanceType enum value
InstanceTypeC7gd12xlarge = "c7gd.12xlarge"
// InstanceTypeC7gd16xlarge is a InstanceType enum value
InstanceTypeC7gd16xlarge = "c7gd.16xlarge"
// InstanceTypeM7gdMedium is a InstanceType enum value
InstanceTypeM7gdMedium = "m7gd.medium"
// InstanceTypeM7gdLarge is a InstanceType enum value
InstanceTypeM7gdLarge = "m7gd.large"
// InstanceTypeM7gdXlarge is a InstanceType enum value
InstanceTypeM7gdXlarge = "m7gd.xlarge"
// InstanceTypeM7gd2xlarge is a InstanceType enum value
InstanceTypeM7gd2xlarge = "m7gd.2xlarge"
// InstanceTypeM7gd4xlarge is a InstanceType enum value
InstanceTypeM7gd4xlarge = "m7gd.4xlarge"
// InstanceTypeM7gd8xlarge is a InstanceType enum value
InstanceTypeM7gd8xlarge = "m7gd.8xlarge"
// InstanceTypeM7gd12xlarge is a InstanceType enum value
InstanceTypeM7gd12xlarge = "m7gd.12xlarge"
// InstanceTypeM7gd16xlarge is a InstanceType enum value
InstanceTypeM7gd16xlarge = "m7gd.16xlarge"
// InstanceTypeR7gdMedium is a InstanceType enum value
InstanceTypeR7gdMedium = "r7gd.medium"
// InstanceTypeR7gdLarge is a InstanceType enum value
InstanceTypeR7gdLarge = "r7gd.large"
// InstanceTypeR7gdXlarge is a InstanceType enum value
InstanceTypeR7gdXlarge = "r7gd.xlarge"
// InstanceTypeR7gd2xlarge is a InstanceType enum value
InstanceTypeR7gd2xlarge = "r7gd.2xlarge"
// InstanceTypeR7gd4xlarge is a InstanceType enum value
InstanceTypeR7gd4xlarge = "r7gd.4xlarge"
// InstanceTypeR7gd8xlarge is a InstanceType enum value
InstanceTypeR7gd8xlarge = "r7gd.8xlarge"
// InstanceTypeR7gd12xlarge is a InstanceType enum value
InstanceTypeR7gd12xlarge = "r7gd.12xlarge"
// InstanceTypeR7gd16xlarge is a InstanceType enum value
InstanceTypeR7gd16xlarge = "r7gd.16xlarge"
)
// InstanceType_Values returns all elements of the InstanceType enum
@ -187996,6 +188566,30 @@ func InstanceType_Values() []string {
InstanceTypeHpc7a24xlarge,
InstanceTypeHpc7a48xlarge,
InstanceTypeHpc7a96xlarge,
InstanceTypeC7gdMedium,
InstanceTypeC7gdLarge,
InstanceTypeC7gdXlarge,
InstanceTypeC7gd2xlarge,
InstanceTypeC7gd4xlarge,
InstanceTypeC7gd8xlarge,
InstanceTypeC7gd12xlarge,
InstanceTypeC7gd16xlarge,
InstanceTypeM7gdMedium,
InstanceTypeM7gdLarge,
InstanceTypeM7gdXlarge,
InstanceTypeM7gd2xlarge,
InstanceTypeM7gd4xlarge,
InstanceTypeM7gd8xlarge,
InstanceTypeM7gd12xlarge,
InstanceTypeM7gd16xlarge,
InstanceTypeR7gdMedium,
InstanceTypeR7gdLarge,
InstanceTypeR7gdXlarge,
InstanceTypeR7gd2xlarge,
InstanceTypeR7gd4xlarge,
InstanceTypeR7gd8xlarge,
InstanceTypeR7gd12xlarge,
InstanceTypeR7gd16xlarge,
}
}
@ -188944,6 +189538,9 @@ const (
// LocationTypeAvailabilityZoneId is a LocationType enum value
LocationTypeAvailabilityZoneId = "availability-zone-id"
// LocationTypeOutpost is a LocationType enum value
LocationTypeOutpost = "outpost"
)
// LocationType_Values returns all elements of the LocationType enum
@ -188952,6 +189549,7 @@ func LocationType_Values() []string {
LocationTypeRegion,
LocationTypeAvailabilityZone,
LocationTypeAvailabilityZoneId,
LocationTypeOutpost,
}
}

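A hedged sketch, not from this commit, of how the new account-level AMI block-public-access calls might be used with aws-sdk-go v1; the Region is hypothetical and the session setup is the usual pattern.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	// Block public sharing of AMIs at the account level in this Region.
	_, err := svc.EnableImageBlockPublicAccess(&ec2.EnableImageBlockPublicAccessInput{
		ImageBlockPublicAccessState: aws.String(ec2.ImageBlockPublicAccessEnabledStateBlockNewSharing),
	})
	if err != nil {
		log.Fatal(err)
	}

	// The setting can take up to 10 minutes to apply; check the current state.
	out, err := svc.GetImageBlockPublicAccessState(&ec2.GetImageBlockPublicAccessStateInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.ImageBlockPublicAccessState)) // "block-new-sharing" once applied
}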

@ -74,7 +74,6 @@ func IsErrorRetryable(err error) bool {
}
switch e := err.(type) {
case errcode.Error:
switch e.Code {
case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeDenied,


@ -286,7 +286,8 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf
if d.uploadedCompressorName != "" && d.uploadedCompressorName != internalblobinfocache.UnknownCompression {
c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, d.uploadedCompressorName)
}
if srcInfo.Digest != "" && d.srcCompressorName != "" && d.srcCompressorName != internalblobinfocache.UnknownCompression {
if srcInfo.Digest != "" && srcInfo.Digest != uploadedInfo.Digest &&
d.srcCompressorName != "" && d.srcCompressorName != internalblobinfocache.UnknownCompression {
c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName)
}
return nil


@ -133,6 +133,10 @@ type Options struct {
// Invalid when copying a non-multi-architecture image. That will probably
// change in the future.
EnsureCompressionVariantsExist []OptionCompressionVariant
// ForceCompressionFormat ensures that the compression algorithm set in
// DestinationCtx.CompressionFormat is used exclusively, and blobs of other
// compression algorithms are not reused.
ForceCompressionFormat bool
}
// OptionCompressionVariant allows to supply information about
@ -163,6 +167,14 @@ type copier struct {
signersToClose []*signer.Signer // Signers that should be closed when this copier is destroyed.
}
// Internal function to validate `requireCompressionFormatMatch` for copySingleImageOptions
func shouldRequireCompressionFormatMatch(options *Options) (bool, error) {
if options.ForceCompressionFormat && (options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil) {
return false, fmt.Errorf("cannot use ForceCompressionFormat with undefined default compression format")
}
return options.ForceCompressionFormat, nil
}
// Image copies image from srcRef to destRef, using policyContext to validate
// source image admissibility. It returns the manifest which was written to
// the new copy of the image.
@ -230,11 +242,13 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
unparsedToplevel: image.UnparsedInstance(rawSource, nil),
// FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx.
// For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually
// we might want to add a separate CommonCtx — or would that be too confusing?
// For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more).
// Conceptually the cache settings should be in copy.Options instead.
blobInfoCache: internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)),
}
defer c.close()
c.blobInfoCache.Open()
defer c.blobInfoCache.Close()
// Set the concurrentBlobCopiesSemaphore if we can copy layers in parallel.
if dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob() {
@ -269,8 +283,12 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
if len(options.EnsureCompressionVariantsExist) > 0 {
return nil, fmt.Errorf("EnsureCompressionVariantsExist is not implemented when not creating a multi-architecture image")
}
requireCompressionFormatMatch, err := shouldRequireCompressionFormatMatch(options)
if err != nil {
return nil, err
}
// The simple case: just copy a single image.
single, err := c.copySingleImage(ctx, c.unparsedToplevel, nil, copySingleImageOptions{requireCompressionFormatMatch: false})
single, err := c.copySingleImage(ctx, c.unparsedToplevel, nil, copySingleImageOptions{requireCompressionFormatMatch: requireCompressionFormatMatch})
if err != nil {
return nil, err
}
@ -279,6 +297,10 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
if len(options.EnsureCompressionVariantsExist) > 0 {
return nil, fmt.Errorf("EnsureCompressionVariantsExist is not implemented when not creating a multi-architecture image")
}
requireCompressionFormatMatch, err := shouldRequireCompressionFormatMatch(options)
if err != nil {
return nil, err
}
// This is a manifest list, and we weren't asked to copy multiple images. Choose a single image that
// matches the current system to copy, and copy it.
mfest, manifestType, err := c.unparsedToplevel.Manifest(ctx)
@ -295,7 +317,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
}
logrus.Debugf("Source is a manifest list; copying (only) instance %s for current system", instanceDigest)
unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest)
single, err := c.copySingleImage(ctx, unparsedInstance, nil, copySingleImageOptions{requireCompressionFormatMatch: false})
single, err := c.copySingleImage(ctx, unparsedInstance, nil, copySingleImageOptions{requireCompressionFormatMatch: requireCompressionFormatMatch})
if err != nil {
return nil, fmt.Errorf("copying system image from manifest list: %w", err)
}

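A hedged sketch, not part of the diff, of how a caller might opt into the new ForceCompressionFormat behavior. It assumes the usual copy.Image call shape, that types.SystemContext carries a CompressionFormat field, and that pkg/compression exposes a Zstd algorithm value, as in current containers/image releases; none of that is shown in this hunk.

// import (
//     "context"
//     "github.com/containers/image/v5/copy"
//     "github.com/containers/image/v5/pkg/compression"
//     "github.com/containers/image/v5/signature"
//     "github.com/containers/image/v5/types"
// )

func copyWithZstdOnly(ctx context.Context, pc *signature.PolicyContext, srcRef, destRef types.ImageReference) error {
	zstd := compression.Zstd
	_, err := copy.Image(ctx, pc, destRef, srcRef, &copy.Options{
		// ForceCompressionFormat is rejected unless DestinationCtx.CompressionFormat is set.
		ForceCompressionFormat: true,
		DestinationCtx: &types.SystemContext{
			CompressionFormat: &zstd,
		},
	})
	return err
}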

@ -32,6 +32,10 @@ type instanceCopy struct {
op instanceCopyKind
sourceDigest digest.Digest
// Fields which can be used by callers when operation
// is `instanceCopyCopy`
copyForceCompressionFormat bool
// Fields which can be used by callers when operation
// is `instanceCopyClone`
cloneCompressionVariant OptionCompressionVariant
@ -122,9 +126,14 @@ func prepareInstanceCopies(list internalManifest.List, instanceDigests []digest.
if err != nil {
return res, fmt.Errorf("getting details for instance %s: %w", instanceDigest, err)
}
forceCompressionFormat, err := shouldRequireCompressionFormatMatch(options)
if err != nil {
return nil, err
}
res = append(res, instanceCopy{
op: instanceCopyCopy,
sourceDigest: instanceDigest,
op: instanceCopyCopy,
sourceDigest: instanceDigest,
copyForceCompressionFormat: forceCompressionFormat,
})
platform := platformV1ToPlatformComparable(instanceDetails.ReadOnly.Platform)
compressionList := compressionsByPlatform[platform]
@ -230,7 +239,7 @@ func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte,
logrus.Debugf("Copying instance %s (%d/%d)", instance.sourceDigest, i+1, len(instanceCopyList))
c.Printf("Copying image %s (%d/%d)\n", instance.sourceDigest, i+1, len(instanceCopyList))
unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceCopyList[i].sourceDigest)
updated, err := c.copySingleImage(ctx, unparsedInstance, &instanceCopyList[i].sourceDigest, copySingleImageOptions{requireCompressionFormatMatch: false})
updated, err := c.copySingleImage(ctx, unparsedInstance, &instanceCopyList[i].sourceDigest, copySingleImageOptions{requireCompressionFormatMatch: instance.copyForceCompressionFormat})
if err != nil {
return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", i+1, len(instanceCopyList), err)
}


@ -84,6 +84,8 @@ func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.
),
mpb.AppendDecorators(
decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
decor.Name(" | "),
decor.OnComplete(decor.EwmaSpeed(decor.SizeB1024(0), "% .1f", 30), ""),
),
)
}
@ -94,6 +96,9 @@ func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.
mpb.PrependDecorators(
decor.OnComplete(decor.Name(prefix), onComplete),
),
mpb.AppendDecorators(
decor.OnComplete(decor.EwmaSpeed(decor.SizeB1024(0), "% .1f", 30), ""),
),
)
}
return &progressBar{


@ -161,7 +161,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
return copySingleImageResult{}, err
}
destRequiresOciEncryption := (isEncrypted(src) && ic.c.options.OciDecryptConfig != nil) || c.options.OciEncryptLayers != nil
destRequiresOciEncryption := (isEncrypted(src) && ic.c.options.OciDecryptConfig == nil) || c.options.OciEncryptLayers != nil
manifestConversionPlan, err := determineManifestConversion(determineManifestConversionInputs{
srcMIMEType: ic.src.ManifestMIMEType,
@ -305,18 +305,18 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst
options := newOrderedSet()
match := false
for _, wantedPlatform := range wantedPlatforms {
// Waiting for https://github.com/opencontainers/image-spec/pull/777 :
// This currently cant use image.MatchesPlatform because we dont know what to use
// for image.Variant.
if wantedPlatform.OS == c.OS && wantedPlatform.Architecture == c.Architecture {
// For a transitional period, this might trigger warnings because the Variant
// field was added to OCI config only recently. If this turns out to be too noisy,
// revert this check to only look for (OS, Architecture).
if platform.MatchesPlatform(c.Platform, wantedPlatform) {
match = true
break
}
options.append(fmt.Sprintf("%s+%s", wantedPlatform.OS, wantedPlatform.Architecture))
options.append(fmt.Sprintf("%s+%s+%q", wantedPlatform.OS, wantedPlatform.Architecture, wantedPlatform.Variant))
}
if !match {
logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q, expecting one of %q",
c.OS, c.Architecture, strings.Join(options.list, ", "))
logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q+%q, expecting one of %q",
c.OS, c.Architecture, c.Variant, strings.Join(options.list, ", "))
}
}
return nil
@ -360,6 +360,7 @@ func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context,
logrus.Debugf("Unable to create destination image %s source: %v", ic.c.dest.Reference(), err)
return nil, nil
}
defer destImageSource.Close()
destManifest, destManifestType, err := destImageSource.GetManifest(ctx, targetInstance)
if err != nil {
@ -459,8 +460,14 @@ func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algor
encryptAll = len(*ic.c.options.OciEncryptLayers) == 0
totalLayers := len(srcInfos)
for _, l := range *ic.c.options.OciEncryptLayers {
// if layer is negative, it is reverse indexed.
layersToEncrypt.Add((totalLayers + l) % totalLayers)
switch {
case l >= 0 && l < totalLayers:
layersToEncrypt.Add(l)
case l < 0 && l+totalLayers >= 0: // Implies (l + totalLayers) < totalLayers
layersToEncrypt.Add(l + totalLayers) // If l is negative, it is reverse indexed.
default:
return nil, fmt.Errorf("when choosing layers to encrypt, layer index %d out of range (%d layers exist)", l, totalLayers)
}
}
if encryptAll {
@ -655,8 +662,12 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
ic.c.printCopyInfo("blob", srcInfo)
cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
diffIDIsNeeded := false
var cachedDiffID digest.Digest = ""
if ic.diffIDsAreNeeded {
cachedDiffID = ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
diffIDIsNeeded = cachedDiffID == ""
}
// When encrypting or decrypting, only use the simple code path. We might be able to optimize more
// (e.g. if we know the DiffID of an encrypted compressed layer, it might not be necessary to pull, decrypt and decompress again),
// but it's not trivially safe to do such things, so until someone takes the effort to make a comprehensive argument, let's not.

View file

@ -1,7 +1,6 @@
package docker
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
@ -19,6 +18,7 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/internal/useragent"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/docker/config"
@ -121,6 +121,9 @@ type dockerClient struct {
// Private state for detectProperties:
detectPropertiesOnce sync.Once // detectPropertiesOnce is used to execute detectProperties() at most once.
detectPropertiesError error // detectPropertiesError caches the initial error.
// Private state for logResponseWarnings
reportedWarningsLock sync.Mutex
reportedWarnings *set.Set[string]
}
type authScope struct {
@ -281,10 +284,11 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
}
return &dockerClient{
sys: sys,
registry: registry,
userAgent: userAgent,
tlsClientConfig: tlsClientConfig,
sys: sys,
registry: registry,
userAgent: userAgent,
tlsClientConfig: tlsClientConfig,
reportedWarnings: set.New[string](),
}, nil
}
@ -624,9 +628,76 @@ func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method
if err != nil {
return nil, err
}
if warnings := res.Header.Values("Warning"); len(warnings) != 0 {
c.logResponseWarnings(res, warnings)
}
return res, nil
}
// logResponseWarnings logs warningHeaders from res, if any.
func (c *dockerClient) logResponseWarnings(res *http.Response, warningHeaders []string) {
c.reportedWarningsLock.Lock()
defer c.reportedWarningsLock.Unlock()
for _, header := range warningHeaders {
warningString := parseRegistryWarningHeader(header)
if warningString == "" {
logrus.Debugf("Ignored Warning: header from registry: %q", header)
} else {
if !c.reportedWarnings.Contains(warningString) {
c.reportedWarnings.Add(warningString)
// Note that reportedWarnings is based only on warningString, so that we don't
// repeat the same warning for every request - but the warning includes the URL;
// so it may not be specific to that URL.
logrus.Warnf("Warning from registry (first encountered at %q): %q", res.Request.URL.Redacted(), warningString)
} else {
logrus.Debugf("Repeated warning from registry at %q: %q", res.Request.URL.Redacted(), warningString)
}
}
}
}
// parseRegistryWarningHeader parses a Warning: header per RFC 7234, limited to the warning
// values allowed by opencontainers/distribution-spec.
// It returns the warning string if the header has the expected format, or "" otherwise.
func parseRegistryWarningHeader(header string) string {
const expectedPrefix = `299 - "`
const expectedSuffix = `"`
// warning-value = warn-code SP warn-agent SP warn-text [ SP warn-date ]
// distribution-spec requires warn-code=299, warn-agent="-", warn-date missing
if !strings.HasPrefix(header, expectedPrefix) || !strings.HasSuffix(header, expectedSuffix) {
return ""
}
header = header[len(expectedPrefix) : len(header)-len(expectedSuffix)]
// “Recipients that process the value of a quoted-string MUST handle a quoted-pair
// as if it were replaced by the octet following the backslash.”, so let's do that…
res := strings.Builder{}
afterBackslash := false
for _, c := range []byte(header) { // []byte because escaping is defined in terms of bytes, not Unicode code points
switch {
case c == 0x7F || (c < ' ' && c != '\t'):
return "" // Control characters are forbidden
case afterBackslash:
res.WriteByte(c)
afterBackslash = false
case c == '"':
// This terminates the warn-text; what follows is either a warn-date (forbidden by distribution-spec)
// or completely invalid input.
return ""
case c == '\\':
afterBackslash = true
default:
res.WriteByte(c)
}
}
if afterBackslash {
return ""
}
return res.String()
}
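// Illustrative sketch (hypothetical header values, not part of the vendored change): what
// parseRegistryWarningHeader accepts and rejects.
_ = parseRegistryWarningHeader(`299 - "this image is scheduled for deletion"`) // returns: this image is scheduled for deletion
_ = parseRegistryWarningHeader(`299 - "escaped \"quote\""`)                    // returns: escaped "quote"
_ = parseRegistryWarningHeader(`214 proxy "transformation applied"`)           // returns "" (wrong warn-code / warn-agent)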
// we're using the challenges from the /v2/ ping response and not the one from the destination
// URL in this request because:
//
@ -1008,9 +1079,10 @@ func isManifestUnknownError(err error) bool {
if errors.As(err, &e) && e.ErrorCode() == errcode.ErrorCodeUnknown && e.Message == "Not Found" {
return true
}
// ALSO registry.redhat.io as of October 2022
// opencontainers/distribution-spec does not require the errcode.Error payloads to be used,
// but specifies that the HTTP status must be 404.
var unexpected *unexpectedHTTPResponseError
if errors.As(err, &unexpected) && unexpected.StatusCode == http.StatusNotFound && bytes.Contains(unexpected.Response, []byte("Not found")) {
if errors.As(err, &unexpected) && unexpected.StatusCode == http.StatusNotFound {
return true
}
return false

View file

@ -367,6 +367,11 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
// Sanity checks:
if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
// OCI distribution spec 1.1 allows mounting blobs without specifying the source repo
// (the "from" parameter); in that case we might try to use these candidates as well.
//
// OTOH that would mean we can't do the “blobExists” check, and if there is no match
// we could get an upload request that we would have to cancel.
logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
continue
}

View file

@ -47,7 +47,12 @@ func httpResponseToError(res *http.Response, context string) error {
}
// registryHTTPResponseToError creates a Go error from an HTTP error response of a docker/distribution
// registry
// registry.
//
// WARNING: The OCI distribution spec says
// “A `4XX` response code from the registry MAY return a body in any format.”; but if it is
// JSON, it MUST use the errcode.Error structure.
// So, callers should primarily decide based on HTTP StatusCode, not based on error type here.
func registryHTTPResponseToError(res *http.Response) error {
err := handleErrorResponse(res)
// len(errs) == 0 should never be returned by handleErrorResponse; if it does, we don't modify it and let the caller report it as is.

View file

@ -57,7 +57,7 @@ func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
// The caller should call .Close() on the returned archive when done.
func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Reader, error) {
// Save inputStream to a temporary file
tarCopyFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
tarCopyFile, err := tmpdir.CreateBigFileTemp(sys, "docker-tar")
if err != nil {
return nil, fmt.Errorf("creating temporary file: %w", err)
}

View file

@ -40,7 +40,7 @@ func DockerReferenceNamespaces(ref reference.Named) []string {
// then in its parent "docker.io/library"; in none of "busybox",
// un-namespaced "library" nor in "" supposedly implicitly representing "library/".
//
// ref.FullName() == ref.Hostname() + "/" + ref.RemoteName(), so the last
// ref.Name() == ref.Domain() + "/" + ref.Path(), so the last
// iteration matches the host name (for any namespace).
res := []string{}
name := ref.Name()

View file

@ -23,6 +23,12 @@ type v1OnlyBlobInfoCache struct {
types.BlobInfoCache
}
func (bic *v1OnlyBlobInfoCache) Open() {
}
func (bic *v1OnlyBlobInfoCache) Close() {
}
func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
}

View file

@ -18,6 +18,13 @@ const (
// of compression was applied to the blobs it keeps information about.
type BlobInfoCache2 interface {
types.BlobInfoCache
// Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close().
// Note that public callers may call the types.BlobInfoCache operations without Open()/Close().
Open()
// Close destroys state created by Open().
Close()
// RecordDigestCompressorName records a compressor for the blob with the specified digest,
// or Uncompressed or UnknownCompression.
// WARNING: Only call this with LOCALLY VERIFIED data; don't record a compressor for a

View file

@ -12,8 +12,10 @@ import (
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache/none"
"github.com/containers/image/v5/types"
ociencspec "github.com/containers/ocicrypt/spec"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/slices"
)
type manifestOCI1 struct {
@ -86,7 +88,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
// old image manifests work (docker v2s1 especially).
func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType)
return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest)
}
cb, err := m.ConfigBlob(ctx)
@ -194,26 +196,72 @@ func (m *manifestOCI1) convertToManifestSchema2Generic(ctx context.Context, opti
return m.convertToManifestSchema2(ctx, options)
}
// prepareLayerDecryptEditsIfNecessary checks if options requires layer decryptions.
// If not, it returns (nil, nil).
// If decryption is required, it returns a set of edits to provide to OCI1.UpdateLayerInfos,
// and edits *options to not try decryption again.
func (m *manifestOCI1) prepareLayerDecryptEditsIfNecessary(options *types.ManifestUpdateOptions) ([]types.BlobInfo, error) {
if options == nil || !slices.ContainsFunc(options.LayerInfos, func(info types.BlobInfo) bool {
return info.CryptoOperation == types.Decrypt
}) {
return nil, nil
}
originalInfos := m.LayerInfos()
if len(originalInfos) != len(options.LayerInfos) {
return nil, fmt.Errorf("preparing to decrypt before conversion: %d layers vs. %d layer edits", len(originalInfos), len(options.LayerInfos))
}
res := slices.Clone(originalInfos) // Start with a full copy so that we don't forget to copy anything: use the current data in full unless we intentionally deviate.
updatedEdits := slices.Clone(options.LayerInfos)
for i, info := range options.LayerInfos {
if info.CryptoOperation == types.Decrypt {
res[i].CryptoOperation = types.Decrypt
updatedEdits[i].CryptoOperation = types.PreserveOriginalCrypto // Don't try to decrypt in a schema[12] manifest later, that would fail.
}
// Don't do any compression-related MIME type conversions. m.LayerInfos() should not set these edit instructions, but be explicit.
res[i].CompressionOperation = types.PreserveOriginal
res[i].CompressionAlgorithm = nil
}
options.LayerInfos = updatedEdits
return res, nil
}
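// Illustrative sketch (hypothetical digest, assuming a single-layer manifest m as above, not part
// of the vendored change): a caller-requested decryption is split into an edit list applied to the
// OCI manifest immediately, while *options is rewritten so the later schema2 edit pass no longer
// tries to decrypt.
opts := &types.ManifestUpdateOptions{
	LayerInfos: []types.BlobInfo{{Digest: "sha256:aaaa", CryptoOperation: types.Decrypt}},
}
decryptEdits, err := m.prepareLayerDecryptEditsIfNecessary(opts)
// On success, decryptEdits[0].CryptoOperation == types.Decrypt (to be applied via UpdateLayerInfos),
// and opts.LayerInfos[0].CryptoOperation is now types.PreserveOriginalCrypto.
_, _ = decryptEdits, err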
// convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType.
// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
// value.
// This does not change the state of the original manifestOCI1 object.
func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.ManifestUpdateOptions) (*manifestSchema2, error) {
func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, options *types.ManifestUpdateOptions) (*manifestSchema2, error) {
if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType)
return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest)
}
// Mostly we first make a format conversion, and _afterwards_ do layer edits. But first we need to do the layer edits
// which remove OCI-specific features, because trying to convert those layers would fail.
// So, do the layer updates for decryption.
ociManifest := m.m
layerDecryptEdits, err := m.prepareLayerDecryptEditsIfNecessary(options)
if err != nil {
return nil, err
}
if layerDecryptEdits != nil {
ociManifest = manifest.OCI1Clone(ociManifest)
if err := ociManifest.UpdateLayerInfos(layerDecryptEdits); err != nil {
return nil, err
}
}
// Create a copy of the descriptor.
config := schema2DescriptorFromOCI1Descriptor(m.m.Config)
config := schema2DescriptorFromOCI1Descriptor(ociManifest.Config)
// Above, we have already checked that this manifest refers to an image, not an OCI artifact,
// so the only difference between OCI and DockerSchema2 is the mediatypes. The
// media type of the manifest is handled by manifestSchema2FromComponents.
config.MediaType = manifest.DockerV2Schema2ConfigMediaType
layers := make([]manifest.Schema2Descriptor, len(m.m.Layers))
layers := make([]manifest.Schema2Descriptor, len(ociManifest.Layers))
for idx := range layers {
layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx])
layers[idx] = schema2DescriptorFromOCI1Descriptor(ociManifest.Layers[idx])
switch layers[idx].MediaType {
case imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaType
@ -227,6 +275,10 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.Mani
layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType
case imgspecv1.MediaTypeImageLayerZstd:
return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType)
// FIXME: s/Zsdt/Zstd/ after ocicrypt with https://github.com/containers/ocicrypt/pull/91 is released
case ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc, ociencspec.MediaTypeLayerZstdEnc,
ociencspec.MediaTypeLayerNonDistributableEnc, ociencspec.MediaTypeLayerNonDistributableGzipEnc, ociencspec.MediaTypeLayerNonDistributableZsdtEnc:
return nil, fmt.Errorf("during manifest conversion: encrypted layers (%q) are not supported in docker images", layers[idx].MediaType)
default:
return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", layers[idx].MediaType)
}
@ -244,7 +296,7 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.Mani
// This does not change the state of the original manifestOCI1 object.
func (m *manifestOCI1) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType)
return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest)
}
// We can't directly convert images to V1, but we can transitively convert via a V2 image

View file

@ -64,13 +64,8 @@ func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdat
MediaType: manifest.MediaType,
}
ret.ReadOnly.CompressionAlgorithmNames = []string{compression.GzipAlgorithmName}
ret.ReadOnly.Platform = &imgspecv1.Platform{
OS: manifest.Platform.OS,
Architecture: manifest.Platform.Architecture,
OSVersion: manifest.Platform.OSVersion,
OSFeatures: manifest.Platform.OSFeatures,
Variant: manifest.Platform.Variant,
}
platform := ociPlatformFromSchema2PlatformSpec(manifest.Platform)
ret.ReadOnly.Platform = &platform
return ret, nil
}
}
@ -119,17 +114,20 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
}
index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType
case ListOpAdd:
addInstance := Schema2ManifestDescriptor{
Schema2Descriptor{Digest: editInstance.AddDigest, Size: editInstance.AddSize, MediaType: editInstance.AddMediaType},
Schema2PlatformSpec{
OS: editInstance.AddPlatform.OS,
Architecture: editInstance.AddPlatform.Architecture,
OSVersion: editInstance.AddPlatform.OSVersion,
OSFeatures: editInstance.AddPlatform.OSFeatures,
Variant: editInstance.AddPlatform.Variant,
},
if editInstance.AddPlatform == nil {
// Should we create a struct with empty fields instead?
// Right now ListOpAdd is only called when an instance with the same platform value
// already exists in the manifest, so this should not be reached in practice.
return fmt.Errorf("adding a schema2 list instance with no platform specified is not supported")
}
addedEntries = append(addedEntries, addInstance)
addedEntries = append(addedEntries, Schema2ManifestDescriptor{
Schema2Descriptor{
Digest: editInstance.AddDigest,
Size: editInstance.AddSize,
MediaType: editInstance.AddMediaType,
},
schema2PlatformSpecFromOCIPlatform(*editInstance.AddPlatform),
})
default:
return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation)
}
@ -158,13 +156,7 @@ func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest.
}
for _, wantedPlatform := range wantedPlatforms {
for _, d := range list.Manifests {
imagePlatform := imgspecv1.Platform{
Architecture: d.Platform.Architecture,
OS: d.Platform.OS,
OSVersion: d.Platform.OSVersion,
OSFeatures: slices.Clone(d.Platform.OSFeatures),
Variant: d.Platform.Variant,
}
imagePlatform := ociPlatformFromSchema2PlatformSpec(d.Platform)
if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
return d.Digest, nil
}
@ -224,20 +216,14 @@ func Schema2ListPublicClone(list *Schema2ListPublic) *Schema2ListPublic {
func (list *Schema2ListPublic) ToOCI1Index() (*OCI1IndexPublic, error) {
components := make([]imgspecv1.Descriptor, 0, len(list.Manifests))
for _, manifest := range list.Manifests {
converted := imgspecv1.Descriptor{
platform := ociPlatformFromSchema2PlatformSpec(manifest.Platform)
components = append(components, imgspecv1.Descriptor{
MediaType: manifest.MediaType,
Size: manifest.Size,
Digest: manifest.Digest,
URLs: slices.Clone(manifest.URLs),
Platform: &imgspecv1.Platform{
OS: manifest.Platform.OS,
Architecture: manifest.Platform.Architecture,
OSFeatures: slices.Clone(manifest.Platform.OSFeatures),
OSVersion: manifest.Platform.OSVersion,
Variant: manifest.Platform.Variant,
},
}
components = append(components, converted)
Platform: &platform,
})
}
oci := OCI1IndexPublicFromComponents(components, nil)
return oci, nil
@ -312,3 +298,15 @@ func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) {
}
return schema2ListFromPublic(public), nil
}
// ociPlatformFromSchema2PlatformSpec converts a schema2 platform p to the OCI structure.
func ociPlatformFromSchema2PlatformSpec(p Schema2PlatformSpec) imgspecv1.Platform {
return imgspecv1.Platform{
Architecture: p.Architecture,
OS: p.OS,
OSVersion: p.OSVersion,
OSFeatures: slices.Clone(p.OSFeatures),
Variant: p.Variant,
// Features is not supported in OCI, and discarded.
}
}

View file

@ -1,6 +1,10 @@
package manifest
import "fmt"
import (
"fmt"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// FIXME: This is a duplicate of c/image/manifestDockerV2Schema2ConfigMediaType.
// Deduplicate that, depending on outcome of https://github.com/containers/image/pull/1791 .
@ -26,8 +30,20 @@ type NonImageArtifactError struct {
mimeType string
}
// NewNonImageArtifactError returns a NonImageArtifactError about an artifact with mimeType.
func NewNonImageArtifactError(mimeType string) error {
// NewNonImageArtifactError returns a NonImageArtifactError about an artifact manifest.
//
// This is typically called if manifest.Config.MediaType != imgspecv1.MediaTypeImageConfig .
func NewNonImageArtifactError(manifest *imgspecv1.Manifest) error {
// Callers decide based on manifest.Config.MediaType that this is not an image;
// in that case manifest.ArtifactType can be optionally defined, and if it is, it is typically
// more relevant because config may be ~absent with imgspecv1.MediaTypeEmptyJSON.
//
// If ArtifactType and Config.MediaType are both defined and non-trivial, presumably
// ArtifactType is the “top-level” one, although that's not defined by the spec.
mimeType := manifest.ArtifactType
if mimeType == "" {
mimeType = manifest.Config.MediaType
}
return NonImageArtifactError{mimeType: mimeType}
}
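// Illustrative sketch (hypothetical media types, not part of the vendored change): which MIME
// type ends up in the resulting error.
_ = NewNonImageArtifactError(&imgspecv1.Manifest{
	ArtifactType: "application/vnd.example.artifact", // reported, because ArtifactType wins when set
	Config:       imgspecv1.Descriptor{MediaType: imgspecv1.MediaTypeEmptyJSON},
})
_ = NewNonImageArtifactError(&imgspecv1.Manifest{
	Config: imgspecv1.Descriptor{MediaType: "application/vnd.example.config"}, // reported when ArtifactType is ""
})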

View file

@ -170,8 +170,19 @@ func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error {
index.Manifests = append(index.Manifests, addedEntries...)
}
if len(addedEntries) != 0 || updatedAnnotations {
slices.SortStableFunc(index.Manifests, func(a, b imgspecv1.Descriptor) bool {
return !instanceIsZstd(a) && instanceIsZstd(b)
slices.SortStableFunc(index.Manifests, func(a, b imgspecv1.Descriptor) int {
// FIXME? With Go 1.21 and cmp.Compare available, turn instanceIsZstd into an integer score that can be compared, and generalizes
// into more algorithms?
aZstd := instanceIsZstd(a)
bZstd := instanceIsZstd(b)
switch {
case aZstd == bZstd:
return 0
case !aZstd: // Implies bZstd
return -1
default: // aZstd && !bZstd
return 1
}
})
}
return nil
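// Illustrative sketch (hypothetical helper, not part of the vendored change): the same ordering
// expressed as the integer score the FIXME above hints at — zstd instances score 1 and therefore
// sort after non-zstd instances; with Go 1.21 this could feed cmp.Compare directly.
func instanceZstdScore(d imgspecv1.Descriptor) int {
	if instanceIsZstd(d) {
		return 1
	}
	return 0
}
// Usage (assuming Go 1.21's cmp package):
//	slices.SortStableFunc(index.Manifests, func(a, b imgspecv1.Descriptor) int {
//		return cmp.Compare(instanceZstdScore(a), instanceZstdScore(b))
//	})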
@ -228,13 +239,7 @@ func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzi
for manifestIndex, d := range index.Manifests {
candidate := instanceCandidate{platformIndex: math.MaxInt, manifestPosition: manifestIndex, isZstd: instanceIsZstd(d), digest: d.Digest}
if d.Platform != nil {
imagePlatform := imgspecv1.Platform{
Architecture: d.Platform.Architecture,
OS: d.Platform.OS,
OSVersion: d.Platform.OSVersion,
OSFeatures: slices.Clone(d.Platform.OSFeatures),
Variant: d.Platform.Variant,
}
imagePlatform := ociPlatformClone(*d.Platform)
platformIndex := slices.IndexFunc(wantedPlatforms, func(wantedPlatform imgspecv1.Platform) bool {
return platform.MatchesPlatform(imagePlatform, wantedPlatform)
})
@ -288,13 +293,8 @@ func OCI1IndexPublicFromComponents(components []imgspecv1.Descriptor, annotation
for i, component := range components {
var platform *imgspecv1.Platform
if component.Platform != nil {
platform = &imgspecv1.Platform{
Architecture: component.Platform.Architecture,
OS: component.Platform.OS,
OSVersion: component.Platform.OSVersion,
OSFeatures: slices.Clone(component.Platform.OSFeatures),
Variant: component.Platform.Variant,
}
platformCopy := ociPlatformClone(*component.Platform)
platform = &platformCopy
}
m := imgspecv1.Descriptor{
MediaType: component.MediaType,
@ -331,22 +331,15 @@ func (index *OCI1IndexPublic) ToSchema2List() (*Schema2ListPublic, error) {
Architecture: runtime.GOARCH,
}
}
converted := Schema2ManifestDescriptor{
components = append(components, Schema2ManifestDescriptor{
Schema2Descriptor{
MediaType: manifest.MediaType,
Size: manifest.Size,
Digest: manifest.Digest,
URLs: slices.Clone(manifest.URLs),
},
Schema2PlatformSpec{
OS: platform.OS,
Architecture: platform.Architecture,
OSFeatures: slices.Clone(platform.OSFeatures),
OSVersion: platform.OSVersion,
Variant: platform.Variant,
},
}
components = append(components, converted)
schema2PlatformSpecFromOCIPlatform(*platform),
})
}
s2 := Schema2ListPublicFromComponents(components)
return s2, nil
@ -420,3 +413,32 @@ func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) {
}
return oci1IndexFromPublic(public), nil
}
// ociPlatformClone returns an independent copy of p.
func ociPlatformClone(p imgspecv1.Platform) imgspecv1.Platform {
// The only practical way in Go to give read-only access to an array is to copy it.
// The only practical way in Go to copy a deep structure is to either do it manually field by field,
// or to use reflection (incl. a round-trip through JSON, which uses reflection).
//
// The combination of the two is just sad, and leads to code like this, which will
// need to be updated with every new Platform field.
return imgspecv1.Platform{
Architecture: p.Architecture,
OS: p.OS,
OSVersion: p.OSVersion,
OSFeatures: slices.Clone(p.OSFeatures),
Variant: p.Variant,
}
}
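// Illustrative sketch (not part of the vendored change): why the OSFeatures slice must be cloned —
// with a plain struct assignment the slice header would still alias the original backing array,
// so mutating the copy would also mutate the source.
original := imgspecv1.Platform{OS: "linux", Architecture: "amd64", OSFeatures: []string{"feature-a"}}
copied := ociPlatformClone(original)
copied.OSFeatures[0] = "feature-b" // leaves original.OSFeatures[0] untouched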
// schema2PlatformSpecFromOCIPlatform converts an OCI platform p to the schema2 structure.
func schema2PlatformSpecFromOCIPlatform(p imgspecv1.Platform) Schema2PlatformSpec {
return Schema2PlatformSpec{
Architecture: p.Architecture,
OS: p.OS,
OSVersion: p.OSVersion,
OSFeatures: slices.Clone(p.OSFeatures),
Variant: p.Variant,
Features: nil,
}
}

View file

@ -128,6 +128,10 @@ var compatibility = map[string][]string{
// the most compatible platform is first.
// If some option (arch, os, variant) is not present, a value from current platform is detected.
func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
// Note that this does not use Platform.OSFeatures and Platform.OSVersion at all.
// As of version 1.1, the OCI specification does not define these fields usefully enough
// to be interoperable, anyway.
wantedArch := runtime.GOARCH
wantedVariant := ""
if ctx != nil && ctx.ArchitectureChoice != "" {

View file

@ -15,7 +15,7 @@ import (
// It is the caller's responsibility to call the cleanup function, which closes and removes the temporary file.
// If an error occurs, inputInfo is not modified.
func ComputeBlobInfo(sys *types.SystemContext, stream io.Reader, inputInfo *types.BlobInfo) (io.Reader, func(), error) {
diskBlob, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob")
diskBlob, err := tmpdir.CreateBigFileTemp(sys, "stream-blob")
if err != nil {
return nil, nil, fmt.Errorf("creating temporary on-disk layer: %w", err)
}

View file

@ -17,10 +17,12 @@ var unixTempDirForBigFiles = builtinUnixTempDirForBigFiles
// DO NOT change this, instead see unixTempDirForBigFiles above.
const builtinUnixTempDirForBigFiles = "/var/tmp"
const prefix = "container_images_"
// TemporaryDirectoryForBigFiles returns a directory for temporary (big) files.
// On non Windows systems it avoids the use of os.TempDir(), because the default temporary directory usually falls under /tmp
// which on systemd based systems could be the unsuitable tmpfs filesystem.
func TemporaryDirectoryForBigFiles(sys *types.SystemContext) string {
func temporaryDirectoryForBigFiles(sys *types.SystemContext) string {
if sys != nil && sys.BigFilesTemporaryDir != "" {
return sys.BigFilesTemporaryDir
}
@ -32,3 +34,11 @@ func TemporaryDirectoryForBigFiles(sys *types.SystemContext) string {
}
return temporaryDirectoryForBigFiles
}
func CreateBigFileTemp(sys *types.SystemContext, name string) (*os.File, error) {
return os.CreateTemp(temporaryDirectoryForBigFiles(sys), prefix+name)
}
func MkDirBigFileTemp(sys *types.SystemContext, name string) (string, error) {
return os.MkdirTemp(temporaryDirectoryForBigFiles(sys), prefix+name)
}
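// Illustrative sketch (hypothetical caller-side code, not part of the vendored change): the new
// helpers bundle the directory selection and the "container_images_" prefix, so callers no longer
// combine os.CreateTemp with a temporary-directory lookup themselves.
f, err := tmpdir.CreateBigFileTemp(sys, "example-blob") // e.g. /var/tmp/container_images_example-blob1234567
if err != nil {
	return fmt.Errorf("creating temporary file: %w", err)
}
defer f.Close()
defer os.Remove(f.Name())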

View file

@ -154,6 +154,9 @@ func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
// but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness.
// So, we don't bother recomputing the IDs in m.History.V1Compatibility.
m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest
if info.CryptoOperation != types.PreserveOriginalCrypto {
return fmt.Errorf("encryption change (for layer %q) is not supported in schema1 manifests", info.Digest)
}
}
return nil
}

View file

@ -247,6 +247,9 @@ func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
m.LayersDescriptors[i].Digest = info.Digest
m.LayersDescriptors[i].Size = info.Size
m.LayersDescriptors[i].URLs = info.URLs
if info.CryptoOperation != types.PreserveOriginalCrypto {
return fmt.Errorf("encryption change (for layer %q) is not supported in schema2 manifests", info.Digest)
}
}
return nil
}

View file

@ -202,7 +202,7 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type
// Most software calling this without human intervention is going to expect the values to be realistic and relevant,
// and is probably better served by failing; we can always re-visit that later if we fail now, but
// if we started returning some data for OCI artifacts now, we couldn't start failing in this function later.
return nil, manifest.NewNonImageArtifactError(m.Config.MediaType)
return nil, manifest.NewNonImageArtifactError(&m.Manifest)
}
config, err := configGetter(m.ConfigInfo())
@ -253,7 +253,7 @@ func (m *OCI1) ImageID([]digest.Digest) (string, error) {
// (The only known caller of ImageID is storage/storageImageDestination.computeID,
// which can't work with non-image artifacts.)
if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
return "", manifest.NewNonImageArtifactError(m.Config.MediaType)
return "", manifest.NewNonImageArtifactError(&m.Manifest)
}
if err := m.Config.Digest.Validate(); err != nil {

View file

@ -156,7 +156,7 @@ func (t *tempDirOCIRef) deleteTempDir() error {
// createOCIRef creates the oci reference of the image
// If SystemContext.BigFilesTemporaryDir not "", overrides the temporary directory to use for storing big files
func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) {
dir, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci")
dir, err := tmpdir.MkDirBigFileTemp(sys, "oci")
if err != nil {
return tempDirOCIRef{}, fmt.Errorf("creating temp directory: %w", err)
}

View file

@ -1,393 +0,0 @@
// Package boltdb implements a BlobInfoCache backed by BoltDB.
package boltdb
import (
"fmt"
"os"
"sync"
"time"
"github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt"
)
var (
// NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade
// we can simply start over with a different filename; update blobInfoCacheFilename.
// FIXME: For CRI-O, does this need to hide information between different users?
// uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest.
uncompressedDigestBucket = []byte("uncompressedDigest")
// digestCompressorBucket stores a mapping from any digest to a compressor, or blobinfocache.Uncompressed
// It may not exist in caches created by older versions, even if uncompressedDigestBucket is present.
digestCompressorBucket = []byte("digestCompressor")
// digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest
// (as a set of key=digest, value="" pairs)
digestByUncompressedBucket = []byte("digestByUncompressed")
// knownLocationsBucket stores a nested structure of buckets, keyed by (transport name, scope string, blob digest), ultimately containing
// a bucket of (opaque location reference, BinaryMarshaller-encoded time.Time value).
knownLocationsBucket = []byte("knownLocations")
)
// Concurrency:
// See https://www.sqlite.org/src/artifact/c230a7a24?ln=994-1081 for all the issues with locks, which make it extremely
// difficult to use a single BoltDB file from multiple threads/goroutines inside a process. So, we punt and only allow one at a time.
// pathLock contains a lock for a specific BoltDB database path.
type pathLock struct {
refCount int64 // Number of threads/goroutines owning or waiting on this lock. Protected by global pathLocksMutex, NOT by the mutex field below!
mutex sync.Mutex // Owned by the thread/goroutine allowed to access the BoltDB database.
}
var (
// pathLocks contains a lock for each currently open file.
// This must be global so that independently created instances of boltDBCache exclude each other.
// The map is protected by pathLocksMutex.
// FIXME? Should this be based on device:inode numbers instead of paths?
pathLocks = map[string]*pathLock{}
pathLocksMutex = sync.Mutex{}
)
// lockPath obtains the pathLock for path.
// The caller must call unlockPath eventually.
func lockPath(path string) {
pl := func() *pathLock { // A scope for defer
pathLocksMutex.Lock()
defer pathLocksMutex.Unlock()
pl, ok := pathLocks[path]
if ok {
pl.refCount++
} else {
pl = &pathLock{refCount: 1, mutex: sync.Mutex{}}
pathLocks[path] = pl
}
return pl
}()
pl.mutex.Lock()
}
// unlockPath releases the pathLock for path.
func unlockPath(path string) {
pathLocksMutex.Lock()
defer pathLocksMutex.Unlock()
pl, ok := pathLocks[path]
if !ok {
// Should this return an error instead? BlobInfoCache ultimately ignores errors…
panic(fmt.Sprintf("Internal error: unlocking nonexistent lock for path %s", path))
}
pl.mutex.Unlock()
pl.refCount--
if pl.refCount == 0 {
delete(pathLocks, path)
}
}
// cache is a BlobInfoCache implementation which uses a BoltDB file at the specified path.
//
// Note that we don't keep the database open across operations, because that would lock the file and block any other
// users; instead, we need to open/close it for every single write or lookup.
type cache struct {
path string
}
// New returns a BlobInfoCache implementation which uses a BoltDB file at path.
//
// Most users should call blobinfocache.DefaultCache instead.
func New(path string) types.BlobInfoCache {
return new2(path)
}
func new2(path string) *cache {
return &cache{path: path}
}
// view runs the specified fn within a read-only transaction on the database.
func (bdc *cache) view(fn func(tx *bolt.Tx) error) (retErr error) {
// bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist,
// nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail — while holding
// a read lock, blocking any future writes.
// Hence this preliminary check, which is RACY: Another process could remove the file
// between the Lstat call and opening the database.
if _, err := os.Lstat(bdc.path); err != nil && os.IsNotExist(err) {
return err
}
lockPath(bdc.path)
defer unlockPath(bdc.path)
db, err := bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true})
if err != nil {
return err
}
defer func() {
if err := db.Close(); retErr == nil && err != nil {
retErr = err
}
}()
return db.View(fn)
}
// update runs the specified fn within a read-write transaction on the database.
func (bdc *cache) update(fn func(tx *bolt.Tx) error) (retErr error) {
lockPath(bdc.path)
defer unlockPath(bdc.path)
db, err := bolt.Open(bdc.path, 0600, nil)
if err != nil {
return err
}
defer func() {
if err := db.Close(); retErr == nil && err != nil {
retErr = err
}
}()
return db.Update(fn)
}
// uncompressedDigest implements BlobInfoCache.UncompressedDigest within the provided read-only transaction.
func (bdc *cache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest {
if b := tx.Bucket(uncompressedDigestBucket); b != nil {
if uncompressedBytes := b.Get([]byte(anyDigest.String())); uncompressedBytes != nil {
d, err := digest.Parse(string(uncompressedBytes))
if err == nil {
return d
}
// FIXME? Log err (but throttle the log volume on repeated accesses)?
}
}
// Presence in digestsByUncompressedBucket implies that anyDigest must already refer to an uncompressed digest.
// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
// when we already record a (compressed, uncompressed) pair.
if b := tx.Bucket(digestByUncompressedBucket); b != nil {
if b = b.Bucket([]byte(anyDigest.String())); b != nil {
c := b.Cursor()
if k, _ := c.First(); k != nil { // The bucket is non-empty
return anyDigest
}
}
}
return ""
}
// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
// May return anyDigest if it is known to be uncompressed.
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
func (bdc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
var res digest.Digest
if err := bdc.view(func(tx *bolt.Tx) error {
res = bdc.uncompressedDigest(tx, anyDigest)
return nil
}); err != nil { // Including os.IsNotExist(err)
return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
}
return res
}
// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
// It's allowed for anyDigest == uncompressed.
// WARNING: Only call this for LOCALLY VERIFIED data; don't record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
func (bdc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
_ = bdc.update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists(uncompressedDigestBucket)
if err != nil {
return err
}
key := []byte(anyDigest.String())
if previousBytes := b.Get(key); previousBytes != nil {
previous, err := digest.Parse(string(previousBytes))
if err != nil {
return err
}
if previous != uncompressed {
logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
}
}
if err := b.Put(key, []byte(uncompressed.String())); err != nil {
return err
}
b, err = tx.CreateBucketIfNotExists(digestByUncompressedBucket)
if err != nil {
return err
}
b, err = b.CreateBucketIfNotExists([]byte(uncompressed.String()))
if err != nil {
return err
}
if err := b.Put([]byte(anyDigest.String()), []byte{}); err != nil { // Possibly writing the same []byte{} presence marker again.
return err
}
return nil
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}
// RecordDigestCompressorName records that the blob with digest anyDigest was compressed with the specified
// compressor, or is blobinfocache.Uncompressed.
// WARNING: Only call this for LOCALLY VERIFIED data; don't record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
func (bdc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
_ = bdc.update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists(digestCompressorBucket)
if err != nil {
return err
}
key := []byte(anyDigest.String())
if previousBytes := b.Get(key); previousBytes != nil {
if string(previousBytes) != compressorName {
logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, string(previousBytes), compressorName)
}
}
if compressorName == blobinfocache.UnknownCompression {
return b.Delete(key)
}
return b.Put(key, []byte(compressorName))
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}
// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data.
func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
_ = bdc.update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists(knownLocationsBucket)
if err != nil {
return err
}
b, err = b.CreateBucketIfNotExists([]byte(transport.Name()))
if err != nil {
return err
}
b, err = b.CreateBucketIfNotExists([]byte(scope.Opaque))
if err != nil {
return err
}
b, err = b.CreateBucketIfNotExists([]byte(blobDigest.String()))
if err != nil {
return err
}
value, err := time.Now().MarshalBinary()
if err != nil {
return err
}
if err := b.Put([]byte(location.Opaque), value); err != nil { // Possibly overwriting an older entry.
return err
}
return nil
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}
// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in scopeBucket with corresponding compression info from compressionBucket (if compressionBucket is not nil), and returns the result of appending them to candidates.
func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket, compressionBucket *bolt.Bucket, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime {
digestKey := []byte(digest.String())
b := scopeBucket.Bucket(digestKey)
if b == nil {
return candidates
}
compressorName := blobinfocache.UnknownCompression
if compressionBucket != nil {
// the bucket won't exist if the cache was created by a v1 implementation and
// hasn't yet been updated by a v2 implementation
if compressorNameValue := compressionBucket.Get(digestKey); len(compressorNameValue) > 0 {
compressorName = string(compressorNameValue)
}
}
if compressorName == blobinfocache.UnknownCompression && requireCompressionInfo {
return candidates
}
_ = b.ForEach(func(k, v []byte) error {
t := time.Time{}
if err := t.UnmarshalBinary(v); err != nil {
return err
}
candidates = append(candidates, prioritize.CandidateWithTime{
Candidate: blobinfocache.BICReplacementCandidate2{
Digest: digest,
CompressorName: compressorName,
Location: types.BICLocationReference{Opaque: string(k)},
},
LastSeen: t,
})
return nil
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
return candidates
}
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused
// within the specified (transport scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (bdc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
return bdc.candidateLocations(transport, scope, primaryDigest, canSubstitute, true)
}
func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 {
res := []prioritize.CandidateWithTime{}
var uncompressedDigestValue digest.Digest // = ""
if err := bdc.view(func(tx *bolt.Tx) error {
scopeBucket := tx.Bucket(knownLocationsBucket)
if scopeBucket == nil {
return nil
}
scopeBucket = scopeBucket.Bucket([]byte(transport.Name()))
if scopeBucket == nil {
return nil
}
scopeBucket = scopeBucket.Bucket([]byte(scope.Opaque))
if scopeBucket == nil {
return nil
}
// compressionBucket won't have been created if previous writers never recorded info about compression,
// and we don't want to fail just because of that
compressionBucket := tx.Bucket(digestCompressorBucket)
res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, primaryDigest, requireCompressionInfo)
if canSubstitute {
if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" {
b := tx.Bucket(digestByUncompressedBucket)
if b != nil {
b = b.Bucket([]byte(uncompressedDigestValue.String()))
if b != nil {
if err := b.ForEach(func(k, _ []byte) error {
d, err := digest.Parse(string(k))
if err != nil {
return err
}
if d != primaryDigest && d != uncompressedDigestValue {
res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, d, requireCompressionInfo)
}
return nil
}); err != nil {
return err
}
}
}
if uncompressedDigestValue != primaryDigest {
res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, uncompressedDigestValue, requireCompressionInfo)
}
}
}
return nil
}); err != nil { // Including os.IsNotExist(err)
return []blobinfocache.BICReplacementCandidate2{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
}
return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue)
}
// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
// within the specified (transport scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
return blobinfocache.CandidateLocationsFromV2(bdc.candidateLocations(transport, scope, primaryDigest, canSubstitute, false))
}

View file

@ -6,8 +6,8 @@ import (
"path/filepath"
"github.com/containers/image/v5/internal/rootless"
"github.com/containers/image/v5/pkg/blobinfocache/boltdb"
"github.com/containers/image/v5/pkg/blobinfocache/memory"
"github.com/containers/image/v5/pkg/blobinfocache/sqlite"
"github.com/containers/image/v5/types"
"github.com/sirupsen/logrus"
)
@ -15,7 +15,7 @@ import (
const (
// blobInfoCacheFilename is the file name used for blob info caches.
// If the format changes in an incompatible way, increase the version number.
blobInfoCacheFilename = "blob-info-cache-v1.boltdb"
blobInfoCacheFilename = "blob-info-cache-v1.sqlite"
// systemBlobInfoCacheDir is the directory containing the blob info cache (in blobInfocacheFilename) for root-running processes.
systemBlobInfoCacheDir = "/var/lib/containers/cache"
)
@ -57,10 +57,20 @@ func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
}
path := filepath.Join(dir, blobInfoCacheFilename)
if err := os.MkdirAll(dir, 0700); err != nil {
logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", blobInfoCacheFilename, err)
logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", path, err)
return memory.New()
}
logrus.Debugf("Using blob info cache at %s", path)
return boltdb.New(path)
// It might make sense to keep a single sqlite cache object, and a single initialized sqlite connection, open
// as a global singleton, for the vast majority of callers who don't override the cache location.
// OTOH that would keep a file descriptor open forever, even for long-term callers who copy images rarely,
// and the performance benefit to this over using an Open()/Close() pair for a single image copy is < 10%.
cache, err := sqlite.New(path)
if err != nil {
logrus.Debugf("Error creating a SQLite blob info cache at %s, using a memory-only cache: %v", path, err)
return memory.New()
}
logrus.Debugf("Using SQLite blob info cache at %s", path)
return cache
}
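// Illustrative sketch (hypothetical caller, not part of the vendored change; the internal
// blobinfocache import alias is assumed): pairing Open()/Close() around an image copy when the
// returned cache also implements the internal v2 interface, as the comment above describes.
cache := blobinfocache.DefaultCache(sys)
if bic2, ok := cache.(internalblobinfocache.BlobInfoCache2); ok {
	bic2.Open()
	defer bic2.Close()
}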

View file

@ -82,6 +82,7 @@ func (css *candidateSortState) Swap(i, j int) {
func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []blobinfocache.BICReplacementCandidate2 {
// We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
// compare equal.
// FIXME: Use slices.SortFunc after we update to Go 1.20 (Go 1.21?) and Time.Compare and cmp.Compare are available.
sort.Sort(&candidateSortState{
cs: cs,
primaryDigest: primaryDigest,

View file

@ -27,7 +27,7 @@ type cache struct {
uncompressedDigests map[digest.Digest]digest.Digest
digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest] // stores a set of digests for each uncompressed digest
knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Unknown, for each digest
compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Unknown (not blobinfocache.UnknownCompression), for each digest
}
// New returns a BlobInfoCache implementation which is in-memory only.
@ -51,6 +51,15 @@ func new2() *cache {
}
}
// Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close().
// Note that public callers may call the types.BlobInfoCache operations without Open()/Close().
func (mem *cache) Open() {
}
// Close destroys state created by Open().
func (mem *cache) Close() {
}
// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
// May return anyDigest if it is known to be uncompressed.
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
@ -114,6 +123,9 @@ func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope type
func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compressorName string) {
mem.mutex.Lock()
defer mem.mutex.Unlock()
if previous, ok := mem.compressors[blobDigest]; ok && previous != compressorName {
logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", blobDigest, previous, compressorName)
}
if compressorName == blobinfocache.UnknownCompression {
delete(mem.compressors, blobDigest)
return

View file

@ -0,0 +1,553 @@
// Package sqlite implements a BlobInfoCache backed by SQLite.
package sqlite
import (
"database/sql"
"errors"
"fmt"
"sync"
"time"
"github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
"github.com/containers/image/v5/types"
_ "github.com/mattn/go-sqlite3" // Registers the "sqlite3" backend backend for database/sql
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
const (
// NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade
// we can simply start over with a different filename; update blobInfoCacheFilename.
// That also means we don't have to worry about co-existing readers/writers which know different versions of the schema
// (which would require compatibility in both directions).
// Assembled sqlite options used when opening the database.
sqliteOptions = "?" +
// Deal with timezone automatically.
// go-sqlite3 always _records_ timestamps as a text: time in local time + a time zone offset.
// _loc affects how the values are _parsed_: (which timezone is assumed for numeric timestamps or for text which does not specify an offset, or)
// if the time zone offset matches the specified time zone, the timestamp is assumed to be in that time zone / location;
// (otherwise an unnamed time zone carrying just a hard-coded offset, but no location / DST rules is used).
"_loc=auto" +
// Force an fsync after each transaction (https://www.sqlite.org/pragma.html#pragma_synchronous).
"&_sync=FULL" +
// Allow foreign keys (https://www.sqlite.org/pragma.html#pragma_foreign_keys).
// We don't currently use any foreign keys, but this is a good choice long-term (not default in SQLite only for historical reasons).
"&_foreign_keys=1" +
// Use BEGIN EXCLUSIVE (https://www.sqlite.org/lang_transaction.html);
// i.e. obtain a write lock for _all_ transactions at the transaction start (never use a read lock,
// never upgrade from a read to a write lock - that can fail if multiple read lock owners try to do that simultaneously).
//
// This, together with go-sqlite3's default for _busy_timeout=5000, means that we should never see a “database is locked” error,
// the database should block on the exclusive lock when starting a transaction, and the problematic case of two simultaneous
// holders of a read lock trying to upgrade to a write lock (and one necessarily failing) is prevented.
// Compare https://github.com/mattn/go-sqlite3/issues/274 .
//
// Ideally the BEGIN / BEGIN EXCLUSIVE decision could be made per-transaction, compare https://github.com/mattn/go-sqlite3/pull/1167
// or https://github.com/mattn/go-sqlite3/issues/400 .
// The currently-proposed workaround is to create two different SQL “databases” (= connection pools) with different _txlock settings,
// which seems rather wasteful.
"&_txlock=exclusive"
)
// cache is a BlobInfoCache implementation which uses a SQLite file at the specified path.
type cache struct {
path string
// The database/sql package says “It is rarely necessary to close a DB.”, and steers towards a long-term *sql.DB connection pool.
// That's probably very applicable for database-backed services, where the database is the primary data store. That's not necessarily
// the case for callers of c/image, where image operations might be a small proportion of the total runtime, and the cache is fairly
// incidental even to the image operations. It's also hard for us to use that model, because the public BlobInfoCache object doesn't have
// a Close method, so creating a lot of single-use caches could leak data.
//
// Instead, the private BlobInfoCache2 interface provides Open/Close methods, and they are called by c/image/copy.Image.
// This amortizes the cost of opening/closing the SQLite state over a single image copy, while keeping no long-term resources open.
// Some rough benchmarks in https://github.com/containers/image/pull/2092 suggest relative costs on the order of "25" for a single
// *sql.DB left open long-term, "27" for a *sql.DB open for a single image copy, and "40" for opening/closing a *sql.DB for every
// single transaction; so the Open/Close per image copy seems a reasonable compromise (especially compared to the previous implementation,
// somewhere around "700").
lock sync.Mutex
// The following fields can only be accessed with lock held.
refCount int // number of outstanding Open() calls
db *sql.DB // nil if not set (may happen even if refCount > 0 on errors)
}
// New returns a BlobInfoCache implementation which uses a SQLite file at path.
//
// Most users should call blobinfocache.DefaultCache instead.
func New(path string) (types.BlobInfoCache, error) {
return new2(path)
}
func new2(path string) (*cache, error) {
db, err := rawOpen(path)
if err != nil {
return nil, fmt.Errorf("initializing blob info cache at %q: %w", path, err)
}
defer db.Close()
// We don't check the schema before every operation, because that would be costly
// and because we assume schema changes will be handled by using a different path.
if err := ensureDBHasCurrentSchema(db); err != nil {
return nil, err
}
return &cache{
path: path,
refCount: 0,
db: nil,
}, nil
}
// rawOpen returns a new *sql.DB for path.
// The caller should arrange for it to be .Close()d.
func rawOpen(path string) (*sql.DB, error) {
// This exists to centralize the use of sqliteOptions.
return sql.Open("sqlite3", path+sqliteOptions)
}
// Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close().
// Note that public callers may call the types.BlobInfoCache operations without Open()/Close().
func (sqc *cache) Open() {
sqc.lock.Lock()
defer sqc.lock.Unlock()
if sqc.refCount == 0 {
db, err := rawOpen(sqc.path)
if err != nil {
logrus.Warnf("Error opening (previously-succesfully-opened) blob info cache at %q: %v", sqc.path, err)
db = nil // But still increase sqc.refCount, because a .Close() will happen
}
sqc.db = db
}
sqc.refCount++
}
// Close destroys state created by Open().
func (sqc *cache) Close() {
sqc.lock.Lock()
defer sqc.lock.Unlock()
switch sqc.refCount {
case 0:
logrus.Errorf("internal error using pkg/blobinfocache/sqlite.cache: Close() without a matching Open()")
return
case 1:
if sqc.db != nil {
sqc.db.Close()
sqc.db = nil
}
}
sqc.refCount--
}
type void struct{} // So that we don't have to write struct{}{} all over the place
// transaction calls fn within a read-write transaction in sqc.
func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (T, error) {
db, closeDB, err := func() (*sql.DB, func(), error) { // A scope for defer
sqc.lock.Lock()
defer sqc.lock.Unlock()
if sqc.db != nil {
return sqc.db, func() {}, nil
}
db, err := rawOpen(sqc.path)
if err != nil {
return nil, nil, fmt.Errorf("opening blob info cache at %q: %w", sqc.path, err)
}
return db, func() { db.Close() }, nil
}()
if err != nil {
var zeroRes T // A zero value of T
return zeroRes, err
}
defer closeDB()
return dbTransaction(db, fn)
}
// dbTransaction calls fn within a read-write transaction in db.
func dbTransaction[T any](db *sql.DB, fn func(tx *sql.Tx) (T, error)) (T, error) {
// Ideally we should be able to distinguish between read-only and read-write transactions, see the _txlock=exclusive discussion.
var zeroRes T // A zero value of T
tx, err := db.Begin()
if err != nil {
return zeroRes, fmt.Errorf("beginning transaction: %w", err)
}
succeeded := false
defer func() {
if !succeeded {
if err := tx.Rollback(); err != nil {
logrus.Errorf("Rolling back transaction: %v", err)
}
}
}()
res, err := fn(tx)
if err != nil {
return zeroRes, err
}
if err := tx.Commit(); err != nil {
return zeroRes, fmt.Errorf("committing transaction: %w", err)
}
succeeded = true
return res, nil
}
// querySingleValue executes a SELECT which is expected to return at most one row with a single column.
// It returns (value, true, nil) on success, or (value, false, nil) if no row was returned.
func querySingleValue[T any](tx *sql.Tx, query string, params ...any) (T, bool, error) {
var value T
if err := tx.QueryRow(query, params...).Scan(&value); err != nil {
var zeroValue T // A zero value of T
if errors.Is(err, sql.ErrNoRows) {
return zeroValue, false, nil
}
return zeroValue, false, err
}
return value, true, nil
}
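// Illustrative sketch, not from upstream: querySingleValue separates "no row" from a real
// error, so existence checks read naturally. exampleHasCompressor is hypothetical.
func exampleHasCompressor(tx *sql.Tx, d digest.Digest) (bool, error) {
	_, found, err := querySingleValue[int](tx, "SELECT 1 FROM DigestCompressors WHERE digest = ?", d.String())
	return found, err
}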
// ensureDBHasCurrentSchema adds the necessary tables and indices to a database.
// This is typically used when creating a previously-nonexistent database.
// We don't really anticipate schema migrations; with c/image usually vendored, not using
// shared libraries, migrating a schema on an existing database would affect old-version users.
// Instead, schema changes are likely to be implemented by using a different cache file name,
// and leaving existing caches around for old users.
func ensureDBHasCurrentSchema(db *sql.DB) error {
// Considered schema design alternatives:
//
// (Overall, considering the network latency and disk I/O costs of many-megabyte layer pulls which are happening while referring
// to the blob info cache, it seems reasonable to prioritize readability over micro-optimization of this database.)
//
// * This schema uses the text representation of digests.
//
// We use the fairly wasteful text with hexadecimal digits because digest.Digest does not define a binary representation;
// and the way digest.Digest.Hex() is deprecated in favor of digest.Digest.Encoded(), and the way digest.Algorithm
// is documented to "define the string encoding" suggests that assuming a hexadecimal representation and turning that
// into binary ourselves is not a good idea in general; we would have to special-case the currently-known algorithm
// — and that would require us to implement two code paths, one of them basically never exercised / never tested.
//
// * There are two separate items for recording the uncompressed digest and digest compressors.
// Alternatively, we could have a single "digest facts" table with NULLable columns.
//
// The way the BlobInfoCache API works, we are only going to write one value at a time, so
// sharing a table would not be any more efficient for writes (same number of lookups, larger row tuples).
// Reads in candidateLocations would not be more efficient either: the searches in DigestCompressors and DigestUncompressedPairs
// do not coincide (we want a compressor for every candidate, but the uncompressed digest only for the primary digest; and then
// we search in DigestUncompressedPairs by uncompressed digest, not by the primary key).
//
// Also, using separate items allows the single-item writes to be done using a simple INSERT OR REPLACE, instead of having to
// do a more verbose ON CONFLICT(…) DO UPDATE SET … = ….
//
// * Joins (the two that exist in appendReplacementCandidates) are based on the text representation of digests.
//
// Using integer primary keys might make the joins themselves a bit more efficient, but then we would need to involve an extra
// join to translate from/to the user-provided digests anyway. If anything, that extra join (potentially more btree lookups)
// is probably costlier than comparing a few more bytes of data.
//
// Perhaps more importantly, storing digest texts directly makes the database dumps much easier to read for humans without
// having to do extra steps to decode the integers into digest values (either by running sqlite commands with joins, or mentally).
//
items := []struct{ itemName, command string }{
{
"DigestUncompressedPairs",
`CREATE TABLE IF NOT EXISTS DigestUncompressedPairs(` +
// index implied by PRIMARY KEY
`anyDigest TEXT PRIMARY KEY NOT NULL,` +
// DigestUncompressedPairs_index_uncompressedDigest
`uncompressedDigest TEXT NOT NULL
)`,
},
{
"DigestUncompressedPairs_index_uncompressedDigest",
`CREATE INDEX IF NOT EXISTS DigestUncompressedPairs_index_uncompressedDigest ON DigestUncompressedPairs(uncompressedDigest)`,
},
{
"DigestCompressors",
`CREATE TABLE IF NOT EXISTS DigestCompressors(` +
// index implied by PRIMARY KEY
`digest TEXT PRIMARY KEY NOT NULL,` +
// May include blobinfocache.Uncompressed (not blobinfocache.UnknownCompression).
`compressor TEXT NOT NULL
)`,
},
{
"KnownLocations",
`CREATE TABLE IF NOT EXISTS KnownLocations(
transport TEXT NOT NULL,
scope TEXT NOT NULL,
digest TEXT NOT NULL,
location TEXT NOT NULL,` +
// TIMESTAMP is parsed by SQLite with NUMERIC affinity, but go-sqlite3 stores text in the (Go formatting semantics)
// format "2006-01-02 15:04:05.999999999-07:00".
// See also the _loc option in the sql.Open data source name.
`time TIMESTAMP NOT NULL,` +
// Implies an index.
// We also search by (transport, scope, digest); that doesn't need an extra index
// because it is a prefix of the implied primary-key index.
`PRIMARY KEY (transport, scope, digest, location)
)`,
},
}
_, err := dbTransaction(db, func(tx *sql.Tx) (void, error) {
// If the last-created item exists, assume nothing needs to be done.
lastItemName := items[len(items)-1].itemName
_, found, err := querySingleValue[int](tx, "SELECT 1 FROM sqlite_schema WHERE name=?", lastItemName)
if err != nil {
return void{}, fmt.Errorf("checking if SQLite schema item %q exists: %w", lastItemName, err)
}
if !found {
// Item does not exist, assuming a fresh database.
for _, i := range items {
if _, err := tx.Exec(i.command); err != nil {
return void{}, fmt.Errorf("creating item %s: %w", i.itemName, err)
}
}
}
return void{}, nil
})
return err
}
// uncompressedDigest implements types.BlobInfoCache.UncompressedDigest within a transaction.
func (sqc *cache) uncompressedDigest(tx *sql.Tx, anyDigest digest.Digest) (digest.Digest, error) {
uncompressedString, found, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestUncompressedPairs WHERE anyDigest = ?", anyDigest.String())
if err != nil {
return "", err
}
if found {
d, err := digest.Parse(uncompressedString)
if err != nil {
return "", err
}
return d, nil
}
// A record as uncompressedDigest implies that anyDigest must already refer to an uncompressed digest.
// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
// when we already record a (compressed, uncompressed) pair.
_, found, err = querySingleValue[int](tx, "SELECT 1 FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", anyDigest.String())
if err != nil {
return "", err
}
if found {
return anyDigest, nil
}
return "", nil
}
// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
// May return anyDigest if it is known to be uncompressed.
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
func (sqc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
res, err := transaction(sqc, func(tx *sql.Tx) (digest.Digest, error) {
return sqc.uncompressedDigest(tx, anyDigest)
})
if err != nil {
return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
}
return res
}
// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
// It's allowed for anyDigest == uncompressed.
// WARNING: Only call this for LOCALLY VERIFIED data; don't record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
func (sqc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
previousString, gotPrevious, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestUncompressedPairs WHERE anyDigest = ?", anyDigest.String())
if err != nil {
return void{}, fmt.Errorf("looking for uncompressed digest for %q", anyDigest)
}
if gotPrevious {
previous, err := digest.Parse(previousString)
if err != nil {
return void{}, err
}
if previous != uncompressed {
logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
}
}
if _, err := tx.Exec("INSERT OR REPLACE INTO DigestUncompressedPairs(anyDigest, uncompressedDigest) VALUES (?, ?)",
anyDigest.String(), uncompressed.String()); err != nil {
return void{}, fmt.Errorf("recording uncompressed digest %q for %q: %w", uncompressed, anyDigest, err)
}
return void{}, nil
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}
// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data.
func (sqc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, location types.BICLocationReference) {
_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
if _, err := tx.Exec("INSERT OR REPLACE INTO KnownLocations(transport, scope, digest, location, time) VALUES (?, ?, ?, ?, ?)",
transport.Name(), scope.Opaque, digest.String(), location.Opaque, time.Now()); err != nil { // Possibly overwriting an older entry.
return void{}, fmt.Errorf("recording known location %q for (%q, %q, %q): %w",
location.Opaque, transport.Name(), scope.Opaque, digest.String(), err)
}
return void{}, nil
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}
// RecordDigestCompressorName records a compressor for the blob with the specified digest,
// or Uncompressed or UnknownCompression.
// WARNING: Only call this with LOCALLY VERIFIED data; don't record a compressor for a
// digest just because some remote author claims so (e.g. because a manifest says so);
// otherwise the cache could be poisoned and cause us to make incorrect edits to type
// information in a manifest.
func (sqc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
previous, gotPrevious, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", anyDigest.String())
if err != nil {
return void{}, fmt.Errorf("looking for compressor of for %q", anyDigest)
}
if gotPrevious && previous != compressorName {
logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous, compressorName)
}
if compressorName == blobinfocache.UnknownCompression {
if _, err := tx.Exec("DELETE FROM DigestCompressors WHERE digest = ?", anyDigest.String()); err != nil {
return void{}, fmt.Errorf("deleting compressor for digest %q: %w", anyDigest, err)
}
} else {
if _, err := tx.Exec("INSERT OR REPLACE INTO DigestCompressors(digest, compressor) VALUES (?, ?)",
anyDigest.String(), compressorName); err != nil {
return void{}, fmt.Errorf("recording compressor %q for %q: %w", compressorName, anyDigest, err)
}
}
return void{}, nil
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}
// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, requireCompressionInfo bool) ([]prioritize.CandidateWithTime, error) {
var rows *sql.Rows
var err error
if requireCompressionInfo {
rows, err = tx.Query("SELECT location, time, compressor FROM KnownLocations JOIN DigestCompressors "+
"ON KnownLocations.digest = DigestCompressors.digest "+
"WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
transport.Name(), scope.Opaque, digest.String())
} else {
rows, err = tx.Query("SELECT location, time, IFNULL(compressor, ?) FROM KnownLocations "+
"LEFT JOIN DigestCompressors ON KnownLocations.digest = DigestCompressors.digest "+
"WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
blobinfocache.UnknownCompression,
transport.Name(), scope.Opaque, digest.String())
}
if err != nil {
return nil, fmt.Errorf("looking up candidate locations: %w", err)
}
defer rows.Close()
for rows.Next() {
var location string
var time time.Time
var compressorName string
if err := rows.Scan(&location, &time, &compressorName); err != nil {
return nil, fmt.Errorf("scanning candidate: %w", err)
}
candidates = append(candidates, prioritize.CandidateWithTime{
Candidate: blobinfocache.BICReplacementCandidate2{
Digest: digest,
CompressorName: compressorName,
Location: types.BICLocationReference{Opaque: location},
},
LastSeen: time,
})
}
if err := rows.Err(); err != nil {
return nil, fmt.Errorf("iterating through locations: %w", err)
}
return candidates, nil
}
// CandidateLocations2 returns a prioritized, limited number of blobs and their locations
// that could possibly be reused within the specified (transport, scope) (if they still
// exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if
// canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
// up variants of the blob which have the same uncompressed digest.
//
// The CompressorName fields in returned data must never be UnknownCompression.
func (sqc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
return sqc.candidateLocations(transport, scope, digest, canSubstitute, true)
}
func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 {
var uncompressedDigest digest.Digest // = ""
res, err := transaction(sqc, func(tx *sql.Tx) ([]prioritize.CandidateWithTime, error) {
res := []prioritize.CandidateWithTime{}
res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, requireCompressionInfo)
if err != nil {
return nil, err
}
if canSubstitute {
uncompressedDigest, err = sqc.uncompressedDigest(tx, primaryDigest)
if err != nil {
return nil, err
}
// FIXME? We could integrate this with appendReplacementCandidates into a single join instead of N+1 queries.
// (In the extreme, we could turn _everything_ this function does into a single query.
// And going even further, even DestructivelyPrioritizeReplacementCandidates could be turned into SQL.)
// For now, we prioritize simplicity, and sharing both code and implementation structure with the other cache implementations.
rows, err := tx.Query("SELECT anyDigest FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", uncompressedDigest.String())
if err != nil {
return nil, fmt.Errorf("querying for other digests: %w", err)
}
defer rows.Close()
for rows.Next() {
var otherDigestString string
if err := rows.Scan(&otherDigestString); err != nil {
return nil, fmt.Errorf("scanning other digest: %w", err)
}
otherDigest, err := digest.Parse(otherDigestString)
if err != nil {
return nil, err
}
if otherDigest != primaryDigest && otherDigest != uncompressedDigest {
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, requireCompressionInfo)
if err != nil {
return nil, err
}
}
}
if err := rows.Err(); err != nil {
return nil, fmt.Errorf("iterating through other digests: %w", err)
}
if uncompressedDigest != primaryDigest {
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, requireCompressionInfo)
if err != nil {
return nil, err
}
}
}
return res, nil
})
if err != nil {
return []blobinfocache.BICReplacementCandidate2{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
}
return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
}
// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
// within the specified (transport, scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (sqc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
return blobinfocache.CandidateLocationsFromV2(sqc.candidateLocations(transport, scope, digest, canSubstitute, false))
}
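// Illustrative sketch, not from upstream: the typical read path of this cache during a
// copy operation. exampleLookup and its arguments are hypothetical; passing
// canSubstitute=true also returns blobs known (via RecordDigestUncompressedPair) to share
// the same uncompressed digest.
func exampleLookup(sqc *cache, transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest) []blobinfocache.BICReplacementCandidate2 {
	return sqc.CandidateLocations2(transport, scope, blobDigest, true)
}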

View file

@ -519,11 +519,12 @@ func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (authPath, bool,
if sys.LegacyFormatAuthFilePath != "" {
return authPath{path: sys.LegacyFormatAuthFilePath, legacyFormat: true}, true, nil
}
if sys.RootForImplicitAbsolutePaths != "" {
// Note: RootForImplicitAbsolutePaths should not affect paths starting with $HOME
if sys.RootForImplicitAbsolutePaths != "" && goOS == "linux" {
return newAuthPathDefault(filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()))), false, nil
}
}
if goOS == "windows" || goOS == "darwin" {
if goOS != "linux" {
return newAuthPathDefault(filepath.Join(homedir.Get(), nonLinuxAuthFilePath)), false, nil
}

View file

@ -10,9 +10,9 @@ import (
"errors"
"fmt"
"github.com/secure-systems-lab/go-securesystemslib/encrypted"
"github.com/sigstore/sigstore/pkg/cryptoutils"
"github.com/sigstore/sigstore/pkg/signature"
"github.com/theupdateframework/go-tuf/encrypted"
)
// The following code was copied from github.com/sigstore.

View file

@ -6,7 +6,7 @@ const (
// VersionMajor is for an API incompatible changes
VersionMajor = 5
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 27
VersionMinor = 28
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 0

View file

@ -13,12 +13,12 @@ linters:
linters-settings:
depguard:
list-type: denylist
include-go-root: true
packages:
# use "io" or "os" instead
# https://go.dev/doc/go1.16#ioutil
- io/ioutil
rules:
main:
files:
- $all
deny:
- pkg: "io/ioutil"
revive:
severity: error
@ -29,3 +29,7 @@ linters-settings:
- name: error-strings
disabled: false
staticcheck:
# Suppress reports of deprecated packages
checks: ["-SA1019"]

View file

@ -1,3 +1,3 @@
## The OCIcrypt Library Project Community Code of Conduct
The OCIcrypt Library project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/master/CODE-OF-CONDUCT.md).
The OCIcrypt Library project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md).

View file

@ -1,3 +1,3 @@
## Security and Disclosure Information Policy for the OCIcrypt Library Project
The OCIcrypt Library Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/master/SECURITY.md) for the Containers Projects.
The OCIcrypt Library Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/main/SECURITY.md) for the Containers Projects.

View file

@ -102,7 +102,7 @@ func GetDefaultModuleDirectories() []string {
"/usr/lib/softhsm/", // Debian,Ubuntu
}
// Debian directory: /usr/lib/(x86_64|aarch64|arm|powerpc64le|s390x)-linux-gnu/
// Debian directory: /usr/lib/(x86_64|aarch64|arm|powerpc64le|riscv64|s390x)-linux-gnu/
hosttype, ostype, q := getHostAndOsType()
if len(hosttype) > 0 {
dir := fmt.Sprintf("/usr/lib/%s-%s-%s/", hosttype, ostype, q)

View file

@ -105,6 +105,8 @@ func getHostAndOsType() (string, string, string) {
ht = "x86_64"
case "ppc64le":
ht = "powerpc64le"
case "riscv64":
ht = "riscv64"
case "s390x":
ht = "s390x"
}

View file

@ -24,7 +24,7 @@ import (
"github.com/containers/ocicrypt/config"
"github.com/containers/ocicrypt/keywrap"
"github.com/containers/ocicrypt/utils"
jose "gopkg.in/square/go-jose.v2"
"github.com/go-jose/go-jose/v3"
)
type jweKeyWrapper struct {

View file

@ -26,14 +26,13 @@ import (
"strings"
"github.com/containers/ocicrypt/crypto/pkcs11"
"github.com/go-jose/go-jose/v3"
"golang.org/x/crypto/openpgp"
json "gopkg.in/square/go-jose.v2"
)
// parseJWKPrivateKey parses the input byte array as a JWK and makes sure it's a private key
func parseJWKPrivateKey(privKey []byte, prefix string) (interface{}, error) {
jwk := json.JSONWebKey{}
jwk := jose.JSONWebKey{}
err := jwk.UnmarshalJSON(privKey)
if err != nil {
return nil, fmt.Errorf("%s: Could not parse input as JWK: %w", prefix, err)
@ -46,7 +45,7 @@ func parseJWKPrivateKey(privKey []byte, prefix string) (interface{}, error) {
// parseJWKPublicKey parses the input byte array as a JWK
func parseJWKPublicKey(privKey []byte, prefix string) (interface{}, error) {
jwk := json.JSONWebKey{}
jwk := jose.JSONWebKey{}
err := jwk.UnmarshalJSON(privKey)
if err != nil {
return nil, fmt.Errorf("%s: Could not parse input as JWK: %w", prefix, err)

View file

@ -92,7 +92,10 @@ func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInf
return err
}
if s.Dev() != sourceStat.Dev() {
// Don't cross mount points. This ignores file mounts to avoid
// generating a diff which deletes all files following the
// mount.
if s.Dev() != sourceStat.Dev() && s.IsDir() {
return filepath.SkipDir
}

View file

@ -8,6 +8,7 @@ import (
"archive/tar"
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"time"
@ -99,7 +100,7 @@ const (
// FooterSizeSupported is the footer size supported by this implementation.
// Newer versions of the image format might increase this value, so reject
// any version that is not supported.
FooterSizeSupported = 56
FooterSizeSupported = 64
)
var (
@ -108,7 +109,7 @@ var (
// https://tools.ietf.org/html/rfc8478#section-3.1.2
skippableFrameMagic = []byte{0x50, 0x2a, 0x4d, 0x18}
ZstdChunkedFrameMagic = []byte{0x47, 0x6e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78}
ZstdChunkedFrameMagic = []byte{0x47, 0x4e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78}
)
func appendZstdSkippableFrame(dest io.Writer, data []byte) error {
@ -183,13 +184,19 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
return err
}
// Store the offset to the manifest and its size in LE order
manifestDataLE := make([]byte, FooterSizeSupported)
binary.LittleEndian.PutUint64(manifestDataLE, manifestOffset)
binary.LittleEndian.PutUint64(manifestDataLE[8*1:], uint64(len(compressedManifest)))
binary.LittleEndian.PutUint64(manifestDataLE[8*2:], uint64(len(manifest)))
binary.LittleEndian.PutUint64(manifestDataLE[8*3:], uint64(ManifestTypeCRFS))
copy(manifestDataLE[8*4:], ZstdChunkedFrameMagic)
footer := ZstdChunkedFooterData{
ManifestType: uint64(ManifestTypeCRFS),
Offset: manifestOffset,
LengthCompressed: uint64(len(compressedManifest)),
LengthUncompressed: uint64(len(manifest)),
ChecksumAnnotation: "", // unused
OffsetTarSplit: uint64(tarSplitOffset),
LengthCompressedTarSplit: uint64(len(tarSplitData.Data)),
LengthUncompressedTarSplit: uint64(tarSplitData.UncompressedSize),
ChecksumAnnotationTarSplit: "", // unused
}
manifestDataLE := footerDataToBlob(footer)
return appendZstdSkippableFrame(dest, manifestDataLE)
}
@ -198,3 +205,79 @@ func ZstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) {
el := zstd.EncoderLevelFromZstd(level)
return zstd.NewWriter(dest, zstd.WithEncoderLevel(el))
}
// ZstdChunkedFooterData contains all the data stored in the zstd:chunked footer.
type ZstdChunkedFooterData struct {
ManifestType uint64
Offset uint64
LengthCompressed uint64
LengthUncompressed uint64
ChecksumAnnotation string // Only used when reading a layer, not when creating it
OffsetTarSplit uint64
LengthCompressedTarSplit uint64
LengthUncompressedTarSplit uint64
ChecksumAnnotationTarSplit string // Only used when reading a layer, not when creating it
}
func footerDataToBlob(footer ZstdChunkedFooterData) []byte {
// Store the offset to the manifest and its size in LE order
manifestDataLE := make([]byte, FooterSizeSupported)
binary.LittleEndian.PutUint64(manifestDataLE[8*0:], footer.Offset)
binary.LittleEndian.PutUint64(manifestDataLE[8*1:], footer.LengthCompressed)
binary.LittleEndian.PutUint64(manifestDataLE[8*2:], footer.LengthUncompressed)
binary.LittleEndian.PutUint64(manifestDataLE[8*3:], footer.ManifestType)
binary.LittleEndian.PutUint64(manifestDataLE[8*4:], footer.OffsetTarSplit)
binary.LittleEndian.PutUint64(manifestDataLE[8*5:], footer.LengthCompressedTarSplit)
binary.LittleEndian.PutUint64(manifestDataLE[8*6:], footer.LengthUncompressedTarSplit)
copy(manifestDataLE[8*7:], ZstdChunkedFrameMagic)
return manifestDataLE
}
// ReadFooterDataFromAnnotations reads the zstd:chunked footer data from the given annotations.
func ReadFooterDataFromAnnotations(annotations map[string]string) (ZstdChunkedFooterData, error) {
var footerData ZstdChunkedFooterData
footerData.ChecksumAnnotation = annotations[ManifestChecksumKey]
if footerData.ChecksumAnnotation == "" {
return footerData, fmt.Errorf("manifest checksum annotation %q not found", ManifestChecksumKey)
}
offsetMetadata := annotations[ManifestInfoKey]
if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &footerData.Offset, &footerData.LengthCompressed, &footerData.LengthUncompressed, &footerData.ManifestType); err != nil {
return footerData, err
}
if tarSplitInfoKeyAnnotation, found := annotations[TarSplitInfoKey]; found {
if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &footerData.OffsetTarSplit, &footerData.LengthCompressedTarSplit, &footerData.LengthUncompressedTarSplit); err != nil {
return footerData, err
}
footerData.ChecksumAnnotationTarSplit = annotations[TarSplitChecksumKey]
}
return footerData, nil
}
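// Illustrative sketch, not from upstream: the ManifestInfoKey annotation is a
// colon-separated "offset:lengthCompressed:lengthUncompressed:manifestType" string, as
// parsed by fmt.Sscanf above. exampleAnnotationParse and its values are hypothetical.
func exampleAnnotationParse() (ZstdChunkedFooterData, error) {
	annotations := map[string]string{
		ManifestChecksumKey: "sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
		ManifestInfoKey:     "1024:512:2048:1",
	}
	return ReadFooterDataFromAnnotations(annotations)
}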
// ReadFooterDataFromBlob reads the zstd:chunked footer from the binary buffer.
func ReadFooterDataFromBlob(footer []byte) (ZstdChunkedFooterData, error) {
var footerData ZstdChunkedFooterData
if len(footer) < FooterSizeSupported {
return footerData, errors.New("blob too small")
}
footerData.Offset = binary.LittleEndian.Uint64(footer[0:8])
footerData.LengthCompressed = binary.LittleEndian.Uint64(footer[8:16])
footerData.LengthUncompressed = binary.LittleEndian.Uint64(footer[16:24])
footerData.ManifestType = binary.LittleEndian.Uint64(footer[24:32])
footerData.OffsetTarSplit = binary.LittleEndian.Uint64(footer[32:40])
footerData.LengthCompressedTarSplit = binary.LittleEndian.Uint64(footer[40:48])
footerData.LengthUncompressedTarSplit = binary.LittleEndian.Uint64(footer[48:56])
// the magic number is stored in the last 8 bytes
if !bytes.Equal(ZstdChunkedFrameMagic, footer[len(footer)-len(ZstdChunkedFrameMagic):]) {
return footerData, errors.New("invalid magic number")
}
return footerData, nil
}
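// Illustrative sketch, not from upstream: the footer blob is a fixed 64-byte little-endian
// structure, so writing with footerDataToBlob and reading back with ReadFooterDataFromBlob
// reproduces the numeric fields (the checksum annotations are never stored in the blob).
// exampleFooterRoundTrip is hypothetical.
func exampleFooterRoundTrip() error {
	in := ZstdChunkedFooterData{
		ManifestType:       uint64(ManifestTypeCRFS),
		Offset:             1024,
		LengthCompressed:   512,
		LengthUncompressed: 2048,
	}
	out, err := ReadFooterDataFromBlob(footerDataToBlob(in))
	if err != nil {
		return err
	}
	if out.Offset != in.Offset || out.ManifestType != in.ManifestType {
		return errors.New("unexpected footer round-trip mismatch")
	}
	return nil
}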

View file

@ -0,0 +1,82 @@
package lockfile
import (
"bytes"
cryptorand "crypto/rand"
"encoding/binary"
"os"
"sync/atomic"
"time"
)
// LastWrite is an opaque identifier of the last write to some *LockFile.
// It can be used by users of a *LockFile to determine if the lock indicates changes
// since the last check.
//
// Never construct a LastWrite manually; only accept it from *LockFile methods, and pass it back.
type LastWrite struct {
// Never modify fields of a LastWrite object; it has value semantics.
state []byte // Contents of the lock file.
}
var lastWriterIDCounter uint64 // Private state for newLastWriterID
const lastWriterIDSize = 64 // This must be the same as len(stringid.GenerateRandomID)
// newLastWrite returns a new "last write" ID.
// The value must be different on every call, and also differ from values
// generated by other processes.
func newLastWrite() LastWrite {
// The ID is (PID, time, per-process counter, random)
// PID + time represents both a unique process across reboots,
// and a specific time within the process; the per-process counter
// is an extra safeguard for in-process concurrency.
// The random part disambiguates across process namespaces
// (where PID values might collide), serves as a general-purpose
// extra safety, _and_ is used to pad the output to lastWriterIDSize,
// because other versions of this code exist and they don't work
// efficiently if the size of the value changes.
pid := os.Getpid()
tm := time.Now().UnixNano()
counter := atomic.AddUint64(&lastWriterIDCounter, 1)
res := make([]byte, lastWriterIDSize)
binary.LittleEndian.PutUint64(res[0:8], uint64(tm))
binary.LittleEndian.PutUint64(res[8:16], counter)
binary.LittleEndian.PutUint32(res[16:20], uint32(pid))
if n, err := cryptorand.Read(res[20:lastWriterIDSize]); err != nil || n != lastWriterIDSize-20 {
panic(err) // This shouldn't happen
}
return LastWrite{
state: res,
}
}
// serialize returns bytes to write to the lock file to represent the specified write.
func (lw LastWrite) serialize() []byte {
if lw.state == nil {
panic("LastWrite.serialize on an uninitialized object")
}
return lw.state
}
// equals returns true if lw matches other.
func (lw LastWrite) equals(other LastWrite) bool {
if lw.state == nil {
panic("LastWrite.equals on an uninitialized object")
}
if other.state == nil {
panic("LastWrite.equals with an uninitialized counterparty")
}
return bytes.Equal(lw.state, other.state)
}
// newLastWriteFromData returns a LastWrite corresponding to data that came from a previous LastWrite.serialize
func newLastWriteFromData(serialized []byte) LastWrite {
if serialized == nil {
panic("newLastWriteFromData with nil data")
}
return LastWrite{
state: serialized,
}
}
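// Illustrative sketch, not from upstream: LastWrite values survive a serialize /
// newLastWriteFromData round trip, and comparison is byte-wise, which is exactly what
// ModifiedSince relies on. exampleLastWriteRoundTrip is hypothetical.
func exampleLastWriteRoundTrip() bool {
	lw := newLastWrite()
	restored := newLastWriteFromData(lw.serialize())
	return lw.equals(restored) // expected to be true
}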

View file

@ -2,6 +2,7 @@ package lockfile
import (
"fmt"
"os"
"path/filepath"
"sync"
"time"
@ -54,6 +55,38 @@ type Locker interface {
AssertLockedForWriting()
}
type lockType byte
const (
readLock lockType = iota
writeLock
)
// LockFile represents a file lock where the file is used to cache an
// identifier of the last party that made changes to whatever's being protected
// by the lock.
//
// It MUST NOT be created manually. Use GetLockFile or GetROLockFile instead.
type LockFile struct {
// The following fields are only set when constructing *LockFile, and must never be modified afterwards.
// They are safe to access without any other locking.
file string
ro bool
// rwMutex serializes concurrent reader-writer acquisitions in the same process space
rwMutex *sync.RWMutex
// stateMutex is used to synchronize concurrent accesses to the state below
stateMutex *sync.Mutex
counter int64
lw LastWrite // A global value valid as of the last .Touch() or .Modified()
lockType lockType
locked bool
// The following fields are only modified on transitions between counter == 0 / counter != 0.
// Thus, they can be safely accessed by users _that currently hold the LockFile_ without locking.
// In other cases, they need to be protected using stateMutex.
fd fileHandle
}
var (
lockFiles map[string]*LockFile
lockFilesLock sync.Mutex
@ -91,6 +124,156 @@ func GetROLockfile(path string) (Locker, error) {
return GetROLockFile(path)
}
// Lock locks the lockfile as a writer. Panic if the lock is a read-only one.
func (l *LockFile) Lock() {
if l.ro {
panic("can't take write lock on read-only lock file")
} else {
l.lock(writeLock)
}
}
// RLock locks the lockfile as a reader.
func (l *LockFile) RLock() {
l.lock(readLock)
}
// Unlock unlocks the lockfile.
func (l *LockFile) Unlock() {
l.stateMutex.Lock()
if !l.locked {
// Panic when unlocking an unlocked lock. That's a violation
// of the lock semantics and will reveal such.
panic("calling Unlock on unlocked lock")
}
l.counter--
if l.counter < 0 {
// Panic when the counter is negative. There is no way we can
// recover from a corrupted lock and we need to protect the
// storage from corruption.
panic(fmt.Sprintf("lock %q has been unlocked too often", l.file))
}
if l.counter == 0 {
// We should only release the lock when the counter is 0 to
// avoid releasing read-locks too early; a given process may
// acquire a read lock multiple times.
l.locked = false
// Close the file descriptor on the last unlock, releasing the
// file lock.
unlockAndCloseHandle(l.fd)
}
if l.lockType == readLock {
l.rwMutex.RUnlock()
} else {
l.rwMutex.Unlock()
}
l.stateMutex.Unlock()
}
func (l *LockFile) AssertLocked() {
// DO NOT provide a variant that returns the value of l.locked.
//
// If the caller does not hold the lock, l.locked might nevertheless be true because another goroutine does hold it, and
// we can't tell the difference.
//
// Hence, this "AssertLocked" method, which exists only for sanity checks.
// Don't even bother with l.stateMutex: The caller is expected to hold the lock, and in that case l.locked is constant true
// with no possible writers.
// If the caller does not hold the lock, we are violating the locking/memory model anyway, and accessing the data
// without the lock is more efficient for callers, and potentially more visible to lock analysers for incorrect callers.
if !l.locked {
panic("internal error: lock is not held by the expected owner")
}
}
func (l *LockFile) AssertLockedForWriting() {
// DO NOT provide a variant that returns the current lock state.
//
// The same caveats as for AssertLocked apply equally.
l.AssertLocked()
// Like AssertLocked, don't even bother with l.stateMutex.
if l.lockType == readLock {
panic("internal error: lock is not held for writing")
}
}
// ModifiedSince checks if the lock has been changed since a provided LastWrite value,
// and returns the one to record instead.
//
// If ModifiedSince reports no modification, the previous LastWrite value
// is still valid and can continue to be used.
//
// If this function fails, the LastWriter value of the lock is indeterminate;
// the caller should fail and keep using the previously-recorded LastWrite value,
// so that it continues failing until the situation is resolved. Similarly,
// it should only update the recorded LastWrite value after processing the update:
//
// lw2, modified, err := state.lock.ModifiedSince(state.lastWrite)
// if err != nil { /* fail */ }
// state.lastWrite = lw2
// if modified {
// if err := reload(); err != nil { /* fail */ }
// state.lastWrite = lw2
// }
//
// The caller must hold the lock (for reading or writing).
func (l *LockFile) ModifiedSince(previous LastWrite) (LastWrite, bool, error) {
l.AssertLocked()
currentLW, err := l.GetLastWrite()
if err != nil {
return LastWrite{}, false, err
}
modified := !previous.equals(currentLW)
return currentLW, modified, nil
}
// Modified indicates if the lockfile has been updated since the last time it
// was loaded.
// NOTE: Unlike ModifiedSince, this returns true the first time it is called on a *LockFile.
// Callers cannot, in general, rely on this, because that might have happened for some other
// owner of the same *LockFile who created it previously.
//
// Deprecated: Use *LockFile.ModifiedSince.
func (l *LockFile) Modified() (bool, error) {
l.stateMutex.Lock()
if !l.locked {
panic("attempted to check last-writer in lockfile without locking it first")
}
defer l.stateMutex.Unlock()
oldLW := l.lw
// Note that this is called with stateMutex held; that's fine because ModifiedSince doesn't need to lock it.
currentLW, modified, err := l.ModifiedSince(oldLW)
if err != nil {
return true, err
}
l.lw = currentLW
return modified, nil
}
// Touch updates the lock file to record that the current lock holder has modified the lock-protected data.
//
// Deprecated: Use *LockFile.RecordWrite.
func (l *LockFile) Touch() error {
lw, err := l.RecordWrite()
if err != nil {
return err
}
l.stateMutex.Lock()
if !l.locked || (l.lockType == readLock) {
panic("attempted to update last-writer in lockfile without the write lock")
}
defer l.stateMutex.Unlock()
l.lw = lw
return nil
}
// IsReadWrite indicates if the lock file is a read-write lock.
func (l *LockFile) IsReadWrite() bool {
return !l.ro
}
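// Illustrative sketch, not from upstream: a hypothetical writer that takes the exclusive
// lock, mutates the protected data, and records the write so that other processes'
// ModifiedSince checks notice the change. exampleWriter and its update callback are
// hypothetical.
func exampleWriter(path string, update func() error) error {
	l, err := GetLockFile(path)
	if err != nil {
		return err
	}
	l.Lock()
	defer l.Unlock()
	if err := update(); err != nil {
		return err
	}
	_, err = l.RecordWrite()
	return err
}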
// getLockFile returns a *LockFile object, possibly (depending on the platform)
// working inter-process, and associated with the specified path.
//
@ -128,3 +311,99 @@ func getLockfile(path string, ro bool) (*LockFile, error) {
lockFiles[cleanPath] = lockFile
return lockFile, nil
}
// createLockFileForPath returns a new *LockFile object, possibly (depending on the platform)
// working inter-process and associated with the specified path.
//
// This function will be called at most once for each path value within a single process.
//
// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the
// "lock for reading" (shared) operation; otherwise, the lock is either an exclusive lock,
// or a read-write lock and *LockFile should correspond to the "lock for writing" (exclusive) operation.
//
// WARNING:
// - The lock may or MAY NOT be inter-process.
// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
// - Even if ro, the lock MAY be exclusive.
func createLockFileForPath(path string, ro bool) (*LockFile, error) {
// Check if we can open the lock.
fd, err := openLock(path, ro)
if err != nil {
return nil, err
}
unlockAndCloseHandle(fd)
lType := writeLock
if ro {
lType = readLock
}
return &LockFile{
file: path,
ro: ro,
rwMutex: &sync.RWMutex{},
stateMutex: &sync.Mutex{},
lw: newLastWrite(), // For compatibility, the first call of .Modified() will always report a change.
lockType: lType,
locked: false,
}, nil
}
// openLock opens the file at path and returns the corresponding file
// descriptor. The path is opened either read-only or read-write,
// depending on the value of ro argument.
//
// openLock will create the file and its parent directories,
// if necessary.
func openLock(path string, ro bool) (fd fileHandle, err error) {
flags := os.O_CREATE
if ro {
flags |= os.O_RDONLY
} else {
flags |= os.O_RDWR
}
fd, err = openHandle(path, flags)
if err == nil {
return fd, nil
}
// the directory of the lockfile seems to be removed, try to create it
if os.IsNotExist(err) {
if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
return fd, fmt.Errorf("creating lock file directory: %w", err)
}
return openLock(path, ro)
}
return fd, &os.PathError{Op: "open", Path: path, Err: err}
}
// lock locks the lockfile via a platform-specific syscall, based on the specified lock type.
func (l *LockFile) lock(lType lockType) {
if lType == readLock {
l.rwMutex.RLock()
} else {
l.rwMutex.Lock()
}
l.stateMutex.Lock()
defer l.stateMutex.Unlock()
if l.counter == 0 {
// If we're the first reference on the lock, we need to open the file again.
fd, err := openLock(l.file, l.ro)
if err != nil {
panic(err)
}
l.fd = fd
// Optimization: only use the (expensive) syscall when
// the counter is 0. In this case, we're either the first
// reader lock or a writer lock.
lockHandle(l.fd, lType)
}
l.lockType = lType
l.locked = true
l.counter++
}

View file

@ -4,297 +4,13 @@
package lockfile
import (
"bytes"
cryptorand "crypto/rand"
"encoding/binary"
"fmt"
"os"
"path/filepath"
"sync"
"sync/atomic"
"time"
"github.com/containers/storage/pkg/system"
"golang.org/x/sys/unix"
)
// *LockFile represents a file lock where the file is used to cache an
// identifier of the last party that made changes to whatever's being protected
// by the lock.
//
// It MUST NOT be created manually. Use GetLockFile or GetROLockFile instead.
type LockFile struct {
// The following fields are only set when constructing *LockFile, and must never be modified afterwards.
// They are safe to access without any other locking.
file string
ro bool
// rwMutex serializes concurrent reader-writer acquisitions in the same process space
rwMutex *sync.RWMutex
// stateMutex is used to synchronize concurrent accesses to the state below
stateMutex *sync.Mutex
counter int64
lw LastWrite // A global value valid as of the last .Touch() or .Modified()
locktype int16
locked bool
// The following fields are only modified on transitions between counter == 0 / counter != 0.
// Thus, they can be safely accessed by users _that currently hold the LockFile_ without locking.
// In other cases, they need to be protected using stateMutex.
fd uintptr
}
// LastWrite is an opaque identifier of the last write to some *LockFile.
// It can be used by users of a *LockFile to determine if the lock indicates changes
// since the last check.
//
// Never construct a LastWrite manually; only accept it from *LockFile methods, and pass it back.
type LastWrite struct {
// Never modify fields of a LastWrite object; it has value semantics.
state []byte // Contents of the lock file.
}
const lastWriterIDSize = 64 // This must be the same as len(stringid.GenerateRandomID)
var lastWriterIDCounter uint64 // Private state for newLastWriterID
// newLastWrite returns a new "last write" ID.
// The value must be different on every call, and also differ from values
// generated by other processes.
func newLastWrite() LastWrite {
// The ID is (PID, time, per-process counter, random)
// PID + time represents both a unique process across reboots,
// and a specific time within the process; the per-process counter
// is an extra safeguard for in-process concurrency.
// The random part disambiguates across process namespaces
// (where PID values might collide), serves as a general-purpose
// extra safety, _and_ is used to pad the output to lastWriterIDSize,
// because other versions of this code exist and they don't work
// efficiently if the size of the value changes.
pid := os.Getpid()
tm := time.Now().UnixNano()
counter := atomic.AddUint64(&lastWriterIDCounter, 1)
res := make([]byte, lastWriterIDSize)
binary.LittleEndian.PutUint64(res[0:8], uint64(tm))
binary.LittleEndian.PutUint64(res[8:16], counter)
binary.LittleEndian.PutUint32(res[16:20], uint32(pid))
if n, err := cryptorand.Read(res[20:lastWriterIDSize]); err != nil || n != lastWriterIDSize-20 {
panic(err) // This shouldn't happen
}
return LastWrite{
state: res,
}
}
// newLastWriteFromData returns a LastWrite corresponding to data that came from a previous LastWrite.serialize
func newLastWriteFromData(serialized []byte) LastWrite {
if serialized == nil {
panic("newLastWriteFromData with nil data")
}
return LastWrite{
state: serialized,
}
}
// serialize returns bytes to write to the lock file to represent the specified write.
func (lw LastWrite) serialize() []byte {
if lw.state == nil {
panic("LastWrite.serialize on an uninitialized object")
}
return lw.state
}
// equals returns true if lw matches other.
func (lw LastWrite) equals(other LastWrite) bool {
if lw.state == nil {
panic("LastWrite.equals on an uninitialized object")
}
if other.state == nil {
panic("LastWrite.equals with an uninitialized counterparty")
}
return bytes.Equal(lw.state, other.state)
}
// openLock opens the file at path and returns the corresponding file
// descriptor. The path is opened either read-only or read-write,
// depending on the value of ro argument.
//
// openLock will create the file and its parent directories,
// if necessary.
func openLock(path string, ro bool) (fd int, err error) {
flags := unix.O_CLOEXEC | os.O_CREATE
if ro {
flags |= os.O_RDONLY
} else {
flags |= os.O_RDWR
}
fd, err = unix.Open(path, flags, 0o644)
if err == nil {
return fd, nil
}
// the directory of the lockfile seems to be removed, try to create it
if os.IsNotExist(err) {
if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
return fd, fmt.Errorf("creating lock file directory: %w", err)
}
return openLock(path, ro)
}
return fd, &os.PathError{Op: "open", Path: path, Err: err}
}
// createLockFileForPath returns a new *LockFile object, possibly (depending on the platform)
// working inter-process and associated with the specified path.
//
// This function will be called at most once for each path value within a single process.
//
// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the
// "lock for reading" (shared) operation; otherwise, the lock is either an exclusive lock,
// or a read-write lock and *LockFile should correspond to the "lock for writing" (exclusive) operation.
//
// WARNING:
// - The lock may or MAY NOT be inter-process.
// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
// - Even if ro, the lock MAY be exclusive.
func createLockFileForPath(path string, ro bool) (*LockFile, error) {
// Check if we can open the lock.
fd, err := openLock(path, ro)
if err != nil {
return nil, err
}
unix.Close(fd)
locktype := unix.F_WRLCK
if ro {
locktype = unix.F_RDLCK
}
return &LockFile{
file: path,
ro: ro,
rwMutex: &sync.RWMutex{},
stateMutex: &sync.Mutex{},
lw: newLastWrite(), // For compatibility, the first call of .Modified() will always report a change.
locktype: int16(locktype),
locked: false,
}, nil
}
// lock locks the lockfile via fcntl(2) based on the specified type and
// command.
func (l *LockFile) lock(lType int16) {
lk := unix.Flock_t{
Type: lType,
Whence: int16(unix.SEEK_SET),
Start: 0,
Len: 0,
}
switch lType {
case unix.F_RDLCK:
l.rwMutex.RLock()
case unix.F_WRLCK:
l.rwMutex.Lock()
default:
panic(fmt.Sprintf("attempted to acquire a file lock of unrecognized type %d", lType))
}
l.stateMutex.Lock()
defer l.stateMutex.Unlock()
if l.counter == 0 {
// If we're the first reference on the lock, we need to open the file again.
fd, err := openLock(l.file, l.ro)
if err != nil {
panic(err)
}
l.fd = uintptr(fd)
// Optimization: only use the (expensive) fcntl syscall when
// the counter is 0. In this case, we're either the first
// reader lock or a writer lock.
for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
time.Sleep(10 * time.Millisecond)
}
}
l.locktype = lType
l.locked = true
l.counter++
}
// Lock locks the lockfile as a writer. Panic if the lock is a read-only one.
func (l *LockFile) Lock() {
if l.ro {
panic("can't take write lock on read-only lock file")
} else {
l.lock(unix.F_WRLCK)
}
}
// RLock locks the lockfile as a reader.
func (l *LockFile) RLock() {
l.lock(unix.F_RDLCK)
}
// Unlock unlocks the lockfile.
func (l *LockFile) Unlock() {
l.stateMutex.Lock()
if !l.locked {
// Panic when unlocking an unlocked lock. That's a violation
// of the lock semantics and will reveal such.
panic("calling Unlock on unlocked lock")
}
l.counter--
if l.counter < 0 {
// Panic when the counter is negative. There is no way we can
// recover from a corrupted lock and we need to protect the
// storage from corruption.
panic(fmt.Sprintf("lock %q has been unlocked too often", l.file))
}
if l.counter == 0 {
// We should only release the lock when the counter is 0 to
// avoid releasing read-locks too early; a given process may
// acquire a read lock multiple times.
l.locked = false
// Close the file descriptor on the last unlock, releasing the
// file lock.
unix.Close(int(l.fd))
}
if l.locktype == unix.F_RDLCK {
l.rwMutex.RUnlock()
} else {
l.rwMutex.Unlock()
}
l.stateMutex.Unlock()
}
func (l *LockFile) AssertLocked() {
// DO NOT provide a variant that returns the value of l.locked.
//
// If the caller does not hold the lock, l.locked might nevertheless be true because another goroutine does hold it, and
// we can't tell the difference.
//
// Hence, this "AssertLocked" method, which exists only for sanity checks.
// Don't even bother with l.stateMutex: The caller is expected to hold the lock, and in that case l.locked is constant true
// with no possible writers.
// If the caller does not hold the lock, we are violating the locking/memory model anyway, and accessing the data
// without the lock is more efficient for callers, and potentially more visible to lock analysers for incorrect callers.
if !l.locked {
panic("internal error: lock is not held by the expected owner")
}
}
func (l *LockFile) AssertLockedForWriting() {
// DO NOT provide a variant that returns the current lock state.
//
// The same caveats as for AssertLocked apply equally.
l.AssertLocked()
// Like AssertLocked, don't even bother with l.stateMutex.
if l.locktype != unix.F_WRLCK {
panic("internal error: lock is not held for writing")
}
}
type fileHandle uintptr
// GetLastWrite returns a LastWrite value corresponding to current state of the lock.
// This is typically called before (_not after_) loading the state when initializing a consumer
@ -341,81 +57,6 @@ func (l *LockFile) RecordWrite() (LastWrite, error) {
return lw, nil
}
// ModifiedSince checks if the lock has been changed since a provided LastWrite value,
// and returns the one to record instead.
//
// If ModifiedSince reports no modification, the previous LastWrite value
// is still valid and can continue to be used.
//
// If this function fails, the LastWriter value of the lock is indeterminate;
// the caller should fail and keep using the previously-recorded LastWrite value,
// so that it continues failing until the situation is resolved. Similarly,
// it should only update the recorded LastWrite value after processing the update:
//
// lw2, modified, err := state.lock.ModifiedSince(state.lastWrite)
// if err != nil { /* fail */ }
// state.lastWrite = lw2
// if modified {
// if err := reload(); err != nil { /* fail */ }
// state.lastWrite = lw2
// }
//
// The caller must hold the lock (for reading or writing).
func (l *LockFile) ModifiedSince(previous LastWrite) (LastWrite, bool, error) {
l.AssertLocked()
currentLW, err := l.GetLastWrite()
if err != nil {
return LastWrite{}, false, err
}
modified := !previous.equals(currentLW)
return currentLW, modified, nil
}
// Touch updates the lock file to record that the current lock holder has modified the lock-protected data.
//
// Deprecated: Use *LockFile.RecordWrite.
func (l *LockFile) Touch() error {
lw, err := l.RecordWrite()
if err != nil {
return err
}
l.stateMutex.Lock()
if !l.locked || (l.locktype != unix.F_WRLCK) {
panic("attempted to update last-writer in lockfile without the write lock")
}
defer l.stateMutex.Unlock()
l.lw = lw
return nil
}
// Modified indicates if the lockfile has been updated since the last time it
// was loaded.
// NOTE: Unlike ModifiedSince, this returns true the first time it is called on a *LockFile.
// Callers cannot, in general, rely on this, because that might have happened for some other
// owner of the same *LockFile who created it previously.
//
// Deprecated: Use *LockFile.ModifiedSince.
func (l *LockFile) Modified() (bool, error) {
l.stateMutex.Lock()
if !l.locked {
panic("attempted to check last-writer in lockfile without locking it first")
}
defer l.stateMutex.Unlock()
oldLW := l.lw
// Note that this is called with stateMutex held; that's fine because ModifiedSince doesn't need to lock it.
currentLW, modified, err := l.ModifiedSince(oldLW)
if err != nil {
return true, err
}
l.lw = currentLW
return modified, nil
}
// IsReadWrite indicates if the lock file is a read-write lock.
func (l *LockFile) IsReadWrite() bool {
return !l.ro
}
// TouchedSince indicates if the lock file has been touched since the specified time
func (l *LockFile) TouchedSince(when time.Time) bool {
st, err := system.Fstat(int(l.fd))
@ -426,3 +67,29 @@ func (l *LockFile) TouchedSince(when time.Time) bool {
touched := time.Unix(mtim.Unix())
return when.Before(touched)
}
func openHandle(path string, mode int) (fileHandle, error) {
mode |= unix.O_CLOEXEC
fd, err := unix.Open(path, mode, 0o644)
return fileHandle(fd), err
}
func lockHandle(fd fileHandle, lType lockType) {
fType := unix.F_RDLCK
if lType != readLock {
fType = unix.F_WRLCK
}
lk := unix.Flock_t{
Type: int16(fType),
Whence: int16(unix.SEEK_SET),
Start: 0,
Len: 0,
}
for unix.FcntlFlock(uintptr(fd), unix.F_SETLKW, &lk) != nil {
time.Sleep(10 * time.Millisecond)
}
}
func unlockAndCloseHandle(fd fileHandle) {
unix.Close(int(fd))
}
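// Illustrative sketch, not from upstream: the low-level helpers above are always used in
// an open, lock, work, unlockAndCloseHandle sequence; closing the descriptor is what
// releases the fcntl lock. exampleRawLock is hypothetical.
func exampleRawLock(path string) error {
	fd, err := openHandle(path, os.O_RDWR|os.O_CREATE)
	if err != nil {
		return err
	}
	lockHandle(fd, writeLock)
	defer unlockAndCloseHandle(fd)
	// ... critical section protected by the file lock ...
	return nil
}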

View file

@ -5,81 +5,19 @@ package lockfile
import (
"os"
"sync"
"time"
"golang.org/x/sys/windows"
)
// createLockFileForPath returns a *LockFile object, possibly (depending on the platform)
// working inter-process and associated with the specified path.
//
// This function will be called at most once for each path value within a single process.
//
// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the
// "lock for reading" (shared) operation; otherwise, the lock is either an exclusive lock,
// or a read-write lock and *LockFile should correspond to the "lock for writing" (exclusive) operation.
//
// WARNING:
// - The lock may or MAY NOT be inter-process.
// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
// - Even if ro, the lock MAY be exclusive.
func createLockFileForPath(path string, ro bool) (*LockFile, error) {
return &LockFile{locked: false}, nil
}
const (
reserved = 0
allBytes = ^uint32(0)
)
// *LockFile represents a file lock where the file is used to cache an
// identifier of the last party that made changes to whatever's being protected
// by the lock.
//
// It MUST NOT be created manually. Use GetLockFile or GetROLockFile instead.
type LockFile struct {
mu sync.Mutex
file string
locked bool
}
type fileHandle windows.Handle
// LastWrite is an opaque identifier of the last write to some *LockFile.
// It can be used by users of a *LockFile to determine if the lock indicates changes
// since the last check.
// A default-initialized LastWrite never matches any last write, i.e. it always indicates changes.
type LastWrite struct {
// Nothing: The Windows "implementation" does not actually track writes.
}
func (l *LockFile) Lock() {
l.mu.Lock()
l.locked = true
}
func (l *LockFile) RLock() {
l.mu.Lock()
l.locked = true
}
func (l *LockFile) Unlock() {
l.locked = false
l.mu.Unlock()
}
func (l *LockFile) AssertLocked() {
// DO NOT provide a variant that returns the value of l.locked.
//
// If the caller does not hold the lock, l.locked might nevertheless be true because another goroutine does hold it, and
// we can't tell the difference.
//
// Hence, this "AssertLocked" method, which exists only for sanity checks.
if !l.locked {
panic("internal error: lock is not held by the expected owner")
}
}
func (l *LockFile) AssertLockedForWriting() {
// DO NOT provide a variant that returns the current lock state.
//
// The same caveats as for AssertLocked apply equally.
l.AssertLocked() // The current implementation does not distinguish between read and write locks.
}
// GetLastWrite() returns a LastWrite value corresponding to current state of the lock.
// GetLastWrite returns a LastWrite value corresponding to current state of the lock.
// This is typically called before (_not after_) loading the state when initializing a consumer
// of the data protected by the lock.
// During the lifetime of the consumer, the consumer should usually call ModifiedSince instead.
@ -87,7 +25,18 @@ func (l *LockFile) AssertLockedForWriting() {
// The caller must hold the lock (for reading or writing) before this function is called.
func (l *LockFile) GetLastWrite() (LastWrite, error) {
l.AssertLocked()
return LastWrite{}, nil
contents := make([]byte, lastWriterIDSize)
ol := new(windows.Overlapped)
var n uint32
err := windows.ReadFile(windows.Handle(l.fd), contents, &n, ol)
if err != nil && err != windows.ERROR_HANDLE_EOF {
return LastWrite{}, err
}
// It is important to handle the partial read case, because
// the initial size of the lock file is zero, which is a valid
// state (no writes yet)
contents = contents[:n]
return newLastWriteFromData(contents), nil
}
// RecordWrite updates the lock with a new LastWrite value, and returns the new value.
@ -102,47 +51,22 @@ func (l *LockFile) GetLastWrite() (LastWrite, error) {
//
// The caller must hold the lock for writing.
func (l *LockFile) RecordWrite() (LastWrite, error) {
return LastWrite{}, nil
}
// ModifiedSince checks if the lock has been changed since a provided LastWrite value,
// and returns the one to record instead.
//
// If ModifiedSince reports no modification, the previous LastWrite value
// is still valid and can continue to be used.
//
// If this function fails, the LastWriter value of the lock is indeterminate;
// the caller should fail and keep using the previously-recorded LastWrite value,
// so that it continues failing until the situation is resolved. Similarly,
// it should only update the recorded LastWrite value after processing the update:
//
// lw2, modified, err := state.lock.ModifiedSince(state.lastWrite)
// if err != nil { /* fail */ }
// state.lastWrite = lw2
// if modified {
// if err := reload(); err != nil { /* fail */ }
// state.lastWrite = lw2
// }
//
// The caller must hold the lock (for reading or writing).
func (l *LockFile) ModifiedSince(previous LastWrite) (LastWrite, bool, error) {
return LastWrite{}, false, nil
}
// Deprecated: Use *LockFile.ModifiedSince.
func (l *LockFile) Modified() (bool, error) {
return false, nil
}
// Deprecated: Use *LockFile.RecordWrite.
func (l *LockFile) Touch() error {
return nil
}
func (l *LockFile) IsReadWrite() bool {
return false
l.AssertLockedForWriting()
lw := newLastWrite()
lockContents := lw.serialize()
ol := new(windows.Overlapped)
var n uint32
err := windows.WriteFile(windows.Handle(l.fd), lockContents, &n, ol)
if err != nil {
return LastWrite{}, err
}
if int(n) != len(lockContents) {
return LastWrite{}, windows.ERROR_DISK_FULL
}
return lw, nil
}
// TouchedSince indicates if the lock file has been touched since the specified time
func (l *LockFile) TouchedSince(when time.Time) bool {
stat, err := os.Stat(l.file)
if err != nil {
@ -150,3 +74,26 @@ func (l *LockFile) TouchedSince(when time.Time) bool {
}
return when.Before(stat.ModTime())
}
func openHandle(path string, mode int) (fileHandle, error) {
mode |= windows.O_CLOEXEC
fd, err := windows.Open(path, mode, windows.S_IWRITE)
return fileHandle(fd), err
}
func lockHandle(fd fileHandle, lType lockType) {
flags := 0
if lType != readLock {
flags = windows.LOCKFILE_EXCLUSIVE_LOCK
}
ol := new(windows.Overlapped)
if err := windows.LockFileEx(windows.Handle(fd), uint32(flags), reserved, allBytes, allBytes, ol); err != nil {
panic(err)
}
}
func unlockAndCloseHandle(fd fileHandle) {
ol := new(windows.Overlapped)
windows.UnlockFileEx(windows.Handle(fd), reserved, allBytes, allBytes, ol)
windows.Close(windows.Handle(fd))
}
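
A minimal sketch of the consumer pattern that GetLastWrite, ModifiedSince and RecordWrite are designed for, following the ModifiedSince documentation above. It assumes a *LockFile obtained elsewhere (e.g. via the package's GetLockFile constructor, which is not part of this diff); Lock/RLock are the package's write/read lock entry points, and the state type and reload/update callbacks are hypothetical names used only for illustration.

package example

import "github.com/containers/storage/pkg/lockfile"

// state caches the LastWrite value this consumer last processed.
type state struct {
	lock      *lockfile.LockFile
	lastWrite lockfile.LastWrite
}

// refresh re-reads the protected data only if some other party has recorded
// a write since the cached LastWrite value.
func (s *state) refresh(reload func() error) error {
	s.lock.RLock()
	defer s.lock.Unlock()
	lw, modified, err := s.lock.ModifiedSince(s.lastWrite)
	if err != nil {
		return err // keep the previous LastWrite so the next call re-checks
	}
	if modified {
		if err := reload(); err != nil {
			return err
		}
	}
	s.lastWrite = lw
	return nil
}

// save applies an update and records a new LastWrite so other consumers can
// detect the change through ModifiedSince.
func (s *state) save(update func() error) error {
	s.lock.Lock()
	defer s.lock.Unlock()
	if err := update(); err != nil {
		return err
	}
	lw, err := s.lock.RecordWrite()
	if err != nil {
		return err
	}
	s.lastWrite = lw
	return nil
}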

View file

@ -7,6 +7,8 @@ import (
"os"
"strconv"
"syscall"
"golang.org/x/sys/unix"
)
// StatT type contains status of a file. It contains metadata
@ -57,6 +59,10 @@ func (s StatT) Dev() uint64 {
return s.dev
}
func (s StatT) IsDir() bool {
return (s.mode & unix.S_IFDIR) != 0
}
// Stat takes a path to a file and returns
// a system.StatT type pertaining to that file.
//

View file

@ -48,6 +48,10 @@ func (s StatT) Dev() uint64 {
return 0
}
func (s StatT) IsDir() bool {
return s.Mode().IsDir()
}
// Stat takes a path to a file and returns
// a system.StatT type pertaining to that file.
//

View file

@ -26,7 +26,7 @@ func isValidCredsMessage(msg string) error {
// Store uses an external program to save credentials.
func Store(program ProgramFunc, creds *credentials.Credentials) error {
cmd := program("store")
cmd := program(credentials.ActionStore)
buffer := new(bytes.Buffer)
if err := json.NewEncoder(buffer).Encode(creds); err != nil {
@ -50,7 +50,7 @@ func Store(program ProgramFunc, creds *credentials.Credentials) error {
// Get executes an external program to get the credentials from a native store.
func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) {
cmd := program("get")
cmd := program(credentials.ActionGet)
cmd.Input(strings.NewReader(serverURL))
out, err := cmd.Output()
@ -81,7 +81,7 @@ func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error
// Erase executes a program to remove the server credentials from the native store.
func Erase(program ProgramFunc, serverURL string) error {
cmd := program("erase")
cmd := program(credentials.ActionErase)
cmd.Input(strings.NewReader(serverURL))
out, err := cmd.Output()
if err != nil {
@ -99,7 +99,7 @@ func Erase(program ProgramFunc, serverURL string) error {
// List executes a program to list server credentials in the native store.
func List(program ProgramFunc) (map[string]string, error) {
cmd := program("list")
cmd := program(credentials.ActionList)
cmd.Input(strings.NewReader("unused"))
out, err := cmd.Output()
if err != nil {

View file

@ -1,11 +1,9 @@
package client
import (
"fmt"
"io"
"os"
exec "golang.org/x/sys/execabs"
"os/exec"
)
// Program is an interface to execute external programs.
@ -31,27 +29,26 @@ func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc
func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd {
programCmd := exec.Command(commandName, args...)
programCmd.Env = os.Environ()
if env != nil {
for k, v := range *env {
programCmd.Env = append(programCmd.Env, fmt.Sprintf("%s=%s", k, v))
programCmd.Env = append(programCmd.Environ(), k+"="+v)
}
}
programCmd.Stderr = os.Stderr
return programCmd
}
// Shell invokes shell commands to talk with a remote credentials helper.
// Shell invokes shell commands to talk with a remote credentials-helper.
type Shell struct {
cmd *exec.Cmd
}
// Output returns responses from the remote credentials helper.
// Output returns responses from the remote credentials-helper.
func (s *Shell) Output() ([]byte, error) {
return s.cmd.Output()
}
// Input sets the input to send to a remote credentials helper.
// Input sets the input to send to a remote credentials-helper.
func (s *Shell) Input(in io.Reader) {
s.cmd.Stdin = in
}

View file

@ -10,6 +10,20 @@ import (
"strings"
)
// Action defines the name of an action (sub-command) supported by a
// credential-helper binary. It is an alias for "string", and mostly
// for convenience.
type Action = string
// List of actions (sub-commands) supported by credential-helper binaries.
const (
ActionStore Action = "store"
ActionGet Action = "get"
ActionErase Action = "erase"
ActionList Action = "list"
ActionVersion Action = "version"
)
// Credentials holds the information shared between docker and the credentials store.
type Credentials struct {
ServerURL string
@ -43,42 +57,52 @@ func SetCredsLabel(label string) {
CredsLabel = label
}
// Serve initializes the credentials helper and parses the action argument.
// Serve initializes the credentials-helper and parses the action argument.
// This function is designed to be called from a command line interface.
// It uses os.Args[1] as the key for the action.
// It uses os.Stdin as input and os.Stdout as output.
// This function terminates the program with os.Exit(1) if there is an error.
func Serve(helper Helper) {
var err error
if len(os.Args) != 2 {
err = fmt.Errorf("Usage: %s <store|get|erase|list|version>", os.Args[0])
_, _ = fmt.Fprintln(os.Stdout, usage())
os.Exit(1)
}
if err == nil {
err = HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout)
switch os.Args[1] {
case "--version", "-v":
_ = PrintVersion(os.Stdout)
os.Exit(0)
case "--help", "-h":
_, _ = fmt.Fprintln(os.Stdout, usage())
os.Exit(0)
}
if err != nil {
fmt.Fprintf(os.Stdout, "%v\n", err)
if err := HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout); err != nil {
_, _ = fmt.Fprintln(os.Stdout, err)
os.Exit(1)
}
}
// HandleCommand uses a helper and a key to run a credential action.
func HandleCommand(helper Helper, key string, in io.Reader, out io.Writer) error {
switch key {
case "store":
func usage() string {
return fmt.Sprintf("Usage: %s <store|get|erase|list|version>", Name)
}
// HandleCommand runs a helper to execute a credential action.
func HandleCommand(helper Helper, action Action, in io.Reader, out io.Writer) error {
switch action {
case ActionStore:
return Store(helper, in)
case "get":
case ActionGet:
return Get(helper, in, out)
case "erase":
case ActionErase:
return Erase(helper, in)
case "list":
case ActionList:
return List(helper, out)
case "version":
case ActionVersion:
return PrintVersion(out)
default:
return fmt.Errorf("%s: unknown action: %s", Name, action)
}
return fmt.Errorf("Unknown credential action `%s`", key)
}
// Store uses a helper and an input reader to save credentials.
@ -132,18 +156,17 @@ func Get(helper Helper, reader io.Reader, writer io.Writer) error {
return err
}
resp := Credentials{
buffer.Reset()
err = json.NewEncoder(buffer).Encode(Credentials{
ServerURL: serverURL,
Username: username,
Secret: secret,
}
buffer.Reset()
if err := json.NewEncoder(buffer).Encode(resp); err != nil {
})
if err != nil {
return err
}
fmt.Fprint(writer, buffer.String())
_, _ = fmt.Fprint(writer, buffer.String())
return nil
}
@ -181,6 +204,6 @@ func List(helper Helper, writer io.Writer) error {
// PrintVersion outputs the current version.
func PrintVersion(writer io.Writer) error {
fmt.Fprintf(writer, "%s (%s) %s\n", Name, Package, Version)
_, _ = fmt.Fprintf(writer, "%s (%s) %s\n", Name, Package, Version)
return nil
}
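
The Action constants and the reworked Serve/HandleCommand above are all a helper binary needs in order to expose a store. A minimal sketch, assuming the package's Helper interface (Add, Delete, Get, List); the memoryHelper type is a hypothetical in-memory store used only for illustration.

package main

import (
	"github.com/docker/docker-credential-helpers/credentials"
)

// memoryHelper keeps credentials in memory; a real helper would talk to an
// OS keychain or similar backend.
type memoryHelper struct {
	store map[string]*credentials.Credentials
}

func (h *memoryHelper) Add(c *credentials.Credentials) error {
	h.store[c.ServerURL] = c
	return nil
}

func (h *memoryHelper) Delete(serverURL string) error {
	delete(h.store, serverURL)
	return nil
}

func (h *memoryHelper) Get(serverURL string) (string, string, error) {
	c, ok := h.store[serverURL]
	if !ok {
		return "", "", credentials.NewErrCredentialsNotFound()
	}
	return c.Username, c.Secret, nil
}

func (h *memoryHelper) List() (map[string]string, error) {
	out := make(map[string]string, len(h.store))
	for url, c := range h.store {
		out[url] = c.Username
	}
	return out, nil
}

func main() {
	// Serve reads the action from os.Args[1] ("store", "get", "erase",
	// "list" or "version") and dispatches via HandleCommand.
	credentials.Serve(&memoryHelper{store: map[string]*credentials.Credentials{}})
}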

View file

@ -1,5 +1,7 @@
package credentials
import "errors"
const (
// ErrCredentialsNotFound standardizes the not found error, so every helper returns
// the same message and docker can handle it properly.
@ -21,6 +23,11 @@ func (errCredentialsNotFound) Error() string {
return errCredentialsNotFoundMessage
}
// NotFound implements the [ErrNotFound][errdefs.ErrNotFound] interface.
//
// [errdefs.ErrNotFound]: https://pkg.go.dev/github.com/docker/docker@v24.0.1+incompatible/errdefs#ErrNotFound
func (errCredentialsNotFound) NotFound() {}
// NewErrCredentialsNotFound creates a new error
// for when the credentials are not in the store.
func NewErrCredentialsNotFound() error {
@ -30,8 +37,8 @@ func NewErrCredentialsNotFound() error {
// IsErrCredentialsNotFound returns true if the error
// was caused by not having a set of credentials in a store.
func IsErrCredentialsNotFound(err error) bool {
_, ok := err.(errCredentialsNotFound)
return ok
var target errCredentialsNotFound
return errors.As(err, &target)
}
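
A brief sketch of what the errors.As matching and the NotFound marker buy callers; errdefs.IsNotFound comes from docker's errdefs package and is assumed to be available on the caller's side, not part of this module.

package example

import (
	"fmt"

	"github.com/docker/docker-credential-helpers/credentials"
	"github.com/docker/docker/errdefs"
)

func classify() {
	err := credentials.NewErrCredentialsNotFound()

	// errors.As also matches wrapped errors, so callers may annotate freely.
	fmt.Println(credentials.IsErrCredentialsNotFound(fmt.Errorf("login: %w", err))) // true

	// The NotFound marker lets docker-side code classify the error through
	// errdefs without importing this package's helpers.
	fmt.Println(errdefs.IsNotFound(err)) // true
}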
// IsErrCredentialsNotFoundMessage returns true if the error
@ -53,6 +60,12 @@ func (errCredentialsMissingServerURL) Error() string {
return errCredentialsMissingServerURLMessage
}
// InvalidParameter implements the [ErrInvalidParameter][errdefs.ErrInvalidParameter]
// interface.
//
// [errdefs.ErrInvalidParameter]: https://pkg.go.dev/github.com/docker/docker@v24.0.1+incompatible/errdefs#ErrInvalidParameter
func (errCredentialsMissingServerURL) InvalidParameter() {}
// errCredentialsMissingUsername represents an error raised
// when the credentials object has no username or when no
// username is provided to a credentials operation requiring
@ -63,6 +76,12 @@ func (errCredentialsMissingUsername) Error() string {
return errCredentialsMissingUsernameMessage
}
// InvalidParameter implements the [ErrInvalidParameter][errdefs.ErrInvalidParameter]
// interface.
//
// [errdefs.ErrInvalidParameter]: https://pkg.go.dev/github.com/docker/docker@v24.0.1+incompatible/errdefs#ErrInvalidParameter
func (errCredentialsMissingUsername) InvalidParameter() {}
// NewErrCredentialsMissingServerURL creates a new error for
// errCredentialsMissingServerURL.
func NewErrCredentialsMissingServerURL() error {
@ -78,8 +97,8 @@ func NewErrCredentialsMissingUsername() error {
// IsCredentialsMissingServerURL returns true if the error
// was an errCredentialsMissingServerURL.
func IsCredentialsMissingServerURL(err error) bool {
_, ok := err.(errCredentialsMissingServerURL)
return ok
var target errCredentialsMissingServerURL
return errors.As(err, &target)
}
// IsCredentialsMissingServerURLMessage checks for an
@ -91,8 +110,8 @@ func IsCredentialsMissingServerURLMessage(err string) bool {
// IsCredentialsMissingUsername returns true if the error
// was an errCredentialsMissingUsername.
func IsCredentialsMissingUsername(err error) bool {
_, ok := err.(errCredentialsMissingUsername)
return ok
var target errCredentialsMissingUsername
return errors.As(err, &target)
}
// IsCredentialsMissingUsernameMessage checks for an

2
vendor/github.com/go-jose/go-jose/v3/.gitignore generated vendored Normal file
View file

@ -0,0 +1,2 @@
jose-util/jose-util
jose-util.t.err

53
vendor/github.com/go-jose/go-jose/v3/.golangci.yml generated vendored Normal file
View file

@ -0,0 +1,53 @@
# https://github.com/golangci/golangci-lint
run:
skip-files:
- doc_test.go
modules-download-mode: readonly
linters:
enable-all: true
disable:
- gochecknoglobals
- goconst
- lll
- maligned
- nakedret
- scopelint
- unparam
- funlen # added in 1.18 (requires go-jose changes before it can be enabled)
linters-settings:
gocyclo:
min-complexity: 35
issues:
exclude-rules:
- text: "don't use ALL_CAPS in Go names"
linters:
- golint
- text: "hardcoded credentials"
linters:
- gosec
- text: "weak cryptographic primitive"
linters:
- gosec
- path: json/
linters:
- dupl
- errcheck
- gocritic
- gocyclo
- golint
- govet
- ineffassign
- staticcheck
- structcheck
- stylecheck
- unused
- path: _test\.go
linters:
- scopelint
- path: jwk.go
linters:
- gocyclo

33
vendor/github.com/go-jose/go-jose/v3/.travis.yml generated vendored Normal file
View file

@ -0,0 +1,33 @@
language: go
matrix:
fast_finish: true
allow_failures:
- go: tip
go:
- "1.13.x"
- "1.14.x"
- tip
before_script:
- export PATH=$HOME/.local/bin:$PATH
before_install:
- go get -u github.com/mattn/goveralls github.com/wadey/gocovmerge
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.18.0
- pip install cram --user
script:
- go test -v -covermode=count -coverprofile=profile.cov .
- go test -v -covermode=count -coverprofile=cryptosigner/profile.cov ./cryptosigner
- go test -v -covermode=count -coverprofile=cipher/profile.cov ./cipher
- go test -v -covermode=count -coverprofile=jwt/profile.cov ./jwt
- go test -v ./json # no coverage for forked encoding/json package
- golangci-lint run
- cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t # cram tests jose-util
- cd ..
after_success:
- gocovmerge *.cov */*.cov > merged.coverprofile
- goveralls -coverprofile merged.coverprofile -service=travis-ci

10
vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md generated vendored Normal file
View file

@ -0,0 +1,10 @@
Serious about security
======================
Square recognizes the important contributions the security research community
can make. We therefore encourage reporting security issues with the code
contained in this repository.
If you believe you have discovered a security vulnerability, please follow the
guidelines at <https://bugcrowd.com/squareopensource>.

15
vendor/github.com/go-jose/go-jose/v3/CONTRIBUTING.md generated vendored Normal file
View file

@ -0,0 +1,15 @@
# Contributing
If you would like to contribute code to go-jose you can do so through GitHub by
forking the repository and sending a pull request.
When submitting code, please make every effort to follow existing conventions
and style in order to keep the code as readable as possible. Please also make
sure all tests pass by running `go test`, and format your code with `go fmt`.
We also recommend using `golint` and `errcheck`.
Before your code can be accepted into the project you must also sign the
Individual Contributor License Agreement. We use [cla-assistant.io][1] and you
will be prompted to sign once a pull request is opened.
[1]: https://cla-assistant.io/

202
vendor/github.com/go-jose/go-jose/v3/LICENSE generated vendored Normal file
View file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

122
vendor/github.com/go-jose/go-jose/v3/README.md generated vendored Normal file
View file

@ -0,0 +1,122 @@
# Go JOSE
[![godoc](http://img.shields.io/badge/godoc-jose_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2)
[![godoc](http://img.shields.io/badge/godoc-jwt_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt)
[![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE)
[![build](https://travis-ci.org/go-jose/go-jose.svg?branch=master)](https://travis-ci.org/go-jose/go-jose)
[![coverage](https://coveralls.io/repos/github/go-jose/go-jose/badge.svg?branch=master)](https://coveralls.io/r/go-jose/go-jose)
Package jose aims to provide an implementation of the Javascript Object Signing
and Encryption set of standards. This includes support for JSON Web Encryption,
JSON Web Signature, and JSON Web Token standards.
**Disclaimer**: This library contains encryption software that is subject to
the U.S. Export Administration Regulations. You may not export, re-export,
transfer or download this code or any part of it in violation of any United
States law, directive or regulation. In particular this software may not be
exported or re-exported in any form or on any media to Iran, North Sudan,
Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any
US maintained blocked list.
## Overview
The implementation follows the
[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516),
[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and
[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications.
Tables of supported algorithms are shown below. The library supports both
the compact and JWS/JWE JSON Serialization formats, and has optional support for
multiple recipients. It also comes with a small command-line utility
([`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util))
for dealing with JOSE messages in a shell.
**Note**: We use a forked version of the `encoding/json` package from the Go
standard library which uses case-sensitive matching for member names (instead
of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)).
This is to avoid differences in interpretation of messages between go-jose and
libraries in other languages.
### Versions
[Version 2](https://gopkg.in/go-jose/go-jose.v2)
([branch](https://github.com/go-jose/go-jose/tree/v2),
[doc](https://godoc.org/gopkg.in/go-jose/go-jose.v2)) is the current stable version:
import "gopkg.in/go-jose/go-jose.v2"
[Version 3](https://github.com/go-jose/go-jose)
([branch](https://github.com/go-jose/go-jose/tree/master),
[doc](https://godoc.org/github.com/go-jose/go-jose)) is the under development/unstable version (not released yet):
import "github.com/go-jose/go-jose/v3"
All new feature development takes place on the `master` branch, which we are
preparing to release as version 3 soon. Version 2 will continue to receive
critical bug and security fixes. Note that starting with version 3 we are
using Go modules for versioning instead of `gopkg.in` as before. Version 3 also will require Go version 1.13 or higher.
Version 1 (on the `v1` branch) is frozen and not supported anymore.
### Supported algorithms
See below for a table of supported algorithms. Algorithm identifiers match
the names in the [JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518)
standard where possible. The Godoc reference has a list of constants.
Key encryption | Algorithm identifier(s)
:------------------------- | :------------------------------
RSA-PKCS#1v1.5 | RSA1_5
RSA-OAEP | RSA-OAEP, RSA-OAEP-256
AES key wrap | A128KW, A192KW, A256KW
AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW
ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW
ECDH-ES (direct) | ECDH-ES<sup>1</sup>
Direct encryption | dir<sup>1</sup>
<sup>1. Not supported in multi-recipient mode</sup>
Signing / MAC | Algorithm identifier(s)
:------------------------- | :------------------------------
RSASSA-PKCS#1v1.5 | RS256, RS384, RS512
RSASSA-PSS | PS256, PS384, PS512
HMAC | HS256, HS384, HS512
ECDSA | ES256, ES384, ES512
Ed25519 | EdDSA<sup>2</sup>
<sup>2. Only available in version 2 of the package</sup>
Content encryption | Algorithm identifier(s)
:------------------------- | :------------------------------
AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512
AES-GCM | A128GCM, A192GCM, A256GCM
Compression | Algorithm identifier(s)
:------------------------- | -------------------------------
DEFLATE (RFC 1951) | DEF
### Supported key types
See below for a table of supported key types. These are understood by the
library, and can be passed to corresponding functions such as `NewEncrypter` or
`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which
allows attaching a key id.
Algorithm(s) | Corresponding types
:------------------------- | -------------------------------
RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey)
ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey)
EdDSA<sup>1</sup> | [ed25519.PublicKey](https://godoc.org/pkg/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/pkg/crypto/ed25519#PrivateKey)
AES, HMAC | []byte
<sup>1. Only available in version 2 or later of the package</sup>
## Examples
[![godoc](http://img.shields.io/badge/godoc-jose_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2)
[![godoc](http://img.shields.io/badge/godoc-jwt_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt)
Examples can be found in the Godoc
reference for this package. The
[`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util)
subdirectory also contains a small command-line utility which might be useful
as an example as well.
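
A minimal sign-and-verify sketch under the v3 import path pulled in by this vendor update; it uses the package's documented entry points (NewSigner, Sign, CompactSerialize, ParseSigned, Verify) and generates a throwaway RSA key inline purely for illustration.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jose "github.com/go-jose/go-jose/v3"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.RS256, Key: key}, nil)
	if err != nil {
		panic(err)
	}

	obj, err := signer.Sign([]byte("Lorem ipsum dolor sit amet"))
	if err != nil {
		panic(err)
	}

	serialized, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}

	parsed, err := jose.ParseSigned(serialized)
	if err != nil {
		panic(err)
	}

	payload, err := parsed.Verify(&key.PublicKey)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))
}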

592
vendor/github.com/go-jose/go-jose/v3/asymmetric.go generated vendored Normal file
View file

@ -0,0 +1,592 @@
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jose
import (
"crypto"
"crypto/aes"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/rand"
"crypto/rsa"
"crypto/sha1"
"crypto/sha256"
"errors"
"fmt"
"math/big"
josecipher "github.com/go-jose/go-jose/v3/cipher"
"github.com/go-jose/go-jose/v3/json"
)
// A generic RSA-based encrypter/verifier
type rsaEncrypterVerifier struct {
publicKey *rsa.PublicKey
}
// A generic RSA-based decrypter/signer
type rsaDecrypterSigner struct {
privateKey *rsa.PrivateKey
}
// A generic EC-based encrypter/verifier
type ecEncrypterVerifier struct {
publicKey *ecdsa.PublicKey
}
type edEncrypterVerifier struct {
publicKey ed25519.PublicKey
}
// A key generator for ECDH-ES
type ecKeyGenerator struct {
size int
algID string
publicKey *ecdsa.PublicKey
}
// A generic EC-based decrypter/signer
type ecDecrypterSigner struct {
privateKey *ecdsa.PrivateKey
}
type edDecrypterSigner struct {
privateKey ed25519.PrivateKey
}
// newRSARecipient creates recipientKeyInfo based on the given key.
func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) {
// Verify that key management algorithm is supported by this encrypter
switch keyAlg {
case RSA1_5, RSA_OAEP, RSA_OAEP_256:
default:
return recipientKeyInfo{}, ErrUnsupportedAlgorithm
}
if publicKey == nil {
return recipientKeyInfo{}, errors.New("invalid public key")
}
return recipientKeyInfo{
keyAlg: keyAlg,
keyEncrypter: &rsaEncrypterVerifier{
publicKey: publicKey,
},
}, nil
}
// newRSASigner creates a recipientSigInfo based on the given key.
func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) {
// Verify that key management algorithm is supported by this encrypter
switch sigAlg {
case RS256, RS384, RS512, PS256, PS384, PS512:
default:
return recipientSigInfo{}, ErrUnsupportedAlgorithm
}
if privateKey == nil {
return recipientSigInfo{}, errors.New("invalid private key")
}
return recipientSigInfo{
sigAlg: sigAlg,
publicKey: staticPublicKey(&JSONWebKey{
Key: privateKey.Public(),
}),
signer: &rsaDecrypterSigner{
privateKey: privateKey,
},
}, nil
}
func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) {
if sigAlg != EdDSA {
return recipientSigInfo{}, ErrUnsupportedAlgorithm
}
if privateKey == nil {
return recipientSigInfo{}, errors.New("invalid private key")
}
return recipientSigInfo{
sigAlg: sigAlg,
publicKey: staticPublicKey(&JSONWebKey{
Key: privateKey.Public(),
}),
signer: &edDecrypterSigner{
privateKey: privateKey,
},
}, nil
}
// newECDHRecipient creates recipientKeyInfo based on the given key.
func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) {
// Verify that key management algorithm is supported by this encrypter
switch keyAlg {
case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
default:
return recipientKeyInfo{}, ErrUnsupportedAlgorithm
}
if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
return recipientKeyInfo{}, errors.New("invalid public key")
}
return recipientKeyInfo{
keyAlg: keyAlg,
keyEncrypter: &ecEncrypterVerifier{
publicKey: publicKey,
},
}, nil
}
// newECDSASigner creates a recipientSigInfo based on the given key.
func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) {
// Verify that key management algorithm is supported by this encrypter
switch sigAlg {
case ES256, ES384, ES512:
default:
return recipientSigInfo{}, ErrUnsupportedAlgorithm
}
if privateKey == nil {
return recipientSigInfo{}, errors.New("invalid private key")
}
return recipientSigInfo{
sigAlg: sigAlg,
publicKey: staticPublicKey(&JSONWebKey{
Key: privateKey.Public(),
}),
signer: &ecDecrypterSigner{
privateKey: privateKey,
},
}, nil
}
// Encrypt the given payload and update the object.
func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
encryptedKey, err := ctx.encrypt(cek, alg)
if err != nil {
return recipientInfo{}, err
}
return recipientInfo{
encryptedKey: encryptedKey,
header: &rawHeader{},
}, nil
}
// Encrypt the given payload. Based on the key encryption algorithm,
// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) {
switch alg {
case RSA1_5:
return rsa.EncryptPKCS1v15(RandReader, ctx.publicKey, cek)
case RSA_OAEP:
return rsa.EncryptOAEP(sha1.New(), RandReader, ctx.publicKey, cek, []byte{})
case RSA_OAEP_256:
return rsa.EncryptOAEP(sha256.New(), RandReader, ctx.publicKey, cek, []byte{})
}
return nil, ErrUnsupportedAlgorithm
}
// Decrypt the given payload and return the content encryption key.
func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator)
}
// Decrypt the given payload. Based on the key encryption algorithm,
// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) {
// Note: The random reader on decrypt operations is only used for blinding,
// so stubbing is meaningless (hence the direct use of rand.Reader).
switch alg {
case RSA1_5:
defer func() {
// DecryptPKCS1v15SessionKey sometimes panics on an invalid payload
// because of an index out of bounds error, which we want to ignore.
// This has been fixed in Go 1.3.1 (released 2014/08/13), the recover()
// only exists for preventing crashes with unpatched versions.
// See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k
// See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33
_ = recover()
}()
// Perform some input validation.
keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8
if keyBytes != len(jek) {
// Input size is incorrect, the encrypted payload should always match
// the size of the public modulus (e.g. using a 2048 bit key will
// produce 256 bytes of output). Reject this since it's invalid input.
return nil, ErrCryptoFailure
}
cek, _, err := generator.genKey()
if err != nil {
return nil, ErrCryptoFailure
}
// When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to
// prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing
// the Million Message Attack on Cryptographic Message Syntax". We are
// therefore deliberately ignoring errors here.
_ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek)
return cek, nil
case RSA_OAEP:
// Use rand.Reader for RSA blinding
return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{})
case RSA_OAEP_256:
// Use rand.Reader for RSA blinding
return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{})
}
return nil, ErrUnsupportedAlgorithm
}
// Sign the given payload
func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
var hash crypto.Hash
switch alg {
case RS256, PS256:
hash = crypto.SHA256
case RS384, PS384:
hash = crypto.SHA384
case RS512, PS512:
hash = crypto.SHA512
default:
return Signature{}, ErrUnsupportedAlgorithm
}
hasher := hash.New()
// According to documentation, Write() on hash never fails
_, _ = hasher.Write(payload)
hashed := hasher.Sum(nil)
var out []byte
var err error
switch alg {
case RS256, RS384, RS512:
out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed)
case PS256, PS384, PS512:
out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{
SaltLength: rsa.PSSSaltLengthEqualsHash,
})
}
if err != nil {
return Signature{}, err
}
return Signature{
Signature: out,
protected: &rawHeader{},
}, nil
}
// Verify the given payload
func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
var hash crypto.Hash
switch alg {
case RS256, PS256:
hash = crypto.SHA256
case RS384, PS384:
hash = crypto.SHA384
case RS512, PS512:
hash = crypto.SHA512
default:
return ErrUnsupportedAlgorithm
}
hasher := hash.New()
// According to documentation, Write() on hash never fails
_, _ = hasher.Write(payload)
hashed := hasher.Sum(nil)
switch alg {
case RS256, RS384, RS512:
return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature)
case PS256, PS384, PS512:
return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil)
}
return ErrUnsupportedAlgorithm
}
// Encrypt the given payload and update the object.
func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
switch alg {
case ECDH_ES:
// ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key.
return recipientInfo{
header: &rawHeader{},
}, nil
case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
default:
return recipientInfo{}, ErrUnsupportedAlgorithm
}
generator := ecKeyGenerator{
algID: string(alg),
publicKey: ctx.publicKey,
}
switch alg {
case ECDH_ES_A128KW:
generator.size = 16
case ECDH_ES_A192KW:
generator.size = 24
case ECDH_ES_A256KW:
generator.size = 32
}
kek, header, err := generator.genKey()
if err != nil {
return recipientInfo{}, err
}
block, err := aes.NewCipher(kek)
if err != nil {
return recipientInfo{}, err
}
jek, err := josecipher.KeyWrap(block, cek)
if err != nil {
return recipientInfo{}, err
}
return recipientInfo{
encryptedKey: jek,
header: &header,
}, nil
}
// Get key size for EC key generator
func (ctx ecKeyGenerator) keySize() int {
return ctx.size
}
// Get a content encryption key for ECDH-ES
func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) {
priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, RandReader)
if err != nil {
return nil, rawHeader{}, err
}
out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size)
b, err := json.Marshal(&JSONWebKey{
Key: &priv.PublicKey,
})
if err != nil {
return nil, nil, err
}
headers := rawHeader{
headerEPK: makeRawMessage(b),
}
return out, headers, nil
}
// Decrypt the given payload and return the content encryption key.
func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
epk, err := headers.getEPK()
if err != nil {
return nil, errors.New("go-jose/go-jose: invalid epk header")
}
if epk == nil {
return nil, errors.New("go-jose/go-jose: missing epk header")
}
publicKey, ok := epk.Key.(*ecdsa.PublicKey)
if publicKey == nil || !ok {
return nil, errors.New("go-jose/go-jose: invalid epk header")
}
if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
return nil, errors.New("go-jose/go-jose: invalid public key in epk header")
}
apuData, err := headers.getAPU()
if err != nil {
return nil, errors.New("go-jose/go-jose: invalid apu header")
}
apvData, err := headers.getAPV()
if err != nil {
return nil, errors.New("go-jose/go-jose: invalid apv header")
}
deriveKey := func(algID string, size int) []byte {
return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size)
}
var keySize int
algorithm := headers.getAlgorithm()
switch algorithm {
case ECDH_ES:
// ECDH-ES uses direct key agreement, no key unwrapping necessary.
return deriveKey(string(headers.getEncryption()), generator.keySize()), nil
case ECDH_ES_A128KW:
keySize = 16
case ECDH_ES_A192KW:
keySize = 24
case ECDH_ES_A256KW:
keySize = 32
default:
return nil, ErrUnsupportedAlgorithm
}
key := deriveKey(string(algorithm), keySize)
block, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
return josecipher.KeyUnwrap(block, recipient.encryptedKey)
}
func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
if alg != EdDSA {
return Signature{}, ErrUnsupportedAlgorithm
}
sig, err := ctx.privateKey.Sign(RandReader, payload, crypto.Hash(0))
if err != nil {
return Signature{}, err
}
return Signature{
Signature: sig,
protected: &rawHeader{},
}, nil
}
func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
if alg != EdDSA {
return ErrUnsupportedAlgorithm
}
ok := ed25519.Verify(ctx.publicKey, payload, signature)
if !ok {
return errors.New("go-jose/go-jose: ed25519 signature failed to verify")
}
return nil
}
// Sign the given payload
func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
var expectedBitSize int
var hash crypto.Hash
switch alg {
case ES256:
expectedBitSize = 256
hash = crypto.SHA256
case ES384:
expectedBitSize = 384
hash = crypto.SHA384
case ES512:
expectedBitSize = 521
hash = crypto.SHA512
}
curveBits := ctx.privateKey.Curve.Params().BitSize
if expectedBitSize != curveBits {
return Signature{}, fmt.Errorf("go-jose/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits)
}
hasher := hash.New()
// According to documentation, Write() on hash never fails
_, _ = hasher.Write(payload)
hashed := hasher.Sum(nil)
r, s, err := ecdsa.Sign(RandReader, ctx.privateKey, hashed)
if err != nil {
return Signature{}, err
}
keyBytes := curveBits / 8
if curveBits%8 > 0 {
keyBytes++
}
// We serialize the outputs (r and s) into big-endian byte arrays and pad
// them with zeros on the left to make sure the sizes work out. Both arrays
// must be keyBytes long, and the output must be 2*keyBytes long.
rBytes := r.Bytes()
rBytesPadded := make([]byte, keyBytes)
copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
sBytes := s.Bytes()
sBytesPadded := make([]byte, keyBytes)
copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
out := append(rBytesPadded, sBytesPadded...)
return Signature{
Signature: out,
protected: &rawHeader{},
}, nil
}
// Verify the given payload
func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
var keySize int
var hash crypto.Hash
switch alg {
case ES256:
keySize = 32
hash = crypto.SHA256
case ES384:
keySize = 48
hash = crypto.SHA384
case ES512:
keySize = 66
hash = crypto.SHA512
default:
return ErrUnsupportedAlgorithm
}
if len(signature) != 2*keySize {
return fmt.Errorf("go-jose/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize)
}
hasher := hash.New()
// According to documentation, Write() on hash never fails
_, _ = hasher.Write(payload)
hashed := hasher.Sum(nil)
r := big.NewInt(0).SetBytes(signature[:keySize])
s := big.NewInt(0).SetBytes(signature[keySize:])
match := ecdsa.Verify(ctx.publicKey, hashed, r, s)
if !match {
return errors.New("go-jose/go-jose: ecdsa signature failed to verify")
}
return nil
}

196
vendor/github.com/go-jose/go-jose/v3/cipher/cbc_hmac.go generated vendored Normal file
View file

@ -0,0 +1,196 @@
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package josecipher
import (
"bytes"
"crypto/cipher"
"crypto/hmac"
"crypto/sha256"
"crypto/sha512"
"crypto/subtle"
"encoding/binary"
"errors"
"hash"
)
const (
nonceBytes = 16
)
// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC.
func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) {
keySize := len(key) / 2
integrityKey := key[:keySize]
encryptionKey := key[keySize:]
blockCipher, err := newBlockCipher(encryptionKey)
if err != nil {
return nil, err
}
var hash func() hash.Hash
switch keySize {
case 16:
hash = sha256.New
case 24:
hash = sha512.New384
case 32:
hash = sha512.New
}
return &cbcAEAD{
hash: hash,
blockCipher: blockCipher,
authtagBytes: keySize,
integrityKey: integrityKey,
}, nil
}
// An AEAD based on CBC+HMAC
type cbcAEAD struct {
hash func() hash.Hash
authtagBytes int
integrityKey []byte
blockCipher cipher.Block
}
func (ctx *cbcAEAD) NonceSize() int {
return nonceBytes
}
func (ctx *cbcAEAD) Overhead() int {
// Maximum overhead is block size (for padding) plus auth tag length, where
// the length of the auth tag is equivalent to the key size.
return ctx.blockCipher.BlockSize() + ctx.authtagBytes
}
// Seal encrypts and authenticates the plaintext.
func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte {
// Output buffer -- must take care not to mangle plaintext input.
ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)]
copy(ciphertext, plaintext)
ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize())
cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce)
cbc.CryptBlocks(ciphertext, ciphertext)
authtag := ctx.computeAuthTag(data, nonce, ciphertext)
ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag)))
copy(out, ciphertext)
copy(out[len(ciphertext):], authtag)
return ret
}
// Open decrypts and authenticates the ciphertext.
func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
if len(ciphertext) < ctx.authtagBytes {
return nil, errors.New("go-jose/go-jose: invalid ciphertext (too short)")
}
offset := len(ciphertext) - ctx.authtagBytes
expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset])
match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:])
if match != 1 {
return nil, errors.New("go-jose/go-jose: invalid ciphertext (auth tag mismatch)")
}
cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce)
// Make copy of ciphertext buffer, don't want to modify in place
buffer := append([]byte{}, ciphertext[:offset]...)
if len(buffer)%ctx.blockCipher.BlockSize() > 0 {
return nil, errors.New("go-jose/go-jose: invalid ciphertext (invalid length)")
}
cbc.CryptBlocks(buffer, buffer)
// Remove padding
plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize())
if err != nil {
return nil, err
}
ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext)))
copy(out, plaintext)
return ret, nil
}
// Compute an authentication tag
func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte {
buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8)
n := 0
n += copy(buffer, aad)
n += copy(buffer[n:], nonce)
n += copy(buffer[n:], ciphertext)
binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8)
// According to documentation, Write() on hash.Hash never fails.
hmac := hmac.New(ctx.hash, ctx.integrityKey)
_, _ = hmac.Write(buffer)
return hmac.Sum(nil)[:ctx.authtagBytes]
}
// resize ensures that the given slice has a capacity of at least n bytes.
// If the capacity of the slice is less than n, a new slice is allocated
// and the existing data will be copied.
func resize(in []byte, n uint64) (head, tail []byte) {
if uint64(cap(in)) >= n {
head = in[:n]
} else {
head = make([]byte, n)
copy(head, in)
}
tail = head[len(in):]
return
}
// Apply padding
func padBuffer(buffer []byte, blockSize int) []byte {
missing := blockSize - (len(buffer) % blockSize)
ret, out := resize(buffer, uint64(len(buffer))+uint64(missing))
padding := bytes.Repeat([]byte{byte(missing)}, missing)
copy(out, padding)
return ret
}
// Remove padding
func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) {
if len(buffer)%blockSize != 0 {
return nil, errors.New("go-jose/go-jose: invalid padding")
}
last := buffer[len(buffer)-1]
count := int(last)
if count == 0 || count > blockSize || count > len(buffer) {
return nil, errors.New("go-jose/go-jose: invalid padding")
}
padding := bytes.Repeat([]byte{last}, count)
if !bytes.HasSuffix(buffer, padding) {
return nil, errors.New("go-jose/go-jose: invalid padding")
}
return buffer[:len(buffer)-count], nil
}

View file

@ -0,0 +1,75 @@
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package josecipher
import (
"crypto"
"encoding/binary"
"hash"
"io"
)
type concatKDF struct {
z, info []byte
i uint32
cache []byte
hasher hash.Hash
}
// NewConcatKDF builds a KDF reader based on the given inputs.
func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader {
buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo)))
n := 0
n += copy(buffer, algID)
n += copy(buffer[n:], ptyUInfo)
n += copy(buffer[n:], ptyVInfo)
n += copy(buffer[n:], supPubInfo)
copy(buffer[n:], supPrivInfo)
hasher := hash.New()
return &concatKDF{
z: z,
info: buffer,
hasher: hasher,
cache: []byte{},
i: 1,
}
}
func (ctx *concatKDF) Read(out []byte) (int, error) {
copied := copy(out, ctx.cache)
ctx.cache = ctx.cache[copied:]
for copied < len(out) {
ctx.hasher.Reset()
// Write on a hash.Hash never fails
_ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i)
_, _ = ctx.hasher.Write(ctx.z)
_, _ = ctx.hasher.Write(ctx.info)
hash := ctx.hasher.Sum(nil)
chunkCopied := copy(out[copied:], hash)
copied += chunkCopied
ctx.cache = hash[chunkCopied:]
ctx.i++
}
return copied, nil
}

86
vendor/github.com/go-jose/go-jose/v3/cipher/ecdh_es.go generated vendored Normal file
View file

@ -0,0 +1,86 @@
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package josecipher
import (
"bytes"
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"encoding/binary"
)
// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA.
// It is an error to call this function with a private/public key pair that is not on the same
// curve. Callers must ensure that the keys are valid before calling this function. Output
// size may be at most 1<<16 bytes (64 KiB).
func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte {
if size > 1<<16 {
panic("ECDH-ES output size too large, must be less than or equal to 1<<16")
}
// algId, partyUInfo, partyVInfo inputs must be prefixed with the length
algID := lengthPrefixed([]byte(alg))
ptyUInfo := lengthPrefixed(apuData)
ptyVInfo := lengthPrefixed(apvData)
// suppPubInfo is the encoded length of the output size in bits
supPubInfo := make([]byte, 4)
binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8)
if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) {
panic("public key not on same curve as private key")
}
z, _ := priv.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes())
zBytes := z.Bytes()
// Note that calling z.Bytes() on a big.Int may strip leading zero bytes from
// the returned byte array. This can lead to a problem where zBytes will be
// shorter than expected which breaks the key derivation. Therefore we must pad
// to the full length of the expected coordinate here before calling the KDF.
octSize := dSize(priv.Curve)
if len(zBytes) != octSize {
zBytes = append(bytes.Repeat([]byte{0}, octSize-len(zBytes)), zBytes...)
}
reader := NewConcatKDF(crypto.SHA256, zBytes, algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{})
key := make([]byte, size)
// Read on the KDF will never fail
_, _ = reader.Read(key)
return key
}
// dSize returns the size in octets for a coordinate on an elliptic curve.
func dSize(curve elliptic.Curve) int {
order := curve.Params().P
bitLen := order.BitLen()
size := bitLen / 8
if bitLen%8 != 0 {
size++
}
return size
}
func lengthPrefixed(data []byte) []byte {
out := make([]byte, len(data)+4)
binary.BigEndian.PutUint32(out, uint32(len(data)))
copy(out[4:], data)
return out
}

109
vendor/github.com/go-jose/go-jose/v3/cipher/key_wrap.go generated vendored Normal file
View file

@ -0,0 +1,109 @@
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package josecipher
import (
"crypto/cipher"
"crypto/subtle"
"encoding/binary"
"errors"
)
var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6}
// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher.
func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) {
if len(cek)%8 != 0 {
return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks")
}
n := len(cek) / 8
r := make([][]byte, n)
for i := range r {
r[i] = make([]byte, 8)
copy(r[i], cek[i*8:])
}
buffer := make([]byte, 16)
tBytes := make([]byte, 8)
copy(buffer, defaultIV)
for t := 0; t < 6*n; t++ {
copy(buffer[8:], r[t%n])
block.Encrypt(buffer, buffer)
binary.BigEndian.PutUint64(tBytes, uint64(t+1))
for i := 0; i < 8; i++ {
buffer[i] ^= tBytes[i]
}
copy(r[t%n], buffer[8:])
}
out := make([]byte, (n+1)*8)
copy(out, buffer[:8])
for i := range r {
copy(out[(i+1)*8:], r[i])
}
return out, nil
}
// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher.
func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) {
if len(ciphertext)%8 != 0 {
return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks")
}
n := (len(ciphertext) / 8) - 1
r := make([][]byte, n)
for i := range r {
r[i] = make([]byte, 8)
copy(r[i], ciphertext[(i+1)*8:])
}
buffer := make([]byte, 16)
tBytes := make([]byte, 8)
copy(buffer[:8], ciphertext[:8])
for t := 6*n - 1; t >= 0; t-- {
binary.BigEndian.PutUint64(tBytes, uint64(t+1))
for i := 0; i < 8; i++ {
buffer[i] ^= tBytes[i]
}
copy(buffer[8:], r[t%n])
block.Decrypt(buffer, buffer)
copy(r[t%n], buffer[8:])
}
if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 {
return nil, errors.New("go-jose/go-jose: failed to unwrap key")
}
out := make([]byte, n*8)
for i := range r {
copy(out[i*8:], r[i])
}
return out, nil
}
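Illustrative round-trip sketch (not part of the vendored file; the key material is made up): wrap a 16-byte CEK under an AES-128 key-encryption key, unwrap it again, and check that the original comes back.

package main

import (
    "bytes"
    "crypto/aes"
    "fmt"

    josecipher "github.com/go-jose/go-jose/v3/cipher"
)

func main() {
    kek := bytes.Repeat([]byte{0x01}, 16) // key-encryption key (AES-128)
    cek := bytes.Repeat([]byte{0x02}, 16) // content-encryption key to protect

    block, err := aes.NewCipher(kek)
    if err != nil {
        panic(err)
    }

    wrapped, err := josecipher.KeyWrap(block, cek) // output is len(cek)+8 bytes
    if err != nil {
        panic(err)
    }
    unwrapped, err := josecipher.KeyUnwrap(block, wrapped)
    if err != nil {
        panic(err)
    }

    fmt.Println(bytes.Equal(cek, unwrapped)) // true
}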

544
vendor/github.com/go-jose/go-jose/v3/crypter.go generated vendored Normal file
View file

@ -0,0 +1,544 @@
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jose
import (
"crypto/ecdsa"
"crypto/rsa"
"errors"
"fmt"
"reflect"
"github.com/go-jose/go-jose/v3/json"
)
// Encrypter represents an encrypter which produces an encrypted JWE object.
type Encrypter interface {
Encrypt(plaintext []byte) (*JSONWebEncryption, error)
EncryptWithAuthData(plaintext []byte, aad []byte) (*JSONWebEncryption, error)
Options() EncrypterOptions
}
// A generic content cipher
type contentCipher interface {
keySize() int
encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error)
decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error)
}
// A key generator (for generating/getting a CEK)
type keyGenerator interface {
keySize() int
genKey() ([]byte, rawHeader, error)
}
// A generic key encrypter
type keyEncrypter interface {
encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key
}
// A generic key decrypter
type keyDecrypter interface {
decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key
}
// A generic encrypter based on the given key encrypter and content cipher.
type genericEncrypter struct {
contentAlg ContentEncryption
compressionAlg CompressionAlgorithm
cipher contentCipher
recipients []recipientKeyInfo
keyGenerator keyGenerator
extraHeaders map[HeaderKey]interface{}
}
type recipientKeyInfo struct {
keyID string
keyAlg KeyAlgorithm
keyEncrypter keyEncrypter
}
// EncrypterOptions represents options that can be set on new encrypters.
type EncrypterOptions struct {
Compression CompressionAlgorithm
// Optional map of additional keys to be inserted into the protected header
// of a JWS object. Some specifications which make use of JWS like to insert
// additional values here. All values must be JSON-serializable.
ExtraHeaders map[HeaderKey]interface{}
}
// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it
// if necessary. It returns itself and so can be used in a fluent style.
func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions {
if eo.ExtraHeaders == nil {
eo.ExtraHeaders = map[HeaderKey]interface{}{}
}
eo.ExtraHeaders[k] = v
return eo
}
// WithContentType adds a content type ("cty") header and returns the updated
// EncrypterOptions.
func (eo *EncrypterOptions) WithContentType(contentType ContentType) *EncrypterOptions {
return eo.WithHeader(HeaderContentType, contentType)
}
// WithType adds a type ("typ") header and returns the updated EncrypterOptions.
func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions {
return eo.WithHeader(HeaderType, typ)
}
// Recipient represents an algorithm/key to encrypt messages to.
//
// PBES2Count and PBES2Salt correspond with the "p2c" and "p2s" headers used
// on the password-based encryption algorithms PBES2-HS256+A128KW,
// PBES2-HS384+A192KW, and PBES2-HS512+A256KW. If they are not provided, a safe
// default of 100000 will be used for the count and a 128-bit random salt will
// be generated.
type Recipient struct {
Algorithm KeyAlgorithm
Key interface{}
KeyID string
PBES2Count int
PBES2Salt []byte
}
// NewEncrypter creates an appropriate encrypter based on the key type
func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) {
encrypter := &genericEncrypter{
contentAlg: enc,
recipients: []recipientKeyInfo{},
cipher: getContentCipher(enc),
}
if opts != nil {
encrypter.compressionAlg = opts.Compression
encrypter.extraHeaders = opts.ExtraHeaders
}
if encrypter.cipher == nil {
return nil, ErrUnsupportedAlgorithm
}
var keyID string
var rawKey interface{}
switch encryptionKey := rcpt.Key.(type) {
case JSONWebKey:
keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key
case *JSONWebKey:
keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key
case OpaqueKeyEncrypter:
keyID, rawKey = encryptionKey.KeyID(), encryptionKey
default:
rawKey = encryptionKey
}
switch rcpt.Algorithm {
case DIRECT:
// Direct encryption mode must be treated differently
if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) {
return nil, ErrUnsupportedKeyType
}
if encrypter.cipher.keySize() != len(rawKey.([]byte)) {
return nil, ErrInvalidKeySize
}
encrypter.keyGenerator = staticKeyGenerator{
key: rawKey.([]byte),
}
recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, rawKey.([]byte))
recipientInfo.keyID = keyID
if rcpt.KeyID != "" {
recipientInfo.keyID = rcpt.KeyID
}
encrypter.recipients = []recipientKeyInfo{recipientInfo}
return encrypter, nil
case ECDH_ES:
// ECDH-ES (w/o key wrapping) is similar to DIRECT mode
typeOf := reflect.TypeOf(rawKey)
if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) {
return nil, ErrUnsupportedKeyType
}
encrypter.keyGenerator = ecKeyGenerator{
size: encrypter.cipher.keySize(),
algID: string(enc),
publicKey: rawKey.(*ecdsa.PublicKey),
}
recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, rawKey.(*ecdsa.PublicKey))
recipientInfo.keyID = keyID
if rcpt.KeyID != "" {
recipientInfo.keyID = rcpt.KeyID
}
encrypter.recipients = []recipientKeyInfo{recipientInfo}
return encrypter, nil
default:
// Can just add a standard recipient
encrypter.keyGenerator = randomKeyGenerator{
size: encrypter.cipher.keySize(),
}
err := encrypter.addRecipient(rcpt)
return encrypter, err
}
}
// NewMultiEncrypter creates a multi-encrypter based on the given parameters
func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *EncrypterOptions) (Encrypter, error) {
cipher := getContentCipher(enc)
if cipher == nil {
return nil, ErrUnsupportedAlgorithm
}
if len(rcpts) == 0 {
return nil, fmt.Errorf("go-jose/go-jose: recipients is nil or empty")
}
encrypter := &genericEncrypter{
contentAlg: enc,
recipients: []recipientKeyInfo{},
cipher: cipher,
keyGenerator: randomKeyGenerator{
size: cipher.keySize(),
},
}
if opts != nil {
encrypter.compressionAlg = opts.Compression
encrypter.extraHeaders = opts.ExtraHeaders
}
for _, recipient := range rcpts {
err := encrypter.addRecipient(recipient)
if err != nil {
return nil, err
}
}
return encrypter, nil
}
func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) {
var recipientInfo recipientKeyInfo
switch recipient.Algorithm {
case DIRECT, ECDH_ES:
return fmt.Errorf("go-jose/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm)
}
recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key)
if recipient.KeyID != "" {
recipientInfo.keyID = recipient.KeyID
}
switch recipient.Algorithm {
case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW:
if sr, ok := recipientInfo.keyEncrypter.(*symmetricKeyCipher); ok {
sr.p2c = recipient.PBES2Count
sr.p2s = recipient.PBES2Salt
}
}
if err == nil {
ctx.recipients = append(ctx.recipients, recipientInfo)
}
return err
}
func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) {
switch encryptionKey := encryptionKey.(type) {
case *rsa.PublicKey:
return newRSARecipient(alg, encryptionKey)
case *ecdsa.PublicKey:
return newECDHRecipient(alg, encryptionKey)
case []byte:
return newSymmetricRecipient(alg, encryptionKey)
case string:
return newSymmetricRecipient(alg, []byte(encryptionKey))
case *JSONWebKey:
recipient, err := makeJWERecipient(alg, encryptionKey.Key)
recipient.keyID = encryptionKey.KeyID
return recipient, err
}
if encrypter, ok := encryptionKey.(OpaqueKeyEncrypter); ok {
return newOpaqueKeyEncrypter(alg, encrypter)
}
return recipientKeyInfo{}, ErrUnsupportedKeyType
}
// newDecrypter creates an appropriate decrypter based on the key type
func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) {
switch decryptionKey := decryptionKey.(type) {
case *rsa.PrivateKey:
return &rsaDecrypterSigner{
privateKey: decryptionKey,
}, nil
case *ecdsa.PrivateKey:
return &ecDecrypterSigner{
privateKey: decryptionKey,
}, nil
case []byte:
return &symmetricKeyCipher{
key: decryptionKey,
}, nil
case string:
return &symmetricKeyCipher{
key: []byte(decryptionKey),
}, nil
case JSONWebKey:
return newDecrypter(decryptionKey.Key)
case *JSONWebKey:
return newDecrypter(decryptionKey.Key)
}
if okd, ok := decryptionKey.(OpaqueKeyDecrypter); ok {
return &opaqueKeyDecrypter{decrypter: okd}, nil
}
return nil, ErrUnsupportedKeyType
}
// Implementation of encrypt method producing a JWE object.
func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JSONWebEncryption, error) {
return ctx.EncryptWithAuthData(plaintext, nil)
}
// Implementation of encrypt method producing a JWE object.
func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWebEncryption, error) {
obj := &JSONWebEncryption{}
obj.aad = aad
obj.protected = &rawHeader{}
err := obj.protected.set(headerEncryption, ctx.contentAlg)
if err != nil {
return nil, err
}
obj.recipients = make([]recipientInfo, len(ctx.recipients))
if len(ctx.recipients) == 0 {
return nil, fmt.Errorf("go-jose/go-jose: no recipients to encrypt to")
}
cek, headers, err := ctx.keyGenerator.genKey()
if err != nil {
return nil, err
}
obj.protected.merge(&headers)
for i, info := range ctx.recipients {
recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg)
if err != nil {
return nil, err
}
err = recipient.header.set(headerAlgorithm, info.keyAlg)
if err != nil {
return nil, err
}
if info.keyID != "" {
err = recipient.header.set(headerKeyID, info.keyID)
if err != nil {
return nil, err
}
}
obj.recipients[i] = recipient
}
if len(ctx.recipients) == 1 {
// Move per-recipient headers into main protected header if there's
// only a single recipient.
obj.protected.merge(obj.recipients[0].header)
obj.recipients[0].header = nil
}
if ctx.compressionAlg != NONE {
plaintext, err = compress(ctx.compressionAlg, plaintext)
if err != nil {
return nil, err
}
err = obj.protected.set(headerCompression, ctx.compressionAlg)
if err != nil {
return nil, err
}
}
for k, v := range ctx.extraHeaders {
b, err := json.Marshal(v)
if err != nil {
return nil, err
}
(*obj.protected)[k] = makeRawMessage(b)
}
authData := obj.computeAuthData()
parts, err := ctx.cipher.encrypt(cek, authData, plaintext)
if err != nil {
return nil, err
}
obj.iv = parts.iv
obj.ciphertext = parts.ciphertext
obj.tag = parts.tag
return obj, nil
}
func (ctx *genericEncrypter) Options() EncrypterOptions {
return EncrypterOptions{
Compression: ctx.compressionAlg,
ExtraHeaders: ctx.extraHeaders,
}
}
// Decrypt and validate the object and return the plaintext. Note that this
// function does not support multi-recipient; if you need multi-recipient
// decryption, use DecryptMulti instead.
func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) {
headers := obj.mergedHeaders(nil)
if len(obj.recipients) > 1 {
return nil, errors.New("go-jose/go-jose: too many recipients in payload; expecting only one")
}
critical, err := headers.getCritical()
if err != nil {
return nil, fmt.Errorf("go-jose/go-jose: invalid crit header")
}
if len(critical) > 0 {
return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
}
key := tryJWKS(decryptionKey, obj.Header)
decrypter, err := newDecrypter(key)
if err != nil {
return nil, err
}
cipher := getContentCipher(headers.getEncryption())
if cipher == nil {
return nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(headers.getEncryption()))
}
generator := randomKeyGenerator{
size: cipher.keySize(),
}
parts := &aeadParts{
iv: obj.iv,
ciphertext: obj.ciphertext,
tag: obj.tag,
}
authData := obj.computeAuthData()
var plaintext []byte
recipient := obj.recipients[0]
recipientHeaders := obj.mergedHeaders(&recipient)
cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
if err == nil {
// Found a valid CEK -- let's try to decrypt.
plaintext, err = cipher.decrypt(cek, authData, parts)
}
if plaintext == nil {
return nil, ErrCryptoFailure
}
// The "zip" header parameter may only be present in the protected header.
if comp := obj.protected.getCompression(); comp != "" {
plaintext, err = decompress(comp, plaintext)
}
return plaintext, err
}
// DecryptMulti decrypts and validates the object and returns the plaintexts,
// with support for multiple recipients. It returns the index of the recipient
// for which the decryption was successful, the merged headers for that recipient,
// and the plaintext.
func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) {
globalHeaders := obj.mergedHeaders(nil)
critical, err := globalHeaders.getCritical()
if err != nil {
return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: invalid crit header")
}
if len(critical) > 0 {
return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
}
key := tryJWKS(decryptionKey, obj.Header)
decrypter, err := newDecrypter(key)
if err != nil {
return -1, Header{}, nil, err
}
encryption := globalHeaders.getEncryption()
cipher := getContentCipher(encryption)
if cipher == nil {
return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(encryption))
}
generator := randomKeyGenerator{
size: cipher.keySize(),
}
parts := &aeadParts{
iv: obj.iv,
ciphertext: obj.ciphertext,
tag: obj.tag,
}
authData := obj.computeAuthData()
index := -1
var plaintext []byte
var headers rawHeader
for i, recipient := range obj.recipients {
recipientHeaders := obj.mergedHeaders(&recipient)
cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
if err == nil {
// Found a valid CEK -- let's try to decrypt.
plaintext, err = cipher.decrypt(cek, authData, parts)
if err == nil {
index = i
headers = recipientHeaders
break
}
}
}
if plaintext == nil {
return -1, Header{}, nil, ErrCryptoFailure
}
// The "zip" header parameter may only be present in the protected header.
if comp := obj.protected.getCompression(); comp != "" {
plaintext, _ = decompress(comp, plaintext)
}
sanitized, err := headers.sanitized()
if err != nil {
return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to sanitize header: %v", err)
}
return index, sanitized, plaintext, err
}
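A hedged end-to-end sketch of the multi-recipient path above (not part of the vendored file; the RSA keys, key IDs, and plaintext are made up for the example):

package main

import (
    "crypto/rand"
    "crypto/rsa"
    "fmt"

    "github.com/go-jose/go-jose/v3"
)

func main() {
    k1, _ := rsa.GenerateKey(rand.Reader, 2048)
    k2, _ := rsa.GenerateKey(rand.Reader, 2048)

    // One encrypter, two recipients: the random CEK is wrapped once per recipient.
    enc, err := jose.NewMultiEncrypter(jose.A128GCM, []jose.Recipient{
        {Algorithm: jose.RSA_OAEP, Key: &k1.PublicKey, KeyID: "key-1"},
        {Algorithm: jose.RSA_OAEP, Key: &k2.PublicKey, KeyID: "key-2"},
    }, nil)
    if err != nil {
        panic(err)
    }

    obj, err := enc.Encrypt([]byte("attack at dawn"))
    if err != nil {
        panic(err)
    }

    // Either private key recovers the plaintext; the recipient index and the
    // merged headers for that recipient come back alongside it.
    idx, hdr, pt, err := obj.DecryptMulti(k2)
    if err != nil {
        panic(err)
    }
    fmt.Println(idx, hdr.KeyID, string(pt)) // expected: 1 key-2 attack at dawn
}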

27
vendor/github.com/go-jose/go-jose/v3/doc.go generated vendored Normal file
View file

@ -0,0 +1,27 @@
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
Package jose aims to provide an implementation of the Javascript Object Signing
and Encryption set of standards. It implements encryption and signing based on
the JSON Web Encryption and JSON Web Signature standards, with optional JSON Web
Token support available in a sub-package. The library supports both the compact
and JWS/JWE JSON Serialization formats, and has optional support for multiple
recipients.
*/
package jose
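A minimal single-recipient sketch of this API (not part of the vendored file; RSA-OAEP with AES-128-GCM is an arbitrary choice, and error handling is elided for brevity):

package main

import (
    "crypto/rand"
    "crypto/rsa"
    "fmt"

    "github.com/go-jose/go-jose/v3"
)

func main() {
    key, _ := rsa.GenerateKey(rand.Reader, 2048)

    // Encrypt to a single RSA recipient, using AES-128-GCM for the content.
    encrypter, _ := jose.NewEncrypter(jose.A128GCM,
        jose.Recipient{Algorithm: jose.RSA_OAEP, Key: &key.PublicKey}, nil)
    obj, _ := encrypter.Encrypt([]byte("hello JWE"))

    // Round-trip through the compact serialization, then decrypt.
    compact, _ := obj.CompactSerialize()
    parsed, _ := jose.ParseEncrypted(compact)
    plaintext, _ := parsed.Decrypt(key)
    fmt.Println(string(plaintext)) // hello JWE
}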

191
vendor/github.com/go-jose/go-jose/v3/encoding.go generated vendored Normal file
View file

@ -0,0 +1,191 @@
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jose
import (
"bytes"
"compress/flate"
"encoding/base64"
"encoding/binary"
"io"
"math/big"
"strings"
"unicode"
"github.com/go-jose/go-jose/v3/json"
)
// Helper function to serialize known-good objects.
// Precondition: value is not a nil pointer.
func mustSerializeJSON(value interface{}) []byte {
out, err := json.Marshal(value)
if err != nil {
panic(err)
}
// We never want to serialize the top-level value "null," since it's not a
// valid JOSE message. But if a caller passes in a nil pointer to this method,
// MarshalJSON will happily serialize it as the top-level value "null". If
// that value is then embedded in another operation, for instance by being
// base64-encoded and fed as input to a signing algorithm
// (https://github.com/go-jose/go-jose/issues/22), the result will be
// incorrect. Because this method is intended for known-good objects, and a nil
// pointer is not a known-good object, we are free to panic in this case.
// Note: It's not possible to directly check whether the data pointed at by an
// interface is a nil pointer, so we do this hacky workaround.
// https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I
if string(out) == "null" {
panic("Tried to serialize a nil pointer.")
}
return out
}
// Strip all newlines and whitespace
func stripWhitespace(data string) string {
buf := strings.Builder{}
buf.Grow(len(data))
for _, r := range data {
if !unicode.IsSpace(r) {
buf.WriteRune(r)
}
}
return buf.String()
}
// Perform compression based on algorithm
func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
switch algorithm {
case DEFLATE:
return deflate(input)
default:
return nil, ErrUnsupportedAlgorithm
}
}
// Perform decompression based on algorithm
func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
switch algorithm {
case DEFLATE:
return inflate(input)
default:
return nil, ErrUnsupportedAlgorithm
}
}
// Compress with DEFLATE
func deflate(input []byte) ([]byte, error) {
output := new(bytes.Buffer)
// Writing to byte buffer, err is always nil
writer, _ := flate.NewWriter(output, 1)
_, _ = io.Copy(writer, bytes.NewBuffer(input))
err := writer.Close()
return output.Bytes(), err
}
// Decompress with DEFLATE
func inflate(input []byte) ([]byte, error) {
output := new(bytes.Buffer)
reader := flate.NewReader(bytes.NewBuffer(input))
_, err := io.Copy(output, reader)
if err != nil {
return nil, err
}
err = reader.Close()
return output.Bytes(), err
}
// byteBuffer represents a slice of bytes that can be serialized to url-safe base64.
type byteBuffer struct {
data []byte
}
func newBuffer(data []byte) *byteBuffer {
if data == nil {
return nil
}
return &byteBuffer{
data: data,
}
}
func newFixedSizeBuffer(data []byte, length int) *byteBuffer {
if len(data) > length {
panic("go-jose/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)")
}
pad := make([]byte, length-len(data))
return newBuffer(append(pad, data...))
}
func newBufferFromInt(num uint64) *byteBuffer {
data := make([]byte, 8)
binary.BigEndian.PutUint64(data, num)
return newBuffer(bytes.TrimLeft(data, "\x00"))
}
func (b *byteBuffer) MarshalJSON() ([]byte, error) {
return json.Marshal(b.base64())
}
func (b *byteBuffer) UnmarshalJSON(data []byte) error {
var encoded string
err := json.Unmarshal(data, &encoded)
if err != nil {
return err
}
if encoded == "" {
return nil
}
decoded, err := base64URLDecode(encoded)
if err != nil {
return err
}
*b = *newBuffer(decoded)
return nil
}
func (b *byteBuffer) base64() string {
return base64.RawURLEncoding.EncodeToString(b.data)
}
func (b *byteBuffer) bytes() []byte {
// Handling nil here allows us to transparently handle nil slices when serializing.
if b == nil {
return nil
}
return b.data
}
func (b byteBuffer) bigInt() *big.Int {
return new(big.Int).SetBytes(b.data)
}
func (b byteBuffer) toInt() int {
return int(b.bigInt().Int64())
}
// base64URLDecode is implemented as defined in https://www.rfc-editor.org/rfc/rfc7515.html#appendix-C
func base64URLDecode(value string) ([]byte, error) {
value = strings.TrimRight(value, "=")
return base64.RawURLEncoding.DecodeString(value)
}

27
vendor/github.com/go-jose/go-jose/v3/json/LICENSE generated vendored Normal file
View file

@ -0,0 +1,27 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

13
vendor/github.com/go-jose/go-jose/v3/json/README.md generated vendored Normal file
View file

@ -0,0 +1,13 @@
# Safe JSON
This repository contains a fork of the `encoding/json` package from Go 1.6.
The following changes were made:
* Object deserialization uses case-sensitive member name matching instead of
[case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html).
This is to avoid differences in the interpretation of JOSE messages between
go-jose and libraries written in other languages.
* When deserializing a JSON object, we check for duplicate keys and reject the
input whenever we detect a duplicate. Rather than trying to work with malformed
data, we prefer to reject it right away.
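For example (an illustrative sketch, not part of the upstream README), a duplicate member that the standard library would quietly resolve by keeping the last value makes this fork's Unmarshal return an error:

package main

import (
    "fmt"

    "github.com/go-jose/go-jose/v3/json"
)

func main() {
    var out map[string]interface{}
    // "alg" appears twice; this fork refuses to guess which value should win.
    err := json.Unmarshal([]byte(`{"alg":"RS256","alg":"none"}`), &out)
    fmt.Println(err != nil) // true: the input is rejected
}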

1217
vendor/github.com/go-jose/go-jose/v3/json/decode.go generated vendored Normal file

File diff suppressed because it is too large Load diff

1197
vendor/github.com/go-jose/go-jose/v3/json/encode.go generated vendored Normal file

File diff suppressed because it is too large Load diff

141
vendor/github.com/go-jose/go-jose/v3/json/indent.go generated vendored Normal file
View file

@ -0,0 +1,141 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import "bytes"
// Compact appends to dst the JSON-encoded src with
// insignificant space characters elided.
func Compact(dst *bytes.Buffer, src []byte) error {
return compact(dst, src, false)
}
func compact(dst *bytes.Buffer, src []byte, escape bool) error {
origLen := dst.Len()
var scan scanner
scan.reset()
start := 0
for i, c := range src {
if escape && (c == '<' || c == '>' || c == '&') {
if start < i {
dst.Write(src[start:i])
}
dst.WriteString(`\u00`)
dst.WriteByte(hex[c>>4])
dst.WriteByte(hex[c&0xF])
start = i + 1
}
// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
if start < i {
dst.Write(src[start:i])
}
dst.WriteString(`\u202`)
dst.WriteByte(hex[src[i+2]&0xF])
start = i + 3
}
v := scan.step(&scan, c)
if v >= scanSkipSpace {
if v == scanError {
break
}
if start < i {
dst.Write(src[start:i])
}
start = i + 1
}
}
if scan.eof() == scanError {
dst.Truncate(origLen)
return scan.err
}
if start < len(src) {
dst.Write(src[start:])
}
return nil
}
func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
dst.WriteByte('\n')
dst.WriteString(prefix)
for i := 0; i < depth; i++ {
dst.WriteString(indent)
}
}
// Indent appends to dst an indented form of the JSON-encoded src.
// Each element in a JSON object or array begins on a new,
// indented line beginning with prefix followed by one or more
// copies of indent according to the indentation nesting.
// The data appended to dst does not begin with the prefix nor
// any indentation, to make it easier to embed inside other formatted JSON data.
// Although leading space characters (space, tab, carriage return, newline)
// at the beginning of src are dropped, trailing space characters
// at the end of src are preserved and copied to dst.
// For example, if src has no trailing spaces, neither will dst;
// if src ends in a trailing newline, so will dst.
func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
origLen := dst.Len()
var scan scanner
scan.reset()
needIndent := false
depth := 0
for _, c := range src {
scan.bytes++
v := scan.step(&scan, c)
if v == scanSkipSpace {
continue
}
if v == scanError {
break
}
if needIndent && v != scanEndObject && v != scanEndArray {
needIndent = false
depth++
newline(dst, prefix, indent, depth)
}
// Emit semantically uninteresting bytes
// (in particular, punctuation in strings) unmodified.
if v == scanContinue {
dst.WriteByte(c)
continue
}
// Add spacing around real punctuation.
switch c {
case '{', '[':
// delay indent so that empty object and array are formatted as {} and [].
needIndent = true
dst.WriteByte(c)
case ',':
dst.WriteByte(c)
newline(dst, prefix, indent, depth)
case ':':
dst.WriteByte(c)
dst.WriteByte(' ')
case '}', ']':
if needIndent {
// suppress indent in empty object/array
needIndent = false
} else {
depth--
newline(dst, prefix, indent, depth)
}
dst.WriteByte(c)
default:
dst.WriteByte(c)
}
}
if scan.eof() == scanError {
dst.Truncate(origLen)
return scan.err
}
return nil
}
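A small sketch of the two helpers above (not part of the vendored file; the input document and the two-space indent are arbitrary):

package main

import (
    "bytes"
    "fmt"

    "github.com/go-jose/go-jose/v3/json"
)

func main() {
    src := []byte(`{ "a" : [ 1, 2 ] }`)

    var compacted, indented bytes.Buffer
    _ = json.Compact(&compacted, src)
    _ = json.Indent(&indented, src, "", "  ")

    fmt.Println(compacted.String()) // {"a":[1,2]}
    fmt.Println(indented.String())
    // {
    //   "a": [
    //     1,
    //     2
    //   ]
    // }
}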

623
vendor/github.com/go-jose/go-jose/v3/json/scanner.go generated vendored Normal file
View file

@ -0,0 +1,623 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
// JSON value parser state machine.
// Just about at the limit of what is reasonable to write by hand.
// Some parts are a bit tedious, but overall it nicely factors out the
// otherwise common code from the multiple scanning functions
// in this package (Compact, Indent, checkValid, nextValue, etc).
//
// This file starts with two simple examples using the scanner
// before diving into the scanner itself.
import "strconv"
// checkValid verifies that data is valid JSON-encoded data.
// scan is passed in for use by checkValid to avoid an allocation.
func checkValid(data []byte, scan *scanner) error {
scan.reset()
for _, c := range data {
scan.bytes++
if scan.step(scan, c) == scanError {
return scan.err
}
}
if scan.eof() == scanError {
return scan.err
}
return nil
}
// nextValue splits data after the next whole JSON value,
// returning that value and the bytes that follow it as separate slices.
// scan is passed in for use by nextValue to avoid an allocation.
func nextValue(data []byte, scan *scanner) (value, rest []byte, err error) {
scan.reset()
for i, c := range data {
v := scan.step(scan, c)
if v >= scanEndObject {
switch v {
// probe the scanner with a space to determine whether we will
// get scanEnd on the next character. Otherwise, if the next character
// is not a space, scanEndTop allocates a needless error.
case scanEndObject, scanEndArray:
if scan.step(scan, ' ') == scanEnd {
return data[:i+1], data[i+1:], nil
}
case scanError:
return nil, nil, scan.err
case scanEnd:
return data[:i], data[i:], nil
}
}
}
if scan.eof() == scanError {
return nil, nil, scan.err
}
return data, nil, nil
}
// A SyntaxError is a description of a JSON syntax error.
type SyntaxError struct {
msg string // description of error
Offset int64 // error occurred after reading Offset bytes
}
func (e *SyntaxError) Error() string { return e.msg }
// A scanner is a JSON scanning state machine.
// Callers call scan.reset() and then pass bytes in one at a time
// by calling scan.step(&scan, c) for each byte.
// The return value, referred to as an opcode, tells the
// caller about significant parsing events like beginning
// and ending literals, objects, and arrays, so that the
// caller can follow along if it wishes.
// The return value scanEnd indicates that a single top-level
// JSON value has been completed, *before* the byte that
// just got passed in. (The indication must be delayed in order
// to recognize the end of numbers: is 123 a whole value or
// the beginning of 12345e+6?).
type scanner struct {
// The step is a func to be called to execute the next transition.
// Also tried using an integer constant and a single func
// with a switch, but using the func directly was 10% faster
// on a 64-bit Mac Mini, and it's nicer to read.
step func(*scanner, byte) int
// Reached end of top-level value.
endTop bool
// Stack of what we're in the middle of - array values, object keys, object values.
parseState []int
// Error that happened, if any.
err error
// 1-byte redo (see undo method)
redo bool
redoCode int
redoState func(*scanner, byte) int
// total bytes consumed, updated by decoder.Decode
bytes int64
}
// These values are returned by the state transition functions
// assigned to scanner.state and the method scanner.eof.
// They give details about the current state of the scan that
// callers might be interested to know about.
// It is okay to ignore the return value of any particular
// call to scanner.state: if one call returns scanError,
// every subsequent call will return scanError too.
const (
// Continue.
scanContinue = iota // uninteresting byte
scanBeginLiteral // end implied by next result != scanContinue
scanBeginObject // begin object
scanObjectKey // just finished object key (string)
scanObjectValue // just finished non-last object value
scanEndObject // end object (implies scanObjectValue if possible)
scanBeginArray // begin array
scanArrayValue // just finished array value
scanEndArray // end array (implies scanArrayValue if possible)
scanSkipSpace // space byte; can skip; known to be last "continue" result
// Stop.
scanEnd // top-level value ended *before* this byte; known to be first "stop" result
scanError // hit an error, scanner.err.
)
// These values are stored in the parseState stack.
// They give the current state of a composite value
// being scanned. If the parser is inside a nested value
// the parseState describes the nested state, outermost at entry 0.
const (
parseObjectKey = iota // parsing object key (before colon)
parseObjectValue // parsing object value (after colon)
parseArrayValue // parsing array value
)
// reset prepares the scanner for use.
// It must be called before calling s.step.
func (s *scanner) reset() {
s.step = stateBeginValue
s.parseState = s.parseState[0:0]
s.err = nil
s.redo = false
s.endTop = false
}
// eof tells the scanner that the end of input has been reached.
// It returns a scan status just as s.step does.
func (s *scanner) eof() int {
if s.err != nil {
return scanError
}
if s.endTop {
return scanEnd
}
s.step(s, ' ')
if s.endTop {
return scanEnd
}
if s.err == nil {
s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
}
return scanError
}
// pushParseState pushes a new parse state p onto the parse stack.
func (s *scanner) pushParseState(p int) {
s.parseState = append(s.parseState, p)
}
// popParseState pops a parse state (already obtained) off the stack
// and updates s.step accordingly.
func (s *scanner) popParseState() {
n := len(s.parseState) - 1
s.parseState = s.parseState[0:n]
s.redo = false
if n == 0 {
s.step = stateEndTop
s.endTop = true
} else {
s.step = stateEndValue
}
}
func isSpace(c byte) bool {
return c == ' ' || c == '\t' || c == '\r' || c == '\n'
}
// stateBeginValueOrEmpty is the state after reading `[`.
func stateBeginValueOrEmpty(s *scanner, c byte) int {
if c <= ' ' && isSpace(c) {
return scanSkipSpace
}
if c == ']' {
return stateEndValue(s, c)
}
return stateBeginValue(s, c)
}
// stateBeginValue is the state at the beginning of the input.
func stateBeginValue(s *scanner, c byte) int {
if c <= ' ' && isSpace(c) {
return scanSkipSpace
}
switch c {
case '{':
s.step = stateBeginStringOrEmpty
s.pushParseState(parseObjectKey)
return scanBeginObject
case '[':
s.step = stateBeginValueOrEmpty
s.pushParseState(parseArrayValue)
return scanBeginArray
case '"':
s.step = stateInString
return scanBeginLiteral
case '-':
s.step = stateNeg
return scanBeginLiteral
case '0': // beginning of 0.123
s.step = state0
return scanBeginLiteral
case 't': // beginning of true
s.step = stateT
return scanBeginLiteral
case 'f': // beginning of false
s.step = stateF
return scanBeginLiteral
case 'n': // beginning of null
s.step = stateN
return scanBeginLiteral
}
if '1' <= c && c <= '9' { // beginning of 1234.5
s.step = state1
return scanBeginLiteral
}
return s.error(c, "looking for beginning of value")
}
// stateBeginStringOrEmpty is the state after reading `{`.
func stateBeginStringOrEmpty(s *scanner, c byte) int {
if c <= ' ' && isSpace(c) {
return scanSkipSpace
}
if c == '}' {
n := len(s.parseState)
s.parseState[n-1] = parseObjectValue
return stateEndValue(s, c)
}
return stateBeginString(s, c)
}
// stateBeginString is the state after reading `{"key": value,`.
func stateBeginString(s *scanner, c byte) int {
if c <= ' ' && isSpace(c) {
return scanSkipSpace
}
if c == '"' {
s.step = stateInString
return scanBeginLiteral
}
return s.error(c, "looking for beginning of object key string")
}
// stateEndValue is the state after completing a value,
// such as after reading `{}` or `true` or `["x"`.
func stateEndValue(s *scanner, c byte) int {
n := len(s.parseState)
if n == 0 {
// Completed top-level before the current byte.
s.step = stateEndTop
s.endTop = true
return stateEndTop(s, c)
}
if c <= ' ' && isSpace(c) {
s.step = stateEndValue
return scanSkipSpace
}
ps := s.parseState[n-1]
switch ps {
case parseObjectKey:
if c == ':' {
s.parseState[n-1] = parseObjectValue
s.step = stateBeginValue
return scanObjectKey
}
return s.error(c, "after object key")
case parseObjectValue:
if c == ',' {
s.parseState[n-1] = parseObjectKey
s.step = stateBeginString
return scanObjectValue
}
if c == '}' {
s.popParseState()
return scanEndObject
}
return s.error(c, "after object key:value pair")
case parseArrayValue:
if c == ',' {
s.step = stateBeginValue
return scanArrayValue
}
if c == ']' {
s.popParseState()
return scanEndArray
}
return s.error(c, "after array element")
}
return s.error(c, "")
}
// stateEndTop is the state after finishing the top-level value,
// such as after reading `{}` or `[1,2,3]`.
// Only space characters should be seen now.
func stateEndTop(s *scanner, c byte) int {
if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
// Complain about non-space byte on next call.
s.error(c, "after top-level value")
}
return scanEnd
}
// stateInString is the state after reading `"`.
func stateInString(s *scanner, c byte) int {
if c == '"' {
s.step = stateEndValue
return scanContinue
}
if c == '\\' {
s.step = stateInStringEsc
return scanContinue
}
if c < 0x20 {
return s.error(c, "in string literal")
}
return scanContinue
}
// stateInStringEsc is the state after reading `"\` during a quoted string.
func stateInStringEsc(s *scanner, c byte) int {
switch c {
case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
s.step = stateInString
return scanContinue
case 'u':
s.step = stateInStringEscU
return scanContinue
}
return s.error(c, "in string escape code")
}
// stateInStringEscU is the state after reading `"\u` during a quoted string.
func stateInStringEscU(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU1
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
func stateInStringEscU1(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU12
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
func stateInStringEscU12(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU123
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
func stateInStringEscU123(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInString
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateNeg is the state after reading `-` during a number.
func stateNeg(s *scanner, c byte) int {
if c == '0' {
s.step = state0
return scanContinue
}
if '1' <= c && c <= '9' {
s.step = state1
return scanContinue
}
return s.error(c, "in numeric literal")
}
// state1 is the state after reading a non-zero integer during a number,
// such as after reading `1` or `100` but not `0`.
func state1(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
s.step = state1
return scanContinue
}
return state0(s, c)
}
// state0 is the state after reading `0` during a number.
func state0(s *scanner, c byte) int {
if c == '.' {
s.step = stateDot
return scanContinue
}
if c == 'e' || c == 'E' {
s.step = stateE
return scanContinue
}
return stateEndValue(s, c)
}
// stateDot is the state after reading the integer and decimal point in a number,
// such as after reading `1.`.
func stateDot(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
s.step = stateDot0
return scanContinue
}
return s.error(c, "after decimal point in numeric literal")
}
// stateDot0 is the state after reading the integer, decimal point, and subsequent
// digits of a number, such as after reading `3.14`.
func stateDot0(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
return scanContinue
}
if c == 'e' || c == 'E' {
s.step = stateE
return scanContinue
}
return stateEndValue(s, c)
}
// stateE is the state after reading the mantissa and e in a number,
// such as after reading `314e` or `0.314e`.
func stateE(s *scanner, c byte) int {
if c == '+' || c == '-' {
s.step = stateESign
return scanContinue
}
return stateESign(s, c)
}
// stateESign is the state after reading the mantissa, e, and sign in a number,
// such as after reading `314e-` or `0.314e+`.
func stateESign(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
s.step = stateE0
return scanContinue
}
return s.error(c, "in exponent of numeric literal")
}
// stateE0 is the state after reading the mantissa, e, optional sign,
// and at least one digit of the exponent in a number,
// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
func stateE0(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
return scanContinue
}
return stateEndValue(s, c)
}
// stateT is the state after reading `t`.
func stateT(s *scanner, c byte) int {
if c == 'r' {
s.step = stateTr
return scanContinue
}
return s.error(c, "in literal true (expecting 'r')")
}
// stateTr is the state after reading `tr`.
func stateTr(s *scanner, c byte) int {
if c == 'u' {
s.step = stateTru
return scanContinue
}
return s.error(c, "in literal true (expecting 'u')")
}
// stateTru is the state after reading `tru`.
func stateTru(s *scanner, c byte) int {
if c == 'e' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal true (expecting 'e')")
}
// stateF is the state after reading `f`.
func stateF(s *scanner, c byte) int {
if c == 'a' {
s.step = stateFa
return scanContinue
}
return s.error(c, "in literal false (expecting 'a')")
}
// stateFa is the state after reading `fa`.
func stateFa(s *scanner, c byte) int {
if c == 'l' {
s.step = stateFal
return scanContinue
}
return s.error(c, "in literal false (expecting 'l')")
}
// stateFal is the state after reading `fal`.
func stateFal(s *scanner, c byte) int {
if c == 's' {
s.step = stateFals
return scanContinue
}
return s.error(c, "in literal false (expecting 's')")
}
// stateFals is the state after reading `fals`.
func stateFals(s *scanner, c byte) int {
if c == 'e' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal false (expecting 'e')")
}
// stateN is the state after reading `n`.
func stateN(s *scanner, c byte) int {
if c == 'u' {
s.step = stateNu
return scanContinue
}
return s.error(c, "in literal null (expecting 'u')")
}
// stateNu is the state after reading `nu`.
func stateNu(s *scanner, c byte) int {
if c == 'l' {
s.step = stateNul
return scanContinue
}
return s.error(c, "in literal null (expecting 'l')")
}
// stateNul is the state after reading `nul`.
func stateNul(s *scanner, c byte) int {
if c == 'l' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal null (expecting 'l')")
}
// stateError is the state after reaching a syntax error,
// such as after reading `[1}` or `5.1.2`.
func stateError(s *scanner, c byte) int {
return scanError
}
// error records an error and switches to the error state.
func (s *scanner) error(c byte, context string) int {
s.step = stateError
s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
return scanError
}
// quoteChar formats c as a quoted character literal
func quoteChar(c byte) string {
// special cases - different from quoted strings
if c == '\'' {
return `'\''`
}
if c == '"' {
return `'"'`
}
// use quoted string with different quotation marks
s := strconv.Quote(string(c))
return "'" + s[1:len(s)-1] + "'"
}
// undo causes the scanner to return scanCode from the next state transition.
// This gives callers a simple 1-byte undo mechanism.
func (s *scanner) undo(scanCode int) {
if s.redo {
panic("json: invalid use of scanner")
}
s.redoCode = scanCode
s.redoState = s.step
s.step = stateRedo
s.redo = true
}
// stateRedo helps implement the scanner's 1-byte undo.
func stateRedo(s *scanner, c byte) int {
s.redo = false
s.step = s.redoState
return s.redoCode
}

485
vendor/github.com/go-jose/go-jose/v3/json/stream.go generated vendored Normal file
View file

@ -0,0 +1,485 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"bytes"
"errors"
"io"
)
// A Decoder reads and decodes JSON objects from an input stream.
type Decoder struct {
r io.Reader
buf []byte
d decodeState
scanp int // start of unread data in buf
scan scanner
err error
tokenState int
tokenStack []int
}
// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may
// read data from r beyond the JSON values requested.
func NewDecoder(r io.Reader) *Decoder {
return &Decoder{r: r}
}
// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
// Number instead of as a float64.
//
// Deprecated: Use `SetNumberType` instead.
func (dec *Decoder) UseNumber() { dec.d.numberType = UnmarshalJSONNumber }
// SetNumberType causes the Decoder to unmarshal a number into an interface{} as a
// Number, float64 or int64 depending on `t` enum value.
func (dec *Decoder) SetNumberType(t NumberUnmarshalType) { dec.d.numberType = t }
// Decode reads the next JSON-encoded value from its
// input and stores it in the value pointed to by v.
//
// See the documentation for Unmarshal for details about
// the conversion of JSON into a Go value.
func (dec *Decoder) Decode(v interface{}) error {
if dec.err != nil {
return dec.err
}
if err := dec.tokenPrepareForDecode(); err != nil {
return err
}
if !dec.tokenValueAllowed() {
return &SyntaxError{msg: "not at beginning of value"}
}
// Read whole value into buffer.
n, err := dec.readValue()
if err != nil {
return err
}
dec.d.init(dec.buf[dec.scanp : dec.scanp+n])
dec.scanp += n
// Don't save err from unmarshal into dec.err:
// the connection is still usable since we read a complete JSON
// object from it before the error happened.
err = dec.d.unmarshal(v)
// fixup token streaming state
dec.tokenValueEnd()
return err
}
// Buffered returns a reader of the data remaining in the Decoder's
// buffer. The reader is valid until the next call to Decode.
func (dec *Decoder) Buffered() io.Reader {
return bytes.NewReader(dec.buf[dec.scanp:])
}
// readValue reads a JSON value into dec.buf.
// It returns the length of the encoding.
func (dec *Decoder) readValue() (int, error) {
dec.scan.reset()
scanp := dec.scanp
var err error
Input:
for {
// Look in the buffer for a new value.
for i, c := range dec.buf[scanp:] {
dec.scan.bytes++
v := dec.scan.step(&dec.scan, c)
if v == scanEnd {
scanp += i
break Input
}
// scanEnd is delayed one byte.
// We might block trying to get that byte from src,
// so instead invent a space byte.
if (v == scanEndObject || v == scanEndArray) && dec.scan.step(&dec.scan, ' ') == scanEnd {
scanp += i + 1
break Input
}
if v == scanError {
dec.err = dec.scan.err
return 0, dec.scan.err
}
}
scanp = len(dec.buf)
// Did the last read have an error?
// Delayed until now to allow buffer scan.
if err != nil {
if err == io.EOF {
if dec.scan.step(&dec.scan, ' ') == scanEnd {
break Input
}
if nonSpace(dec.buf) {
err = io.ErrUnexpectedEOF
}
}
dec.err = err
return 0, err
}
n := scanp - dec.scanp
err = dec.refill()
scanp = dec.scanp + n
}
return scanp - dec.scanp, nil
}
func (dec *Decoder) refill() error {
// Make room to read more into the buffer.
// First slide down data already consumed.
if dec.scanp > 0 {
n := copy(dec.buf, dec.buf[dec.scanp:])
dec.buf = dec.buf[:n]
dec.scanp = 0
}
// Grow buffer if not large enough.
const minRead = 512
if cap(dec.buf)-len(dec.buf) < minRead {
newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
copy(newBuf, dec.buf)
dec.buf = newBuf
}
// Read. Delay error for next iteration (after scan).
n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
dec.buf = dec.buf[0 : len(dec.buf)+n]
return err
}
func nonSpace(b []byte) bool {
for _, c := range b {
if !isSpace(c) {
return true
}
}
return false
}
// An Encoder writes JSON objects to an output stream.
type Encoder struct {
w io.Writer
err error
}
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{w: w}
}
// Encode writes the JSON encoding of v to the stream,
// followed by a newline character.
//
// See the documentation for Marshal for details about the
// conversion of Go values to JSON.
func (enc *Encoder) Encode(v interface{}) error {
if enc.err != nil {
return enc.err
}
e := newEncodeState()
err := e.marshal(v)
if err != nil {
return err
}
// Terminate each value with a newline.
// This makes the output look a little nicer
// when debugging, and some kind of space
// is required if the encoded value was a number,
// so that the reader knows there aren't more
// digits coming.
e.WriteByte('\n')
if _, err = enc.w.Write(e.Bytes()); err != nil {
enc.err = err
}
encodeStatePool.Put(e)
return err
}
// RawMessage is a raw encoded JSON object.
// It implements Marshaler and Unmarshaler and can
// be used to delay JSON decoding or precompute a JSON encoding.
type RawMessage []byte
// MarshalJSON returns *m as the JSON encoding of m.
func (m *RawMessage) MarshalJSON() ([]byte, error) {
return *m, nil
}
// UnmarshalJSON sets *m to a copy of data.
func (m *RawMessage) UnmarshalJSON(data []byte) error {
if m == nil {
return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
}
*m = append((*m)[0:0], data...)
return nil
}
var _ Marshaler = (*RawMessage)(nil)
var _ Unmarshaler = (*RawMessage)(nil)
// A Token holds a value of one of these types:
//
// Delim, for the four JSON delimiters [ ] { }
// bool, for JSON booleans
// float64, for JSON numbers
// Number, for JSON numbers
// string, for JSON string literals
// nil, for JSON null
//
type Token interface{}
const (
tokenTopValue = iota
tokenArrayStart
tokenArrayValue
tokenArrayComma
tokenObjectStart
tokenObjectKey
tokenObjectColon
tokenObjectValue
tokenObjectComma
)
// advance tokenstate from a separator state to a value state
func (dec *Decoder) tokenPrepareForDecode() error {
// Note: Not calling peek before switch, to avoid
// putting peek into the standard Decode path.
// peek is only called when using the Token API.
switch dec.tokenState {
case tokenArrayComma:
c, err := dec.peek()
if err != nil {
return err
}
if c != ',' {
return &SyntaxError{"expected comma after array element", 0}
}
dec.scanp++
dec.tokenState = tokenArrayValue
case tokenObjectColon:
c, err := dec.peek()
if err != nil {
return err
}
if c != ':' {
return &SyntaxError{"expected colon after object key", 0}
}
dec.scanp++
dec.tokenState = tokenObjectValue
}
return nil
}
func (dec *Decoder) tokenValueAllowed() bool {
switch dec.tokenState {
case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
return true
}
return false
}
func (dec *Decoder) tokenValueEnd() {
switch dec.tokenState {
case tokenArrayStart, tokenArrayValue:
dec.tokenState = tokenArrayComma
case tokenObjectValue:
dec.tokenState = tokenObjectComma
}
}
// A Delim is a JSON array or object delimiter, one of [ ] { or }.
type Delim rune
func (d Delim) String() string {
return string(d)
}
// Token returns the next JSON token in the input stream.
// At the end of the input stream, Token returns nil, io.EOF.
//
// Token guarantees that the delimiters [ ] { } it returns are
// properly nested and matched: if Token encounters an unexpected
// delimiter in the input, it will return an error.
//
// The input stream consists of basic JSON values—bool, string,
// number, and null—along with delimiters [ ] { } of type Delim
// to mark the start and end of arrays and objects.
// Commas and colons are elided.
func (dec *Decoder) Token() (Token, error) {
for {
c, err := dec.peek()
if err != nil {
return nil, err
}
switch c {
case '[':
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
dec.tokenState = tokenArrayStart
return Delim('['), nil
case ']':
if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
dec.tokenValueEnd()
return Delim(']'), nil
case '{':
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
dec.tokenState = tokenObjectStart
return Delim('{'), nil
case '}':
if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
dec.tokenValueEnd()
return Delim('}'), nil
case ':':
if dec.tokenState != tokenObjectColon {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = tokenObjectValue
continue
case ',':
if dec.tokenState == tokenArrayComma {
dec.scanp++
dec.tokenState = tokenArrayValue
continue
}
if dec.tokenState == tokenObjectComma {
dec.scanp++
dec.tokenState = tokenObjectKey
continue
}
return dec.tokenError(c)
case '"':
if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
var x string
old := dec.tokenState
dec.tokenState = tokenTopValue
err := dec.Decode(&x)
dec.tokenState = old
if err != nil {
clearOffset(err)
return nil, err
}
dec.tokenState = tokenObjectColon
return x, nil
}
fallthrough
default:
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
var x interface{}
if err := dec.Decode(&x); err != nil {
clearOffset(err)
return nil, err
}
return x, nil
}
}
}
func clearOffset(err error) {
if s, ok := err.(*SyntaxError); ok {
s.Offset = 0
}
}
func (dec *Decoder) tokenError(c byte) (Token, error) {
var context string
switch dec.tokenState {
case tokenTopValue:
context = " looking for beginning of value"
case tokenArrayStart, tokenArrayValue, tokenObjectValue:
context = " looking for beginning of value"
case tokenArrayComma:
context = " after array element"
case tokenObjectKey:
context = " looking for beginning of object key string"
case tokenObjectColon:
context = " after object key"
case tokenObjectComma:
context = " after object key:value pair"
}
return nil, &SyntaxError{"invalid character " + quoteChar(c) + " " + context, 0}
}
// More reports whether there is another element in the
// current array or object being parsed.
func (dec *Decoder) More() bool {
c, err := dec.peek()
return err == nil && c != ']' && c != '}'
}
func (dec *Decoder) peek() (byte, error) {
var err error
for {
for i := dec.scanp; i < len(dec.buf); i++ {
c := dec.buf[i]
if isSpace(c) {
continue
}
dec.scanp = i
return c, nil
}
// buffer has been scanned, now report any error
if err != nil {
return 0, err
}
err = dec.refill()
}
}
/*
TODO
// EncodeToken writes the given JSON token to the stream.
// It returns an error if the delimiters [ ] { } are not properly used.
//
// EncodeToken does not call Flush, because usually it is part of
// a larger operation such as Encode, and those will call Flush when finished.
// Callers that create an Encoder and then invoke EncodeToken directly,
// without using Encode, need to call Flush when finished to ensure that
// the JSON is written to the underlying writer.
func (e *Encoder) EncodeToken(t Token) error {
...
}
*/
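A rough sketch of the streaming Token API described above (not part of the vendored file; the input document is arbitrary and the concrete Go types of the returned tokens are not asserted):

package main

import (
    "fmt"
    "io"
    "strings"

    "github.com/go-jose/go-jose/v3/json"
)

func main() {
    dec := json.NewDecoder(strings.NewReader(`{"alg":"RS256","b64":true}`))
    for {
        tok, err := dec.Token()
        if err == io.EOF {
            break
        }
        if err != nil {
            panic(err)
        }
        // Delimiters arrive as json.Delim values; commas and colons are elided.
        fmt.Println(tok)
    }
}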

44
vendor/github.com/go-jose/go-jose/v3/json/tags.go generated vendored Normal file
View file

@ -0,0 +1,44 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"strings"
)
// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string
// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
if idx := strings.Index(tag, ","); idx != -1 {
return tag[:idx], tagOptions(tag[idx+1:])
}
return tag, tagOptions("")
}
// Contains reports whether a comma-separated list of options
// contains a particular substr flag. substr must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
if len(o) == 0 {
return false
}
s := string(o)
for s != "" {
var next string
i := strings.Index(s, ",")
if i >= 0 {
s, next = s[:i], s[i+1:]
}
if s == optionName {
return true
}
s = next
}
return false
}

295
vendor/github.com/go-jose/go-jose/v3/jwe.go generated vendored Normal file
View file

@ -0,0 +1,295 @@
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jose
import (
"encoding/base64"
"fmt"
"strings"
"github.com/go-jose/go-jose/v3/json"
)
// rawJSONWebEncryption represents a raw JWE JSON object. Used for parsing/serializing.
type rawJSONWebEncryption struct {
Protected *byteBuffer `json:"protected,omitempty"`
Unprotected *rawHeader `json:"unprotected,omitempty"`
Header *rawHeader `json:"header,omitempty"`
Recipients []rawRecipientInfo `json:"recipients,omitempty"`
Aad *byteBuffer `json:"aad,omitempty"`
EncryptedKey *byteBuffer `json:"encrypted_key,omitempty"`
Iv *byteBuffer `json:"iv,omitempty"`
Ciphertext *byteBuffer `json:"ciphertext,omitempty"`
Tag *byteBuffer `json:"tag,omitempty"`
}
// rawRecipientInfo represents a raw JWE Per-Recipient header JSON object. Used for parsing/serializing.
type rawRecipientInfo struct {
Header *rawHeader `json:"header,omitempty"`
EncryptedKey string `json:"encrypted_key,omitempty"`
}
// JSONWebEncryption represents an encrypted JWE object after parsing.
type JSONWebEncryption struct {
Header Header
protected, unprotected *rawHeader
recipients []recipientInfo
aad, iv, ciphertext, tag []byte
original *rawJSONWebEncryption
}
// recipientInfo represents a raw JWE Per-Recipient header JSON object after parsing.
type recipientInfo struct {
header *rawHeader
encryptedKey []byte
}
// GetAuthData retrieves the (optional) authenticated data attached to the object.
func (obj JSONWebEncryption) GetAuthData() []byte {
if obj.aad != nil {
out := make([]byte, len(obj.aad))
copy(out, obj.aad)
return out
}
return nil
}
// Get the merged header values
func (obj JSONWebEncryption) mergedHeaders(recipient *recipientInfo) rawHeader {
out := rawHeader{}
out.merge(obj.protected)
out.merge(obj.unprotected)
if recipient != nil {
out.merge(recipient.header)
}
return out
}
// computeAuthData computes the additional authenticated data for a JWE object.
func (obj JSONWebEncryption) computeAuthData() []byte {
var protected string
switch {
case obj.original != nil && obj.original.Protected != nil:
protected = obj.original.Protected.base64()
case obj.protected != nil:
protected = base64.RawURLEncoding.EncodeToString(mustSerializeJSON(obj.protected))
default:
protected = ""
}
output := []byte(protected)
if obj.aad != nil {
output = append(output, '.')
output = append(output, []byte(base64.RawURLEncoding.EncodeToString(obj.aad))...)
}
return output
}
// ParseEncrypted parses an encrypted message in compact or JWE JSON Serialization format.
func ParseEncrypted(input string) (*JSONWebEncryption, error) {
input = stripWhitespace(input)
if strings.HasPrefix(input, "{") {
return parseEncryptedFull(input)
}
return parseEncryptedCompact(input)
}
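// Illustrative usage sketch (not part of the upstream file): a caller
// importing this package as "jose" can parse a compact or JSON-serialized
// JWE and then decrypt it; Decrypt is defined elsewhere in this package and
// privateKey is assumed to be the recipient's decryption key.
//
//	object, err := jose.ParseEncrypted(serialized)
//	if err != nil {
//		// handle malformed input
//	}
//	plaintext, err := object.Decrypt(privateKey)
//	if err != nil {
//		// handle decryption failure
//	}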
// parseEncryptedFull parses a message in the JWE JSON Serialization format.
func parseEncryptedFull(input string) (*JSONWebEncryption, error) {
var parsed rawJSONWebEncryption
err := json.Unmarshal([]byte(input), &parsed)
if err != nil {
return nil, err
}
return parsed.sanitized()
}
// sanitized produces a cleaned-up JWE object from the raw JSON.
func (parsed *rawJSONWebEncryption) sanitized() (*JSONWebEncryption, error) {
obj := &JSONWebEncryption{
original: parsed,
unprotected: parsed.Unprotected,
}
// Check that there is not a nonce in the unprotected headers
if parsed.Unprotected != nil {
if nonce := parsed.Unprotected.getNonce(); nonce != "" {
return nil, ErrUnprotectedNonce
}
}
if parsed.Header != nil {
if nonce := parsed.Header.getNonce(); nonce != "" {
return nil, ErrUnprotectedNonce
}
}
if parsed.Protected != nil && len(parsed.Protected.bytes()) > 0 {
err := json.Unmarshal(parsed.Protected.bytes(), &obj.protected)
if err != nil {
return nil, fmt.Errorf("go-jose/go-jose: invalid protected header: %s, %s", err, parsed.Protected.base64())
}
}
// Note: this must be called _after_ we parse the protected header,
// otherwise fields from the protected header will not get picked up.
var err error
mergedHeaders := obj.mergedHeaders(nil)
obj.Header, err = mergedHeaders.sanitized()
if err != nil {
return nil, fmt.Errorf("go-jose/go-jose: cannot sanitize merged headers: %v (%v)", err, mergedHeaders)
}
if len(parsed.Recipients) == 0 {
obj.recipients = []recipientInfo{
{
header: parsed.Header,
encryptedKey: parsed.EncryptedKey.bytes(),
},
}
} else {
obj.recipients = make([]recipientInfo, len(parsed.Recipients))
for r := range parsed.Recipients {
encryptedKey, err := base64URLDecode(parsed.Recipients[r].EncryptedKey)
if err != nil {
return nil, err
}
// Check that there is not a nonce in the unprotected header
if parsed.Recipients[r].Header != nil && parsed.Recipients[r].Header.getNonce() != "" {
return nil, ErrUnprotectedNonce
}
obj.recipients[r].header = parsed.Recipients[r].Header
obj.recipients[r].encryptedKey = encryptedKey
}
}
for _, recipient := range obj.recipients {
headers := obj.mergedHeaders(&recipient)
if headers.getAlgorithm() == "" || headers.getEncryption() == "" {
return nil, fmt.Errorf("go-jose/go-jose: message is missing alg/enc headers")
}
}
obj.iv = parsed.Iv.bytes()
obj.ciphertext = parsed.Ciphertext.bytes()
obj.tag = parsed.Tag.bytes()
obj.aad = parsed.Aad.bytes()
return obj, nil
}
// parseEncryptedCompact parses a message in compact format.
func parseEncryptedCompact(input string) (*JSONWebEncryption, error) {
parts := strings.Split(input, ".")
if len(parts) != 5 {
return nil, fmt.Errorf("go-jose/go-jose: compact JWE format must have five parts")
}
rawProtected, err := base64URLDecode(parts[0])
if err != nil {
return nil, err
}
encryptedKey, err := base64URLDecode(parts[1])
if err != nil {
return nil, err
}
iv, err := base64URLDecode(parts[2])
if err != nil {
return nil, err
}
ciphertext, err := base64URLDecode(parts[3])
if err != nil {
return nil, err
}
tag, err := base64URLDecode(parts[4])
if err != nil {
return nil, err
}
raw := &rawJSONWebEncryption{
Protected: newBuffer(rawProtected),
EncryptedKey: newBuffer(encryptedKey),
Iv: newBuffer(iv),
Ciphertext: newBuffer(ciphertext),
Tag: newBuffer(tag),
}
return raw.sanitized()
}
// CompactSerialize serializes an object using the compact serialization format.
func (obj JSONWebEncryption) CompactSerialize() (string, error) {
if len(obj.recipients) != 1 || obj.unprotected != nil ||
obj.protected == nil || obj.recipients[0].header != nil {
return "", ErrNotSupported
}
serializedProtected := mustSerializeJSON(obj.protected)
return fmt.Sprintf(
"%s.%s.%s.%s.%s",
base64.RawURLEncoding.EncodeToString(serializedProtected),
base64.RawURLEncoding.EncodeToString(obj.recipients[0].encryptedKey),
base64.RawURLEncoding.EncodeToString(obj.iv),
base64.RawURLEncoding.EncodeToString(obj.ciphertext),
base64.RawURLEncoding.EncodeToString(obj.tag)), nil
}
// FullSerialize serializes an object using the full JSON serialization format.
func (obj JSONWebEncryption) FullSerialize() string {
raw := rawJSONWebEncryption{
Unprotected: obj.unprotected,
Iv: newBuffer(obj.iv),
Ciphertext: newBuffer(obj.ciphertext),
EncryptedKey: newBuffer(obj.recipients[0].encryptedKey),
Tag: newBuffer(obj.tag),
Aad: newBuffer(obj.aad),
Recipients: []rawRecipientInfo{},
}
if len(obj.recipients) > 1 {
for _, recipient := range obj.recipients {
info := rawRecipientInfo{
Header: recipient.header,
EncryptedKey: base64.RawURLEncoding.EncodeToString(recipient.encryptedKey),
}
raw.Recipients = append(raw.Recipients, info)
}
} else {
// Use flattened serialization
raw.Header = obj.recipients[0].header
raw.EncryptedKey = newBuffer(obj.recipients[0].encryptedKey)
}
if obj.protected != nil {
raw.Protected = newBuffer(mustSerializeJSON(obj.protected))
}
return string(mustSerializeJSON(raw))
}

798
vendor/github.com/go-jose/go-jose/v3/jwk.go generated vendored Normal file
View file

@ -0,0 +1,798 @@
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jose
import (
"bytes"
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic"
"crypto/rsa"
"crypto/sha1"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"math/big"
"net/url"
"reflect"
"strings"
"github.com/go-jose/go-jose/v3/json"
)
// rawJSONWebKey represents a public or private key in JWK format, used for parsing/serializing.
type rawJSONWebKey struct {
Use string `json:"use,omitempty"`
Kty string `json:"kty,omitempty"`
Kid string `json:"kid,omitempty"`
Crv string `json:"crv,omitempty"`
Alg string `json:"alg,omitempty"`
K *byteBuffer `json:"k,omitempty"`
X *byteBuffer `json:"x,omitempty"`
Y *byteBuffer `json:"y,omitempty"`
N *byteBuffer `json:"n,omitempty"`
E *byteBuffer `json:"e,omitempty"`
// -- Following fields are only used for private keys --
// RSA uses D, P and Q, while ECDSA uses only D. Fields Dp, Dq, and Qi are
// completely optional. Therefore for RSA/ECDSA, D != nil is a contract that
// we have a private key whereas D == nil means we have only a public key.
D *byteBuffer `json:"d,omitempty"`
P *byteBuffer `json:"p,omitempty"`
Q *byteBuffer `json:"q,omitempty"`
Dp *byteBuffer `json:"dp,omitempty"`
Dq *byteBuffer `json:"dq,omitempty"`
Qi *byteBuffer `json:"qi,omitempty"`
// Certificates
X5c []string `json:"x5c,omitempty"`
X5u string `json:"x5u,omitempty"`
X5tSHA1 string `json:"x5t,omitempty"`
X5tSHA256 string `json:"x5t#S256,omitempty"`
}
// JSONWebKey represents a public or private key in JWK format.
type JSONWebKey struct {
// Cryptographic key, can be a symmetric or asymmetric key.
Key interface{}
// Key identifier, parsed from `kid` header.
KeyID string
// Key algorithm, parsed from `alg` header.
Algorithm string
// Key use, parsed from `use` header.
Use string
// X.509 certificate chain, parsed from `x5c` header.
Certificates []*x509.Certificate
// X.509 certificate URL, parsed from `x5u` header.
CertificatesURL *url.URL
// X.509 certificate thumbprint (SHA-1), parsed from `x5t` header.
CertificateThumbprintSHA1 []byte
// X.509 certificate thumbprint (SHA-256), parsed from `x5t#S256` header.
CertificateThumbprintSHA256 []byte
}
// MarshalJSON serializes the given key to its JSON representation.
func (k JSONWebKey) MarshalJSON() ([]byte, error) {
var raw *rawJSONWebKey
var err error
switch key := k.Key.(type) {
case ed25519.PublicKey:
raw = fromEdPublicKey(key)
case *ecdsa.PublicKey:
raw, err = fromEcPublicKey(key)
case *rsa.PublicKey:
raw = fromRsaPublicKey(key)
case ed25519.PrivateKey:
raw, err = fromEdPrivateKey(key)
case *ecdsa.PrivateKey:
raw, err = fromEcPrivateKey(key)
case *rsa.PrivateKey:
raw, err = fromRsaPrivateKey(key)
case []byte:
raw, err = fromSymmetricKey(key)
default:
return nil, fmt.Errorf("go-jose/go-jose: unknown key type '%s'", reflect.TypeOf(key))
}
if err != nil {
return nil, err
}
raw.Kid = k.KeyID
raw.Alg = k.Algorithm
raw.Use = k.Use
for _, cert := range k.Certificates {
raw.X5c = append(raw.X5c, base64.StdEncoding.EncodeToString(cert.Raw))
}
x5tSHA1Len := len(k.CertificateThumbprintSHA1)
x5tSHA256Len := len(k.CertificateThumbprintSHA256)
if x5tSHA1Len > 0 {
if x5tSHA1Len != sha1.Size {
return nil, fmt.Errorf("go-jose/go-jose: invalid SHA-1 thumbprint (must be %d bytes, not %d)", sha1.Size, x5tSHA1Len)
}
raw.X5tSHA1 = base64.RawURLEncoding.EncodeToString(k.CertificateThumbprintSHA1)
}
if x5tSHA256Len > 0 {
if x5tSHA256Len != sha256.Size {
return nil, fmt.Errorf("go-jose/go-jose: invalid SHA-256 thumbprint (must be %d bytes, not %d)", sha256.Size, x5tSHA256Len)
}
raw.X5tSHA256 = base64.RawURLEncoding.EncodeToString(k.CertificateThumbprintSHA256)
}
// If cert chain is attached (as opposed to being behind a URL), check the
// key's thumbprints to make sure they match what is expected. This is to
// ensure we don't accidentally produce a JWK with semantically inconsistent
// data in the headers.
if len(k.Certificates) > 0 {
expectedSHA1 := sha1.Sum(k.Certificates[0].Raw)
expectedSHA256 := sha256.Sum256(k.Certificates[0].Raw)
if len(k.CertificateThumbprintSHA1) > 0 && !bytes.Equal(k.CertificateThumbprintSHA1, expectedSHA1[:]) {
return nil, errors.New("go-jose/go-jose: invalid SHA-1 thumbprint, does not match cert chain")
}
if len(k.CertificateThumbprintSHA256) > 0 && !bytes.Equal(k.CertificateThumbprintSHA256, expectedSHA256[:]) {
return nil, errors.New("go-jose/go-jose: invalid SHA-256 thumbprint, does not match cert chain")
}
}
if k.CertificatesURL != nil {
raw.X5u = k.CertificatesURL.String()
}
return json.Marshal(raw)
}
// UnmarshalJSON reads a key from its JSON representation.
func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) {
var raw rawJSONWebKey
err = json.Unmarshal(data, &raw)
if err != nil {
return err
}
certs, err := parseCertificateChain(raw.X5c)
if err != nil {
return fmt.Errorf("go-jose/go-jose: failed to unmarshal x5c field: %s", err)
}
var key interface{}
var certPub interface{}
var keyPub interface{}
if len(certs) > 0 {
// We need to check that leaf public key matches the key embedded in this
// JWK, as required by the standard (see RFC 7517, Section 4.7). Otherwise
// the JWK parsed could be semantically invalid. Technically, should also
// check key usage fields and other extensions on the cert here, but the
// standard doesn't exactly explain how they're supposed to map from the
// JWK representation to the X.509 extensions.
certPub = certs[0].PublicKey
}
switch raw.Kty {
case "EC":
if raw.D != nil {
key, err = raw.ecPrivateKey()
if err == nil {
keyPub = key.(*ecdsa.PrivateKey).Public()
}
} else {
key, err = raw.ecPublicKey()
keyPub = key
}
case "RSA":
if raw.D != nil {
key, err = raw.rsaPrivateKey()
if err == nil {
keyPub = key.(*rsa.PrivateKey).Public()
}
} else {
key, err = raw.rsaPublicKey()
keyPub = key
}
case "oct":
if certPub != nil {
return errors.New("go-jose/go-jose: invalid JWK, found 'oct' (symmetric) key with cert chain")
}
key, err = raw.symmetricKey()
case "OKP":
if raw.Crv == "Ed25519" && raw.X != nil {
if raw.D != nil {
key, err = raw.edPrivateKey()
if err == nil {
keyPub = key.(ed25519.PrivateKey).Public()
}
} else {
key, err = raw.edPublicKey()
keyPub = key
}
} else {
err = fmt.Errorf("go-jose/go-jose: unknown curve %s'", raw.Crv)
}
default:
err = fmt.Errorf("go-jose/go-jose: unknown json web key type '%s'", raw.Kty)
}
if err != nil {
return
}
if certPub != nil && keyPub != nil {
if !reflect.DeepEqual(certPub, keyPub) {
return errors.New("go-jose/go-jose: invalid JWK, public keys in key and x5c fields do not match")
}
}
*k = JSONWebKey{Key: key, KeyID: raw.Kid, Algorithm: raw.Alg, Use: raw.Use, Certificates: certs}
if raw.X5u != "" {
k.CertificatesURL, err = url.Parse(raw.X5u)
if err != nil {
return fmt.Errorf("go-jose/go-jose: invalid JWK, x5u header is invalid URL: %w", err)
}
}
// x5t parameters are base64url-encoded SHA thumbprints
// See RFC 7517, Section 4.8, https://tools.ietf.org/html/rfc7517#section-4.8
x5tSHA1bytes, err := base64URLDecode(raw.X5tSHA1)
if err != nil {
return errors.New("go-jose/go-jose: invalid JWK, x5t header has invalid encoding")
}
// RFC 7517, Section 4.8 is ambiguous as to whether the digest output should be raw bytes or hex.
// After base64 decoding, if the length equals sha1.Size the value is most likely a raw byte
// checksum, so we leave it alone. Otherwise, a hex-encoded checksum decodes to 40 bytes, so we
// try to hex decode it. When marshalling, we always emit the base64 encoding of the raw byte checksum.
if len(x5tSHA1bytes) == 2*sha1.Size {
hx, err := hex.DecodeString(string(x5tSHA1bytes))
if err != nil {
return fmt.Errorf("go-jose/go-jose: invalid JWK, unable to hex decode x5t: %v", err)
}
x5tSHA1bytes = hx
}
k.CertificateThumbprintSHA1 = x5tSHA1bytes
x5tSHA256bytes, err := base64URLDecode(raw.X5tSHA256)
if err != nil {
return errors.New("go-jose/go-jose: invalid JWK, x5t#S256 header has invalid encoding")
}
if len(x5tSHA256bytes) == 2*sha256.Size {
hx256, err := hex.DecodeString(string(x5tSHA256bytes))
if err != nil {
return fmt.Errorf("go-jose/go-jose: invalid JWK, unable to hex decode x5t#S256: %v", err)
}
x5tSHA256bytes = hx256
}
k.CertificateThumbprintSHA256 = x5tSHA256bytes
x5tSHA1Len := len(k.CertificateThumbprintSHA1)
x5tSHA256Len := len(k.CertificateThumbprintSHA256)
if x5tSHA1Len > 0 && x5tSHA1Len != sha1.Size {
return errors.New("go-jose/go-jose: invalid JWK, x5t header is of incorrect size")
}
if x5tSHA256Len > 0 && x5tSHA256Len != sha256.Size {
return errors.New("go-jose/go-jose: invalid JWK, x5t#S256 header is of incorrect size")
}
// If certificate chain *and* thumbprints are set, verify correctness.
if len(k.Certificates) > 0 {
leaf := k.Certificates[0]
sha1sum := sha1.Sum(leaf.Raw)
sha256sum := sha256.Sum256(leaf.Raw)
if len(k.CertificateThumbprintSHA1) > 0 && !bytes.Equal(sha1sum[:], k.CertificateThumbprintSHA1) {
return errors.New("go-jose/go-jose: invalid JWK, x5c thumbprint does not match x5t value")
}
if len(k.CertificateThumbprintSHA256) > 0 && !bytes.Equal(sha256sum[:], k.CertificateThumbprintSHA256) {
return errors.New("go-jose/go-jose: invalid JWK, x5c thumbprint does not match x5t#S256 value")
}
}
return
}
// JSONWebKeySet represents a JWK Set object.
type JSONWebKeySet struct {
Keys []JSONWebKey `json:"keys"`
}
// Key is a convenience method that returns keys matching the given key ID.
// The specification states that a JWK Set "SHOULD" use distinct key IDs, but
// allows for some cases where they are not distinct. Hence this method
// returns a slice of JSONWebKeys.
func (s *JSONWebKeySet) Key(kid string) []JSONWebKey {
var keys []JSONWebKey
for _, key := range s.Keys {
if key.KeyID == kid {
keys = append(keys, key)
}
}
return keys
}
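// Illustrative usage sketch (not part of the upstream file): a JWK Set
// document can be unmarshalled with encoding/json (JSONWebKey implements
// json.Unmarshaler above) and then queried by key ID; jwksJSON is assumed to
// hold the raw JWK Set bytes.
//
//	var set jose.JSONWebKeySet
//	if err := json.Unmarshal(jwksJSON, &set); err != nil {
//		// handle malformed JWK Set
//	}
//	candidates := set.Key("my-key-id")
//	if len(candidates) == 0 {
//		// no key with that ID
//	}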
const rsaThumbprintTemplate = `{"e":"%s","kty":"RSA","n":"%s"}`
const ecThumbprintTemplate = `{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`
const edThumbprintTemplate = `{"crv":"%s","kty":"OKP","x":"%s"}`
func ecThumbprintInput(curve elliptic.Curve, x, y *big.Int) (string, error) {
coordLength := curveSize(curve)
crv, err := curveName(curve)
if err != nil {
return "", err
}
if len(x.Bytes()) > coordLength || len(y.Bytes()) > coordLength {
return "", errors.New("go-jose/go-jose: invalid elliptic key (too large)")
}
return fmt.Sprintf(ecThumbprintTemplate, crv,
newFixedSizeBuffer(x.Bytes(), coordLength).base64(),
newFixedSizeBuffer(y.Bytes(), coordLength).base64()), nil
}
func rsaThumbprintInput(n *big.Int, e int) (string, error) {
return fmt.Sprintf(rsaThumbprintTemplate,
newBufferFromInt(uint64(e)).base64(),
newBuffer(n.Bytes()).base64()), nil
}
func edThumbprintInput(ed ed25519.PublicKey) (string, error) {
crv := "Ed25519"
if len(ed) > 32 {
return "", errors.New("go-jose/go-jose: invalid elliptic key (too large)")
}
return fmt.Sprintf(edThumbprintTemplate, crv,
newFixedSizeBuffer(ed, 32).base64()), nil
}
// Thumbprint computes the JWK Thumbprint of a key using the
// indicated hash algorithm.
func (k *JSONWebKey) Thumbprint(hash crypto.Hash) ([]byte, error) {
var input string
var err error
switch key := k.Key.(type) {
case ed25519.PublicKey:
input, err = edThumbprintInput(key)
case *ecdsa.PublicKey:
input, err = ecThumbprintInput(key.Curve, key.X, key.Y)
case *ecdsa.PrivateKey:
input, err = ecThumbprintInput(key.Curve, key.X, key.Y)
case *rsa.PublicKey:
input, err = rsaThumbprintInput(key.N, key.E)
case *rsa.PrivateKey:
input, err = rsaThumbprintInput(key.N, key.E)
case ed25519.PrivateKey:
input, err = edThumbprintInput(ed25519.PublicKey(key[32:]))
default:
return nil, fmt.Errorf("go-jose/go-jose: unknown key type '%s'", reflect.TypeOf(key))
}
if err != nil {
return nil, err
}
h := hash.New()
_, _ = h.Write([]byte(input))
return h.Sum(nil), nil
}
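// Illustrative usage sketch (not part of the upstream file): one common
// convention is to derive a key ID from the RFC 7638 thumbprint. The caller
// must import crypto and link in crypto/sha256 so that crypto.SHA256 is
// available.
//
//	tp, err := key.Thumbprint(crypto.SHA256)
//	if err != nil {
//		// unsupported key type
//	}
//	kid := base64.RawURLEncoding.EncodeToString(tp)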
// IsPublic returns true if the JWK represents a public key (not symmetric, not private).
func (k *JSONWebKey) IsPublic() bool {
switch k.Key.(type) {
case *ecdsa.PublicKey, *rsa.PublicKey, ed25519.PublicKey:
return true
default:
return false
}
}
// Public creates JSONWebKey with corresponding public key if JWK represents asymmetric private key.
func (k *JSONWebKey) Public() JSONWebKey {
if k.IsPublic() {
return *k
}
ret := *k
switch key := k.Key.(type) {
case *ecdsa.PrivateKey:
ret.Key = key.Public()
case *rsa.PrivateKey:
ret.Key = key.Public()
case ed25519.PrivateKey:
ret.Key = key.Public()
default:
return JSONWebKey{} // returning invalid key
}
return ret
}
// Valid checks that the key contains the expected parameters.
func (k *JSONWebKey) Valid() bool {
if k.Key == nil {
return false
}
switch key := k.Key.(type) {
case *ecdsa.PublicKey:
if key.Curve == nil || key.X == nil || key.Y == nil {
return false
}
case *ecdsa.PrivateKey:
if key.Curve == nil || key.X == nil || key.Y == nil || key.D == nil {
return false
}
case *rsa.PublicKey:
if key.N == nil || key.E == 0 {
return false
}
case *rsa.PrivateKey:
if key.N == nil || key.E == 0 || key.D == nil || len(key.Primes) < 2 {
return false
}
case ed25519.PublicKey:
if len(key) != 32 {
return false
}
case ed25519.PrivateKey:
if len(key) != 64 {
return false
}
default:
return false
}
return true
}
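// Illustrative usage sketch (not part of the upstream file): derive a
// shareable public JWK from a private one and sanity-check the result before
// publishing it.
//
//	pub := privateJWK.Public()
//	if !pub.Valid() {
//		// the key was symmetric or otherwise not convertible
//	}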
func (key rawJSONWebKey) rsaPublicKey() (*rsa.PublicKey, error) {
if key.N == nil || key.E == nil {
return nil, fmt.Errorf("go-jose/go-jose: invalid RSA key, missing n/e values")
}
return &rsa.PublicKey{
N: key.N.bigInt(),
E: key.E.toInt(),
}, nil
}
func fromEdPublicKey(pub ed25519.PublicKey) *rawJSONWebKey {
return &rawJSONWebKey{
Kty: "OKP",
Crv: "Ed25519",
X: newBuffer(pub),
}
}
func fromRsaPublicKey(pub *rsa.PublicKey) *rawJSONWebKey {
return &rawJSONWebKey{
Kty: "RSA",
N: newBuffer(pub.N.Bytes()),
E: newBufferFromInt(uint64(pub.E)),
}
}
func (key rawJSONWebKey) ecPublicKey() (*ecdsa.PublicKey, error) {
var curve elliptic.Curve
switch key.Crv {
case "P-256":
curve = elliptic.P256()
case "P-384":
curve = elliptic.P384()
case "P-521":
curve = elliptic.P521()
default:
return nil, fmt.Errorf("go-jose/go-jose: unsupported elliptic curve '%s'", key.Crv)
}
if key.X == nil || key.Y == nil {
return nil, errors.New("go-jose/go-jose: invalid EC key, missing x/y values")
}
// The length of this octet string MUST be the full size of a coordinate for
// the curve specified in the "crv" parameter.
// https://tools.ietf.org/html/rfc7518#section-6.2.1.2
if curveSize(curve) != len(key.X.data) {
return nil, fmt.Errorf("go-jose/go-jose: invalid EC public key, wrong length for x")
}
if curveSize(curve) != len(key.Y.data) {
return nil, fmt.Errorf("go-jose/go-jose: invalid EC public key, wrong length for y")
}
x := key.X.bigInt()
y := key.Y.bigInt()
if !curve.IsOnCurve(x, y) {
return nil, errors.New("go-jose/go-jose: invalid EC key, X/Y are not on declared curve")
}
return &ecdsa.PublicKey{
Curve: curve,
X: x,
Y: y,
}, nil
}
func fromEcPublicKey(pub *ecdsa.PublicKey) (*rawJSONWebKey, error) {
if pub == nil || pub.X == nil || pub.Y == nil {
return nil, fmt.Errorf("go-jose/go-jose: invalid EC key (nil, or X/Y missing)")
}
name, err := curveName(pub.Curve)
if err != nil {
return nil, err
}
size := curveSize(pub.Curve)
xBytes := pub.X.Bytes()
yBytes := pub.Y.Bytes()
if len(xBytes) > size || len(yBytes) > size {
return nil, fmt.Errorf("go-jose/go-jose: invalid EC key (X/Y too large)")
}
key := &rawJSONWebKey{
Kty: "EC",
Crv: name,
X: newFixedSizeBuffer(xBytes, size),
Y: newFixedSizeBuffer(yBytes, size),
}
return key, nil
}
func (key rawJSONWebKey) edPrivateKey() (ed25519.PrivateKey, error) {
var missing []string
switch {
case key.D == nil:
missing = append(missing, "D")
case key.X == nil:
missing = append(missing, "X")
}
if len(missing) > 0 {
return nil, fmt.Errorf("go-jose/go-jose: invalid Ed25519 private key, missing %s value(s)", strings.Join(missing, ", "))
}
privateKey := make([]byte, ed25519.PrivateKeySize)
copy(privateKey[0:32], key.D.bytes())
copy(privateKey[32:], key.X.bytes())
rv := ed25519.PrivateKey(privateKey)
return rv, nil
}
func (key rawJSONWebKey) edPublicKey() (ed25519.PublicKey, error) {
if key.X == nil {
return nil, fmt.Errorf("go-jose/go-jose: invalid Ed key, missing x value")
}
publicKey := make([]byte, ed25519.PublicKeySize)
copy(publicKey[0:32], key.X.bytes())
rv := ed25519.PublicKey(publicKey)
return rv, nil
}
func (key rawJSONWebKey) rsaPrivateKey() (*rsa.PrivateKey, error) {
var missing []string
switch {
case key.N == nil:
missing = append(missing, "N")
case key.E == nil:
missing = append(missing, "E")
case key.D == nil:
missing = append(missing, "D")
case key.P == nil:
missing = append(missing, "P")
case key.Q == nil:
missing = append(missing, "Q")
}
if len(missing) > 0 {
return nil, fmt.Errorf("go-jose/go-jose: invalid RSA private key, missing %s value(s)", strings.Join(missing, ", "))
}
rv := &rsa.PrivateKey{
PublicKey: rsa.PublicKey{
N: key.N.bigInt(),
E: key.E.toInt(),
},
D: key.D.bigInt(),
Primes: []*big.Int{
key.P.bigInt(),
key.Q.bigInt(),
},
}
if key.Dp != nil {
rv.Precomputed.Dp = key.Dp.bigInt()
}
if key.Dq != nil {
rv.Precomputed.Dq = key.Dq.bigInt()
}
if key.Qi != nil {
rv.Precomputed.Qinv = key.Qi.bigInt()
}
err := rv.Validate()
return rv, err
}
func fromEdPrivateKey(ed ed25519.PrivateKey) (*rawJSONWebKey, error) {
raw := fromEdPublicKey(ed25519.PublicKey(ed[32:]))
raw.D = newBuffer(ed[0:32])
return raw, nil
}
func fromRsaPrivateKey(rsa *rsa.PrivateKey) (*rawJSONWebKey, error) {
if len(rsa.Primes) != 2 {
return nil, ErrUnsupportedKeyType
}
raw := fromRsaPublicKey(&rsa.PublicKey)
raw.D = newBuffer(rsa.D.Bytes())
raw.P = newBuffer(rsa.Primes[0].Bytes())
raw.Q = newBuffer(rsa.Primes[1].Bytes())
if rsa.Precomputed.Dp != nil {
raw.Dp = newBuffer(rsa.Precomputed.Dp.Bytes())
}
if rsa.Precomputed.Dq != nil {
raw.Dq = newBuffer(rsa.Precomputed.Dq.Bytes())
}
if rsa.Precomputed.Qinv != nil {
raw.Qi = newBuffer(rsa.Precomputed.Qinv.Bytes())
}
return raw, nil
}
func (key rawJSONWebKey) ecPrivateKey() (*ecdsa.PrivateKey, error) {
var curve elliptic.Curve
switch key.Crv {
case "P-256":
curve = elliptic.P256()
case "P-384":
curve = elliptic.P384()
case "P-521":
curve = elliptic.P521()
default:
return nil, fmt.Errorf("go-jose/go-jose: unsupported elliptic curve '%s'", key.Crv)
}
if key.X == nil || key.Y == nil || key.D == nil {
return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key, missing x/y/d values")
}
// The length of this octet string MUST be the full size of a coordinate for
// the curve specified in the "crv" parameter.
// https://tools.ietf.org/html/rfc7518#section-6.2.1.2
if curveSize(curve) != len(key.X.data) {
return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key, wrong length for x")
}
if curveSize(curve) != len(key.Y.data) {
return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key, wrong length for y")
}
// https://tools.ietf.org/html/rfc7518#section-6.2.2.1
if dSize(curve) != len(key.D.data) {
return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key, wrong length for d")
}
x := key.X.bigInt()
y := key.Y.bigInt()
if !curve.IsOnCurve(x, y) {
return nil, errors.New("go-jose/go-jose: invalid EC key, X/Y are not on declared curve")
}
return &ecdsa.PrivateKey{
PublicKey: ecdsa.PublicKey{
Curve: curve,
X: x,
Y: y,
},
D: key.D.bigInt(),
}, nil
}
func fromEcPrivateKey(ec *ecdsa.PrivateKey) (*rawJSONWebKey, error) {
raw, err := fromEcPublicKey(&ec.PublicKey)
if err != nil {
return nil, err
}
if ec.D == nil {
return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key")
}
raw.D = newFixedSizeBuffer(ec.D.Bytes(), dSize(ec.PublicKey.Curve))
return raw, nil
}
// dSize returns the size in octets for the "d" member of an elliptic curve
// private key.
// The length of this octet string MUST be ceiling(log-base-2(n)/8)
// octets (where n is the order of the curve).
// https://tools.ietf.org/html/rfc7518#section-6.2.2.1
func dSize(curve elliptic.Curve) int {
order := curve.Params().P
bitLen := order.BitLen()
size := bitLen / 8
if bitLen%8 != 0 {
size++
}
return size
}
func fromSymmetricKey(key []byte) (*rawJSONWebKey, error) {
return &rawJSONWebKey{
Kty: "oct",
K: newBuffer(key),
}, nil
}
func (key rawJSONWebKey) symmetricKey() ([]byte, error) {
if key.K == nil {
return nil, fmt.Errorf("go-jose/go-jose: invalid OCT (symmetric) key, missing k value")
}
return key.K.bytes(), nil
}
func tryJWKS(key interface{}, headers ...Header) interface{} {
var jwks JSONWebKeySet
switch jwksType := key.(type) {
case *JSONWebKeySet:
jwks = *jwksType
case JSONWebKeySet:
jwks = jwksType
default:
return key
}
var kid string
for _, header := range headers {
if header.KeyID != "" {
kid = header.KeyID
break
}
}
if kid == "" {
return key
}
keys := jwks.Key(kid)
if len(keys) == 0 {
return key
}
return keys[0].Key
}

366
vendor/github.com/go-jose/go-jose/v3/jws.go generated vendored Normal file
View file

@ -0,0 +1,366 @@
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jose
import (
"bytes"
"encoding/base64"
"errors"
"fmt"
"strings"
"github.com/go-jose/go-jose/v3/json"
)
// rawJSONWebSignature represents a raw JWS JSON object. Used for parsing/serializing.
type rawJSONWebSignature struct {
Payload *byteBuffer `json:"payload,omitempty"`
Signatures []rawSignatureInfo `json:"signatures,omitempty"`
Protected *byteBuffer `json:"protected,omitempty"`
Header *rawHeader `json:"header,omitempty"`
Signature *byteBuffer `json:"signature,omitempty"`
}
// rawSignatureInfo represents a single JWS signature over the JWS payload and protected header.
type rawSignatureInfo struct {
Protected *byteBuffer `json:"protected,omitempty"`
Header *rawHeader `json:"header,omitempty"`
Signature *byteBuffer `json:"signature,omitempty"`
}
// JSONWebSignature represents a signed JWS object after parsing.
type JSONWebSignature struct {
payload []byte
// Signatures attached to this object (may be more than one for multi-sig).
// Be careful about accessing these directly, prefer to use Verify() or
// VerifyMulti() to ensure that the data you're getting is verified.
Signatures []Signature
}
// Signature represents a single signature over the JWS payload and protected header.
type Signature struct {
// Merged header fields. Contains both protected and unprotected header
// values. Prefer using Protected and Unprotected fields instead of this.
// Values in this header may or may not have been signed and in general
// should not be trusted.
Header Header
// Protected header. Values in this header were signed and
// will be verified as part of the signature verification process.
Protected Header
// Unprotected header. Values in this header were not signed
// and in general should not be trusted.
Unprotected Header
// The actual signature value
Signature []byte
protected *rawHeader
header *rawHeader
original *rawSignatureInfo
}
// ParseSigned parses a signed message in compact or JWS JSON Serialization format.
func ParseSigned(signature string) (*JSONWebSignature, error) {
signature = stripWhitespace(signature)
if strings.HasPrefix(signature, "{") {
return parseSignedFull(signature)
}
return parseSignedCompact(signature, nil)
}
// ParseDetached parses a signed message in compact serialization format with detached payload.
func ParseDetached(signature string, payload []byte) (*JSONWebSignature, error) {
if payload == nil {
return nil, errors.New("go-jose/go-jose: nil payload")
}
return parseSignedCompact(stripWhitespace(signature), payload)
}
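// Illustrative usage sketch (not part of the upstream file): parse a compact
// JWS and verify it against a known key. Verify is defined elsewhere in this
// package; publicKey is assumed to be the signer's public key or a
// *JSONWebKey wrapping it.
//
//	object, err := jose.ParseSigned(token)
//	if err != nil {
//		// handle malformed input
//	}
//	payload, err := object.Verify(publicKey)
//	if err != nil {
//		// signature did not verify; do not use the payload
//	}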
// mergedHeaders returns the protected and unprotected headers merged together.
func (sig Signature) mergedHeaders() rawHeader {
out := rawHeader{}
out.merge(sig.protected)
out.merge(sig.header)
return out
}
// Compute data to be signed
func (obj JSONWebSignature) computeAuthData(payload []byte, signature *Signature) ([]byte, error) {
var authData bytes.Buffer
protectedHeader := new(rawHeader)
if signature.original != nil && signature.original.Protected != nil {
if err := json.Unmarshal(signature.original.Protected.bytes(), protectedHeader); err != nil {
return nil, err
}
authData.WriteString(signature.original.Protected.base64())
} else if signature.protected != nil {
protectedHeader = signature.protected
authData.WriteString(base64.RawURLEncoding.EncodeToString(mustSerializeJSON(protectedHeader)))
}
needsBase64 := true
if protectedHeader != nil {
var err error
if needsBase64, err = protectedHeader.getB64(); err != nil {
needsBase64 = true
}
}
authData.WriteByte('.')
if needsBase64 {
authData.WriteString(base64.RawURLEncoding.EncodeToString(payload))
} else {
authData.Write(payload)
}
return authData.Bytes(), nil
}
// parseSignedFull parses a message in full format.
func parseSignedFull(input string) (*JSONWebSignature, error) {
var parsed rawJSONWebSignature
err := json.Unmarshal([]byte(input), &parsed)
if err != nil {
return nil, err
}
return parsed.sanitized()
}
// sanitized produces a cleaned-up JWS object from the raw JSON.
func (parsed *rawJSONWebSignature) sanitized() (*JSONWebSignature, error) {
if parsed.Payload == nil {
return nil, fmt.Errorf("go-jose/go-jose: missing payload in JWS message")
}
obj := &JSONWebSignature{
payload: parsed.Payload.bytes(),
Signatures: make([]Signature, len(parsed.Signatures)),
}
if len(parsed.Signatures) == 0 {
// No signatures array, must be flattened serialization
signature := Signature{}
if parsed.Protected != nil && len(parsed.Protected.bytes()) > 0 {
signature.protected = &rawHeader{}
err := json.Unmarshal(parsed.Protected.bytes(), signature.protected)
if err != nil {
return nil, err
}
}
// Check that there is not a nonce in the unprotected header
if parsed.Header != nil && parsed.Header.getNonce() != "" {
return nil, ErrUnprotectedNonce
}
signature.header = parsed.Header
signature.Signature = parsed.Signature.bytes()
// Make a fake "original" rawSignatureInfo to store the unprocessed
// Protected header. This is necessary because the Protected header can
// contain arbitrary fields not registered as part of the spec. See
// https://tools.ietf.org/html/draft-ietf-jose-json-web-signature-41#section-4
// If we unmarshal Protected into a rawHeader with its explicit list of fields,
// we cannot marshal losslessly. So we have to keep around the original bytes.
// This is used in computeAuthData, which will first attempt to use
// the original bytes of a protected header, and fall back on marshaling the
// header struct only if those bytes are not available.
signature.original = &rawSignatureInfo{
Protected: parsed.Protected,
Header: parsed.Header,
Signature: parsed.Signature,
}
var err error
signature.Header, err = signature.mergedHeaders().sanitized()
if err != nil {
return nil, err
}
if signature.header != nil {
signature.Unprotected, err = signature.header.sanitized()
if err != nil {
return nil, err
}
}
if signature.protected != nil {
signature.Protected, err = signature.protected.sanitized()
if err != nil {
return nil, err
}
}
// As per RFC 7515 Section 4.1.3, only public keys are allowed to be embedded.
jwk := signature.Header.JSONWebKey
if jwk != nil && (!jwk.Valid() || !jwk.IsPublic()) {
return nil, errors.New("go-jose/go-jose: invalid embedded jwk, must be public key")
}
obj.Signatures = append(obj.Signatures, signature)
}
for i, sig := range parsed.Signatures {
if sig.Protected != nil && len(sig.Protected.bytes()) > 0 {
obj.Signatures[i].protected = &rawHeader{}
err := json.Unmarshal(sig.Protected.bytes(), obj.Signatures[i].protected)
if err != nil {
return nil, err
}
}
// Check that there is not a nonce in the unprotected header
if sig.Header != nil && sig.Header.getNonce() != "" {
return nil, ErrUnprotectedNonce
}
var err error
obj.Signatures[i].Header, err = obj.Signatures[i].mergedHeaders().sanitized()
if err != nil {
return nil, err
}
if obj.Signatures[i].header != nil {
obj.Signatures[i].Unprotected, err = obj.Signatures[i].header.sanitized()
if err != nil {
return nil, err
}
}
if obj.Signatures[i].protected != nil {
obj.Signatures[i].Protected, err = obj.Signatures[i].protected.sanitized()
if err != nil {
return nil, err
}
}
obj.Signatures[i].Signature = sig.Signature.bytes()
// As per RFC 7515 Section 4.1.3, only public keys are allowed to be embedded.
jwk := obj.Signatures[i].Header.JSONWebKey
if jwk != nil && (!jwk.Valid() || !jwk.IsPublic()) {
return nil, errors.New("go-jose/go-jose: invalid embedded jwk, must be public key")
}
// Copy value of sig
original := sig
obj.Signatures[i].header = sig.Header
obj.Signatures[i].original = &original
}
return obj, nil
}
// parseSignedCompact parses a message in compact format.
func parseSignedCompact(input string, payload []byte) (*JSONWebSignature, error) {
parts := strings.Split(input, ".")
if len(parts) != 3 {
return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts")
}
if parts[1] != "" && payload != nil {
return nil, fmt.Errorf("go-jose/go-jose: payload is not detached")
}
rawProtected, err := base64URLDecode(parts[0])
if err != nil {
return nil, err
}
if payload == nil {
payload, err = base64URLDecode(parts[1])
if err != nil {
return nil, err
}
}
signature, err := base64URLDecode(parts[2])
if err != nil {
return nil, err
}
raw := &rawJSONWebSignature{
Payload: newBuffer(payload),
Protected: newBuffer(rawProtected),
Signature: newBuffer(signature),
}
return raw.sanitized()
}
func (obj JSONWebSignature) compactSerialize(detached bool) (string, error) {
if len(obj.Signatures) != 1 || obj.Signatures[0].header != nil || obj.Signatures[0].protected == nil {
return "", ErrNotSupported
}
serializedProtected := base64.RawURLEncoding.EncodeToString(mustSerializeJSON(obj.Signatures[0].protected))
payload := ""
signature := base64.RawURLEncoding.EncodeToString(obj.Signatures[0].Signature)
if !detached {
payload = base64.RawURLEncoding.EncodeToString(obj.payload)
}
return fmt.Sprintf("%s.%s.%s", serializedProtected, payload, signature), nil
}
// CompactSerialize serializes an object using the compact serialization format.
func (obj JSONWebSignature) CompactSerialize() (string, error) {
return obj.compactSerialize(false)
}
// DetachedCompactSerialize serializes an object using the compact serialization format with detached payload.
func (obj JSONWebSignature) DetachedCompactSerialize() (string, error) {
return obj.compactSerialize(true)
}
// FullSerialize serializes an object using the full JSON serialization format.
func (obj JSONWebSignature) FullSerialize() string {
raw := rawJSONWebSignature{
Payload: newBuffer(obj.payload),
}
if len(obj.Signatures) == 1 {
if obj.Signatures[0].protected != nil {
serializedProtected := mustSerializeJSON(obj.Signatures[0].protected)
raw.Protected = newBuffer(serializedProtected)
}
raw.Header = obj.Signatures[0].header
raw.Signature = newBuffer(obj.Signatures[0].Signature)
} else {
raw.Signatures = make([]rawSignatureInfo, len(obj.Signatures))
for i, signature := range obj.Signatures {
raw.Signatures[i] = rawSignatureInfo{
Header: signature.header,
Signature: newBuffer(signature.Signature),
}
if signature.protected != nil {
raw.Signatures[i].Protected = newBuffer(mustSerializeJSON(signature.protected))
}
}
}
return string(mustSerializeJSON(raw))
}

144
vendor/github.com/go-jose/go-jose/v3/opaque.go generated vendored Normal file
View file

@ -0,0 +1,144 @@
/*-
* Copyright 2018 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jose
// OpaqueSigner is an interface that supports signing payloads with opaque
// private key(s). Private key operations performed by implementers may, for
// example, occur in a hardware module. An OpaqueSigner may rotate signing keys
// transparently to the user of this interface.
type OpaqueSigner interface {
// Public returns the public key of the current signing key.
Public() *JSONWebKey
// Algs returns a list of supported signing algorithms.
Algs() []SignatureAlgorithm
// SignPayload signs a payload with the current signing key using the given
// algorithm.
SignPayload(payload []byte, alg SignatureAlgorithm) ([]byte, error)
}
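// Illustrative usage sketch (not part of the upstream file): an OpaqueSigner
// (for example one backed by an HSM or KMS) can be supplied as the Key of a
// SigningKey when constructing a Signer; NewSigner and Sign are defined later
// in this package.
//
//	sk := jose.SigningKey{Algorithm: jose.ES256, Key: myOpaqueSigner}
//	signer, err := jose.NewSigner(sk, nil)
//	if err != nil {
//		// algorithm not supported by the opaque signer
//	}
//	jws, err := signer.Sign(payload)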
type opaqueSigner struct {
signer OpaqueSigner
}
func newOpaqueSigner(alg SignatureAlgorithm, signer OpaqueSigner) (recipientSigInfo, error) {
var algSupported bool
for _, salg := range signer.Algs() {
if alg == salg {
algSupported = true
break
}
}
if !algSupported {
return recipientSigInfo{}, ErrUnsupportedAlgorithm
}
return recipientSigInfo{
sigAlg: alg,
publicKey: signer.Public,
signer: &opaqueSigner{
signer: signer,
},
}, nil
}
func (o *opaqueSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
out, err := o.signer.SignPayload(payload, alg)
if err != nil {
return Signature{}, err
}
return Signature{
Signature: out,
protected: &rawHeader{},
}, nil
}
// OpaqueVerifier is an interface that supports verifying payloads with opaque
// public key(s). An OpaqueVerifier may rotate verification keys transparently
// to the user of this interface.
type OpaqueVerifier interface {
VerifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error
}
type opaqueVerifier struct {
verifier OpaqueVerifier
}
func (o *opaqueVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
return o.verifier.VerifyPayload(payload, signature, alg)
}
// OpaqueKeyEncrypter is an interface that supports encrypting keys with an opaque key.
type OpaqueKeyEncrypter interface {
// KeyID returns the kid
KeyID() string
// Algs returns a list of supported key encryption algorithms.
Algs() []KeyAlgorithm
// encryptKey encrypts the CEK using the given algorithm.
encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error)
}
type opaqueKeyEncrypter struct {
encrypter OpaqueKeyEncrypter
}
func newOpaqueKeyEncrypter(alg KeyAlgorithm, encrypter OpaqueKeyEncrypter) (recipientKeyInfo, error) {
var algSupported bool
for _, salg := range encrypter.Algs() {
if alg == salg {
algSupported = true
break
}
}
if !algSupported {
return recipientKeyInfo{}, ErrUnsupportedAlgorithm
}
return recipientKeyInfo{
keyID: encrypter.KeyID(),
keyAlg: alg,
keyEncrypter: &opaqueKeyEncrypter{
encrypter: encrypter,
},
}, nil
}
func (oke *opaqueKeyEncrypter) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
return oke.encrypter.encryptKey(cek, alg)
}
// OpaqueKeyDecrypter is an interface that supports decrypting keys with an opaque key.
type OpaqueKeyDecrypter interface {
DecryptKey(encryptedKey []byte, header Header) ([]byte, error)
}
type opaqueKeyDecrypter struct {
decrypter OpaqueKeyDecrypter
}
func (okd *opaqueKeyDecrypter) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
mergedHeaders := rawHeader{}
mergedHeaders.merge(&headers)
mergedHeaders.merge(recipient.header)
header, err := mergedHeaders.sanitized()
if err != nil {
return nil, err
}
return okd.decrypter.DecryptKey(recipient.encryptedKey, header)
}

520
vendor/github.com/go-jose/go-jose/v3/shared.go generated vendored Normal file
View file

@ -0,0 +1,520 @@
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jose
import (
"crypto/elliptic"
"crypto/x509"
"encoding/base64"
"errors"
"fmt"
"github.com/go-jose/go-jose/v3/json"
)
// KeyAlgorithm represents a key management algorithm.
type KeyAlgorithm string
// SignatureAlgorithm represents a signature (or MAC) algorithm.
type SignatureAlgorithm string
// ContentEncryption represents a content encryption algorithm.
type ContentEncryption string
// CompressionAlgorithm represents an algorithm used for plaintext compression.
type CompressionAlgorithm string
// ContentType represents type of the contained data.
type ContentType string
var (
// ErrCryptoFailure represents an error in cryptographic primitive. This
// occurs when, for example, a message had an invalid authentication tag or
// could not be decrypted.
ErrCryptoFailure = errors.New("go-jose/go-jose: error in cryptographic primitive")
// ErrUnsupportedAlgorithm indicates that a selected algorithm is not
// supported. This occurs when trying to instantiate an encrypter for an
// algorithm that is not yet implemented.
ErrUnsupportedAlgorithm = errors.New("go-jose/go-jose: unknown/unsupported algorithm")
// ErrUnsupportedKeyType indicates that the given key type/format is not
// supported. This occurs when trying to instantiate an encrypter and passing
// it a key of an unrecognized type or with unsupported parameters, such as
// an RSA private key with more than two primes.
ErrUnsupportedKeyType = errors.New("go-jose/go-jose: unsupported key type/format")
// ErrInvalidKeySize indicates that the given key is not the correct size
// for the selected algorithm. This can occur, for example, when trying to
// encrypt with AES-256 but passing only a 128-bit key as input.
ErrInvalidKeySize = errors.New("go-jose/go-jose: invalid key size for algorithm")
// ErrNotSupported indicates that serialization of the object is not
// supported. This occurs when trying to compact-serialize an object which
// can't be represented in compact form.
ErrNotSupported = errors.New("go-jose/go-jose: compact serialization not supported for object")
// ErrUnprotectedNonce indicates that while parsing a JWS or JWE object, a
// nonce header parameter was included in an unprotected header object.
ErrUnprotectedNonce = errors.New("go-jose/go-jose: Nonce parameter included in unprotected header")
)
// Key management algorithms
const (
ED25519 = KeyAlgorithm("ED25519")
RSA1_5 = KeyAlgorithm("RSA1_5") // RSA-PKCS1v1.5
RSA_OAEP = KeyAlgorithm("RSA-OAEP") // RSA-OAEP-SHA1
RSA_OAEP_256 = KeyAlgorithm("RSA-OAEP-256") // RSA-OAEP-SHA256
A128KW = KeyAlgorithm("A128KW") // AES key wrap (128)
A192KW = KeyAlgorithm("A192KW") // AES key wrap (192)
A256KW = KeyAlgorithm("A256KW") // AES key wrap (256)
DIRECT = KeyAlgorithm("dir") // Direct encryption
ECDH_ES = KeyAlgorithm("ECDH-ES") // ECDH-ES
ECDH_ES_A128KW = KeyAlgorithm("ECDH-ES+A128KW") // ECDH-ES + AES key wrap (128)
ECDH_ES_A192KW = KeyAlgorithm("ECDH-ES+A192KW") // ECDH-ES + AES key wrap (192)
ECDH_ES_A256KW = KeyAlgorithm("ECDH-ES+A256KW") // ECDH-ES + AES key wrap (256)
A128GCMKW = KeyAlgorithm("A128GCMKW") // AES-GCM key wrap (128)
A192GCMKW = KeyAlgorithm("A192GCMKW") // AES-GCM key wrap (192)
A256GCMKW = KeyAlgorithm("A256GCMKW") // AES-GCM key wrap (256)
PBES2_HS256_A128KW = KeyAlgorithm("PBES2-HS256+A128KW") // PBES2 + HMAC-SHA256 + AES key wrap (128)
PBES2_HS384_A192KW = KeyAlgorithm("PBES2-HS384+A192KW") // PBES2 + HMAC-SHA384 + AES key wrap (192)
PBES2_HS512_A256KW = KeyAlgorithm("PBES2-HS512+A256KW") // PBES2 + HMAC-SHA512 + AES key wrap (256)
)
// Signature algorithms
const (
EdDSA = SignatureAlgorithm("EdDSA")
HS256 = SignatureAlgorithm("HS256") // HMAC using SHA-256
HS384 = SignatureAlgorithm("HS384") // HMAC using SHA-384
HS512 = SignatureAlgorithm("HS512") // HMAC using SHA-512
RS256 = SignatureAlgorithm("RS256") // RSASSA-PKCS-v1.5 using SHA-256
RS384 = SignatureAlgorithm("RS384") // RSASSA-PKCS-v1.5 using SHA-384
RS512 = SignatureAlgorithm("RS512") // RSASSA-PKCS-v1.5 using SHA-512
ES256 = SignatureAlgorithm("ES256") // ECDSA using P-256 and SHA-256
ES384 = SignatureAlgorithm("ES384") // ECDSA using P-384 and SHA-384
ES512 = SignatureAlgorithm("ES512") // ECDSA using P-521 and SHA-512
PS256 = SignatureAlgorithm("PS256") // RSASSA-PSS using SHA256 and MGF1-SHA256
PS384 = SignatureAlgorithm("PS384") // RSASSA-PSS using SHA384 and MGF1-SHA384
PS512 = SignatureAlgorithm("PS512") // RSASSA-PSS using SHA512 and MGF1-SHA512
)
// Content encryption algorithms
const (
A128CBC_HS256 = ContentEncryption("A128CBC-HS256") // AES-CBC + HMAC-SHA256 (128)
A192CBC_HS384 = ContentEncryption("A192CBC-HS384") // AES-CBC + HMAC-SHA384 (192)
A256CBC_HS512 = ContentEncryption("A256CBC-HS512") // AES-CBC + HMAC-SHA512 (256)
A128GCM = ContentEncryption("A128GCM") // AES-GCM (128)
A192GCM = ContentEncryption("A192GCM") // AES-GCM (192)
A256GCM = ContentEncryption("A256GCM") // AES-GCM (256)
)
// Compression algorithms
const (
NONE = CompressionAlgorithm("") // No compression
DEFLATE = CompressionAlgorithm("DEF") // DEFLATE (RFC 1951)
)
// A key in the protected header of a JWS object. Use of the Header...
// constants is preferred to enhance type safety.
type HeaderKey string
const (
HeaderType = "typ" // string
HeaderContentType = "cty" // string
// These are set by go-jose and shouldn't need to be set by consumers of the
// library.
headerAlgorithm = "alg" // string
headerEncryption = "enc" // ContentEncryption
headerCompression = "zip" // CompressionAlgorithm
headerCritical = "crit" // []string
headerAPU = "apu" // *byteBuffer
headerAPV = "apv" // *byteBuffer
headerEPK = "epk" // *JSONWebKey
headerIV = "iv" // *byteBuffer
headerTag = "tag" // *byteBuffer
headerX5c = "x5c" // []*x509.Certificate
headerJWK = "jwk" // *JSONWebKey
headerKeyID = "kid" // string
headerNonce = "nonce" // string
headerB64 = "b64" // bool
headerP2C = "p2c" // *byteBuffer (int)
headerP2S = "p2s" // *byteBuffer ([]byte)
)
// supportedCritical is the set of supported extensions that are understood and processed.
var supportedCritical = map[string]bool{
headerB64: true,
}
// rawHeader represents the JOSE header for JWE/JWS objects (used for parsing).
//
// The decoding of the constituent items is deferred because we want to marshal
// some members into particular structs rather than generic maps, but at the
// same time we need to receive any extra fields unhandled by this library to
// pass through to consuming code in case it wants to examine them.
type rawHeader map[HeaderKey]*json.RawMessage
// Header represents the read-only JOSE header for JWE/JWS objects.
type Header struct {
KeyID string
JSONWebKey *JSONWebKey
Algorithm string
Nonce string
// Unverified certificate chain parsed from x5c header.
certificates []*x509.Certificate
// Any headers not recognised above get unmarshalled
// from JSON in a generic manner and placed in this map.
ExtraHeaders map[HeaderKey]interface{}
}
// Certificates verifies & returns the certificate chain present
// in the x5c header field of a message, if one was present. Returns
// an error if there was no x5c header present or the chain could
// not be validated with the given verify options.
func (h Header) Certificates(opts x509.VerifyOptions) ([][]*x509.Certificate, error) {
if len(h.certificates) == 0 {
return nil, errors.New("go-jose/go-jose: no x5c header present in message")
}
leaf := h.certificates[0]
if opts.Intermediates == nil {
opts.Intermediates = x509.NewCertPool()
for _, intermediate := range h.certificates[1:] {
opts.Intermediates.AddCert(intermediate)
}
}
return leaf.Verify(opts)
}
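// Illustrative usage sketch (not part of the upstream file): after parsing a
// signed message, the x5c chain in a signature header can be verified against
// a caller-supplied root pool; rootPool is assumed to be an *x509.CertPool.
//
//	opts := x509.VerifyOptions{Roots: rootPool, DNSName: "example.com"}
//	chains, err := object.Signatures[0].Header.Certificates(opts)
//	if err != nil {
//		// no x5c header present, or the chain did not verify
//	}
//	_ = chains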
func (parsed rawHeader) set(k HeaderKey, v interface{}) error {
b, err := json.Marshal(v)
if err != nil {
return err
}
parsed[k] = makeRawMessage(b)
return nil
}
// getString gets a string from the raw JSON, defaulting to "".
func (parsed rawHeader) getString(k HeaderKey) string {
v, ok := parsed[k]
if !ok || v == nil {
return ""
}
var s string
err := json.Unmarshal(*v, &s)
if err != nil {
return ""
}
return s
}
// getByteBuffer gets a byte buffer from the raw JSON. Returns (nil, nil) if
// not specified.
func (parsed rawHeader) getByteBuffer(k HeaderKey) (*byteBuffer, error) {
v := parsed[k]
if v == nil {
return nil, nil
}
var bb *byteBuffer
err := json.Unmarshal(*v, &bb)
if err != nil {
return nil, err
}
return bb, nil
}
// getAlgorithm extracts parsed "alg" from the raw JSON as a KeyAlgorithm.
func (parsed rawHeader) getAlgorithm() KeyAlgorithm {
return KeyAlgorithm(parsed.getString(headerAlgorithm))
}
// getSignatureAlgorithm extracts parsed "alg" from the raw JSON as a SignatureAlgorithm.
func (parsed rawHeader) getSignatureAlgorithm() SignatureAlgorithm {
return SignatureAlgorithm(parsed.getString(headerAlgorithm))
}
// getEncryption extracts parsed "enc" from the raw JSON.
func (parsed rawHeader) getEncryption() ContentEncryption {
return ContentEncryption(parsed.getString(headerEncryption))
}
// getCompression extracts parsed "zip" from the raw JSON.
func (parsed rawHeader) getCompression() CompressionAlgorithm {
return CompressionAlgorithm(parsed.getString(headerCompression))
}
func (parsed rawHeader) getNonce() string {
return parsed.getString(headerNonce)
}
// getEPK extracts parsed "epk" from the raw JSON.
func (parsed rawHeader) getEPK() (*JSONWebKey, error) {
v := parsed[headerEPK]
if v == nil {
return nil, nil
}
var epk *JSONWebKey
err := json.Unmarshal(*v, &epk)
if err != nil {
return nil, err
}
return epk, nil
}
// getAPU extracts parsed "apu" from the raw JSON.
func (parsed rawHeader) getAPU() (*byteBuffer, error) {
return parsed.getByteBuffer(headerAPU)
}
// getAPV extracts parsed "apv" from the raw JSON.
func (parsed rawHeader) getAPV() (*byteBuffer, error) {
return parsed.getByteBuffer(headerAPV)
}
// getIV extracts parsed "iv" from the raw JSON.
func (parsed rawHeader) getIV() (*byteBuffer, error) {
return parsed.getByteBuffer(headerIV)
}
// getTag extracts parsed "tag" from the raw JSON.
func (parsed rawHeader) getTag() (*byteBuffer, error) {
return parsed.getByteBuffer(headerTag)
}
// getJWK extracts parsed "jwk" from the raw JSON.
func (parsed rawHeader) getJWK() (*JSONWebKey, error) {
v := parsed[headerJWK]
if v == nil {
return nil, nil
}
var jwk *JSONWebKey
err := json.Unmarshal(*v, &jwk)
if err != nil {
return nil, err
}
return jwk, nil
}
// getCritical extracts parsed "crit" from the raw JSON. If omitted, it
// returns a nil slice.
func (parsed rawHeader) getCritical() ([]string, error) {
v := parsed[headerCritical]
if v == nil {
return nil, nil
}
var q []string
err := json.Unmarshal(*v, &q)
if err != nil {
return nil, err
}
return q, nil
}
// getP2C extracts parsed "p2c" from the raw JSON.
func (parsed rawHeader) getP2C() (int, error) {
v := parsed[headerP2C]
if v == nil {
return 0, nil
}
var p2c int
err := json.Unmarshal(*v, &p2c)
if err != nil {
return 0, err
}
return p2c, nil
}
// getP2S extracts parsed "p2s" from the raw JSON.
func (parsed rawHeader) getP2S() (*byteBuffer, error) {
return parsed.getByteBuffer(headerP2S)
}
// getB64 extracts parsed "b64" from the raw JSON, defaulting to true.
func (parsed rawHeader) getB64() (bool, error) {
v := parsed[headerB64]
if v == nil {
return true, nil
}
var b64 bool
err := json.Unmarshal(*v, &b64)
if err != nil {
return true, err
}
return b64, nil
}
// sanitized produces a cleaned-up header object from the raw JSON.
func (parsed rawHeader) sanitized() (h Header, err error) {
for k, v := range parsed {
if v == nil {
continue
}
switch k {
case headerJWK:
var jwk *JSONWebKey
err = json.Unmarshal(*v, &jwk)
if err != nil {
err = fmt.Errorf("failed to unmarshal JWK: %v: %#v", err, string(*v))
return
}
h.JSONWebKey = jwk
case headerKeyID:
var s string
err = json.Unmarshal(*v, &s)
if err != nil {
err = fmt.Errorf("failed to unmarshal key ID: %v: %#v", err, string(*v))
return
}
h.KeyID = s
case headerAlgorithm:
var s string
err = json.Unmarshal(*v, &s)
if err != nil {
err = fmt.Errorf("failed to unmarshal algorithm: %v: %#v", err, string(*v))
return
}
h.Algorithm = s
case headerNonce:
var s string
err = json.Unmarshal(*v, &s)
if err != nil {
err = fmt.Errorf("failed to unmarshal nonce: %v: %#v", err, string(*v))
return
}
h.Nonce = s
case headerX5c:
c := []string{}
err = json.Unmarshal(*v, &c)
if err != nil {
err = fmt.Errorf("failed to unmarshal x5c header: %v: %#v", err, string(*v))
return
}
h.certificates, err = parseCertificateChain(c)
if err != nil {
err = fmt.Errorf("failed to unmarshal x5c header: %v: %#v", err, string(*v))
return
}
default:
if h.ExtraHeaders == nil {
h.ExtraHeaders = map[HeaderKey]interface{}{}
}
var v2 interface{}
err = json.Unmarshal(*v, &v2)
if err != nil {
err = fmt.Errorf("failed to unmarshal value: %v: %#v", err, string(*v))
return
}
h.ExtraHeaders[k] = v2
}
}
return
}
func parseCertificateChain(chain []string) ([]*x509.Certificate, error) {
out := make([]*x509.Certificate, len(chain))
for i, cert := range chain {
raw, err := base64.StdEncoding.DecodeString(cert)
if err != nil {
return nil, err
}
out[i], err = x509.ParseCertificate(raw)
if err != nil {
return nil, err
}
}
return out, nil
}
func (parsed rawHeader) isSet(k HeaderKey) bool {
dvr := parsed[k]
if dvr == nil {
return false
}
var dv interface{}
err := json.Unmarshal(*dvr, &dv)
if err != nil {
return true
}
if dvStr, ok := dv.(string); ok {
return dvStr != ""
}
return true
}
// Merge headers from src into the receiver, giving precedence to headers already set on the receiver.
func (parsed rawHeader) merge(src *rawHeader) {
if src == nil {
return
}
for k, v := range *src {
if parsed.isSet(k) {
continue
}
parsed[k] = v
}
}
// Get JOSE name of curve
func curveName(crv elliptic.Curve) (string, error) {
switch crv {
case elliptic.P256():
return "P-256", nil
case elliptic.P384():
return "P-384", nil
case elliptic.P521():
return "P-521", nil
default:
return "", fmt.Errorf("go-jose/go-jose: unsupported/unknown elliptic curve")
}
}
// Get size of curve in bytes
func curveSize(crv elliptic.Curve) int {
bits := crv.Params().BitSize
div := bits / 8
mod := bits % 8
if mod == 0 {
return div
}
return div + 1
}
func makeRawMessage(b []byte) *json.RawMessage {
rm := json.RawMessage(b)
return &rm
}

450
vendor/github.com/go-jose/go-jose/v3/signing.go generated vendored Normal file
View file

@ -0,0 +1,450 @@
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jose
import (
"bytes"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/rsa"
"encoding/base64"
"errors"
"fmt"
"github.com/go-jose/go-jose/v3/json"
)
// NonceSource represents a source of random nonces to go into JWS objects
type NonceSource interface {
Nonce() (string, error)
}
// Signer represents a signer which takes a payload and produces a signed JWS object.
type Signer interface {
Sign(payload []byte) (*JSONWebSignature, error)
Options() SignerOptions
}
// SigningKey represents an algorithm/key used to sign a message.
type SigningKey struct {
Algorithm SignatureAlgorithm
Key interface{}
}
// SignerOptions represents options that can be set when creating signers.
type SignerOptions struct {
NonceSource NonceSource
EmbedJWK bool
// Optional map of additional keys to be inserted into the protected header
// of a JWS object. Some specifications which make use of JWS like to insert
// additional values here. All values must be JSON-serializable.
ExtraHeaders map[HeaderKey]interface{}
}
// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it
// if necessary. It returns itself and so can be used in a fluent style.
func (so *SignerOptions) WithHeader(k HeaderKey, v interface{}) *SignerOptions {
if so.ExtraHeaders == nil {
so.ExtraHeaders = map[HeaderKey]interface{}{}
}
so.ExtraHeaders[k] = v
return so
}
// WithContentType adds a content type ("cty") header and returns the updated
// SignerOptions.
func (so *SignerOptions) WithContentType(contentType ContentType) *SignerOptions {
return so.WithHeader(HeaderContentType, contentType)
}
// WithType adds a type ("typ") header and returns the updated SignerOptions.
func (so *SignerOptions) WithType(typ ContentType) *SignerOptions {
return so.WithHeader(HeaderType, typ)
}
// WithCritical adds the given names to the critical ("crit") header and returns
// the updated SignerOptions.
func (so *SignerOptions) WithCritical(names ...string) *SignerOptions {
if so.ExtraHeaders[headerCritical] == nil {
so.WithHeader(headerCritical, make([]string, 0, len(names)))
}
crit := so.ExtraHeaders[headerCritical].([]string)
so.ExtraHeaders[headerCritical] = append(crit, names...)
return so
}
// WithBase64 adds a base64url-encode payload ("b64") header and returns the updated
// SignerOptions. When b64 is false, the payload is not base64url-encoded.
func (so *SignerOptions) WithBase64(b64 bool) *SignerOptions {
if !b64 {
so.WithHeader(headerB64, b64)
so.WithCritical(headerB64)
}
return so
}
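The option helpers above return the receiver, so they chain. A minimal sketch of composing them, assuming the public github.com/go-jose/go-jose/v3 import path and made-up header values:
package main
import (
	"fmt"
	jose "github.com/go-jose/go-jose/v3"
)
func main() {
	// Each With* call mutates and returns the same *SignerOptions.
	opts := (&jose.SignerOptions{}).
		WithType("JOSE").
		WithContentType("application/example"). // hypothetical content type
		WithHeader("url", "https://example.com/acme/new-order")
	// WithBase64(false) records the header and also marks "b64" critical,
	// as RFC 7797 requires; passing true is a no-op since true is the default.
	opts = opts.WithBase64(false)
	fmt.Println(opts.ExtraHeaders)
}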
type payloadSigner interface {
signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error)
}
type payloadVerifier interface {
verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error
}
type genericSigner struct {
recipients []recipientSigInfo
nonceSource NonceSource
embedJWK bool
extraHeaders map[HeaderKey]interface{}
}
type recipientSigInfo struct {
sigAlg SignatureAlgorithm
publicKey func() *JSONWebKey
signer payloadSigner
}
func staticPublicKey(jwk *JSONWebKey) func() *JSONWebKey {
return func() *JSONWebKey {
return jwk
}
}
// NewSigner creates an appropriate signer based on the key type
func NewSigner(sig SigningKey, opts *SignerOptions) (Signer, error) {
return NewMultiSigner([]SigningKey{sig}, opts)
}
// NewMultiSigner creates a signer for multiple recipients
func NewMultiSigner(sigs []SigningKey, opts *SignerOptions) (Signer, error) {
signer := &genericSigner{recipients: []recipientSigInfo{}}
if opts != nil {
signer.nonceSource = opts.NonceSource
signer.embedJWK = opts.EmbedJWK
signer.extraHeaders = opts.ExtraHeaders
}
for _, sig := range sigs {
err := signer.addRecipient(sig.Algorithm, sig.Key)
if err != nil {
return nil, err
}
}
return signer, nil
}
// newVerifier creates a verifier based on the key type
func newVerifier(verificationKey interface{}) (payloadVerifier, error) {
switch verificationKey := verificationKey.(type) {
case ed25519.PublicKey:
return &edEncrypterVerifier{
publicKey: verificationKey,
}, nil
case *rsa.PublicKey:
return &rsaEncrypterVerifier{
publicKey: verificationKey,
}, nil
case *ecdsa.PublicKey:
return &ecEncrypterVerifier{
publicKey: verificationKey,
}, nil
case []byte:
return &symmetricMac{
key: verificationKey,
}, nil
case JSONWebKey:
return newVerifier(verificationKey.Key)
case *JSONWebKey:
return newVerifier(verificationKey.Key)
}
if ov, ok := verificationKey.(OpaqueVerifier); ok {
return &opaqueVerifier{verifier: ov}, nil
}
return nil, ErrUnsupportedKeyType
}
func (ctx *genericSigner) addRecipient(alg SignatureAlgorithm, signingKey interface{}) error {
recipient, err := makeJWSRecipient(alg, signingKey)
if err != nil {
return err
}
ctx.recipients = append(ctx.recipients, recipient)
return nil
}
func makeJWSRecipient(alg SignatureAlgorithm, signingKey interface{}) (recipientSigInfo, error) {
switch signingKey := signingKey.(type) {
case ed25519.PrivateKey:
return newEd25519Signer(alg, signingKey)
case *rsa.PrivateKey:
return newRSASigner(alg, signingKey)
case *ecdsa.PrivateKey:
return newECDSASigner(alg, signingKey)
case []byte:
return newSymmetricSigner(alg, signingKey)
case JSONWebKey:
return newJWKSigner(alg, signingKey)
case *JSONWebKey:
return newJWKSigner(alg, *signingKey)
}
if signer, ok := signingKey.(OpaqueSigner); ok {
return newOpaqueSigner(alg, signer)
}
return recipientSigInfo{}, ErrUnsupportedKeyType
}
func newJWKSigner(alg SignatureAlgorithm, signingKey JSONWebKey) (recipientSigInfo, error) {
recipient, err := makeJWSRecipient(alg, signingKey.Key)
if err != nil {
return recipientSigInfo{}, err
}
if recipient.publicKey != nil && recipient.publicKey() != nil {
// recipient.publicKey is a JWK synthesized for embedding when recipientSigInfo
// was created for the inner key (such as an RSA or ECDSA public key). It contains
// the public key for embedding, but doesn't have extra parameters such as the key ID.
publicKey := signingKey
publicKey.Key = recipient.publicKey().Key
recipient.publicKey = staticPublicKey(&publicKey)
// This should be impossible, but let's check anyway.
if !recipient.publicKey().IsPublic() {
return recipientSigInfo{}, errors.New("go-jose/go-jose: public key was unexpectedly not public")
}
}
return recipient, nil
}
func (ctx *genericSigner) Sign(payload []byte) (*JSONWebSignature, error) {
obj := &JSONWebSignature{}
obj.payload = payload
obj.Signatures = make([]Signature, len(ctx.recipients))
for i, recipient := range ctx.recipients {
protected := map[HeaderKey]interface{}{
headerAlgorithm: string(recipient.sigAlg),
}
if recipient.publicKey != nil && recipient.publicKey() != nil {
// We want to embed the JWK or set the kid header, but not both. Having a protected
// header that contains an embedded JWK while also simultaneously containing the kid
// header is confusing, and at least in ACME the two are considered to be mutually
// exclusive. The fact that both can exist at the same time is a somewhat unfortunate
// result of the JOSE spec. We've decided that this library will only include one or
// the other to avoid this confusion.
//
// See https://github.com/go-jose/go-jose/issues/157 for more context.
if ctx.embedJWK {
protected[headerJWK] = recipient.publicKey()
} else {
keyID := recipient.publicKey().KeyID
if keyID != "" {
protected[headerKeyID] = keyID
}
}
}
if ctx.nonceSource != nil {
nonce, err := ctx.nonceSource.Nonce()
if err != nil {
return nil, fmt.Errorf("go-jose/go-jose: Error generating nonce: %v", err)
}
protected[headerNonce] = nonce
}
for k, v := range ctx.extraHeaders {
protected[k] = v
}
serializedProtected := mustSerializeJSON(protected)
needsBase64 := true
if b64, ok := protected[headerB64]; ok {
if needsBase64, ok = b64.(bool); !ok {
return nil, errors.New("go-jose/go-jose: Invalid b64 header parameter")
}
}
var input bytes.Buffer
input.WriteString(base64.RawURLEncoding.EncodeToString(serializedProtected))
input.WriteByte('.')
if needsBase64 {
input.WriteString(base64.RawURLEncoding.EncodeToString(payload))
} else {
input.Write(payload)
}
signatureInfo, err := recipient.signer.signPayload(input.Bytes(), recipient.sigAlg)
if err != nil {
return nil, err
}
signatureInfo.protected = &rawHeader{}
for k, v := range protected {
b, err := json.Marshal(v)
if err != nil {
return nil, fmt.Errorf("go-jose/go-jose: Error marshalling item %#v: %v", k, err)
}
(*signatureInfo.protected)[k] = makeRawMessage(b)
}
obj.Signatures[i] = signatureInfo
}
return obj, nil
}
func (ctx *genericSigner) Options() SignerOptions {
return SignerOptions{
NonceSource: ctx.nonceSource,
EmbedJWK: ctx.embedJWK,
ExtraHeaders: ctx.extraHeaders,
}
}
// Verify validates the signature on the object and returns the payload.
// This function does not support multi-signature objects; if you need
// multi-signature verification, use VerifyMulti instead.
//
// Be careful when verifying signatures based on embedded JWKs inside the
// payload header. You cannot assume that the key received in a payload is
// trusted.
func (obj JSONWebSignature) Verify(verificationKey interface{}) ([]byte, error) {
err := obj.DetachedVerify(obj.payload, verificationKey)
if err != nil {
return nil, err
}
return obj.payload, nil
}
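Verify pairs with NewSigner and Sign above. A minimal HMAC round trip, again assuming the public github.com/go-jose/go-jose/v3 import path; the key and payload below are placeholder values:
package main
import (
	"fmt"
	jose "github.com/go-jose/go-jose/v3"
)
func main() {
	key := []byte("0123456789abcdef0123456789abcdef") // 32-byte demo secret
	// Sign a payload with HS256 and the default options.
	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.HS256, Key: key}, nil)
	if err != nil {
		panic(err)
	}
	obj, err := signer.Sign([]byte(`{"hello":"world"}`))
	if err != nil {
		panic(err)
	}
	serialized, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}
	// Parse and verify using the same symmetric key (single-signature path).
	parsed, err := jose.ParseSigned(serialized)
	if err != nil {
		panic(err)
	}
	payload, err := parsed.Verify(key)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))
}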
// UnsafePayloadWithoutVerification returns the payload without
// verifying it. The content returned from this function cannot be
// trusted.
func (obj JSONWebSignature) UnsafePayloadWithoutVerification() []byte {
return obj.payload
}
// DetachedVerify validates a detached signature on the given payload. In
// most cases, you will probably want to use Verify instead. DetachedVerify
// is only useful if you have a payload and signature that are separated from
// each other.
func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey interface{}) error {
key := tryJWKS(verificationKey, obj.headers()...)
verifier, err := newVerifier(key)
if err != nil {
return err
}
if len(obj.Signatures) > 1 {
return errors.New("go-jose/go-jose: too many signatures in payload; expecting only one")
}
signature := obj.Signatures[0]
headers := signature.mergedHeaders()
critical, err := headers.getCritical()
if err != nil {
return err
}
for _, name := range critical {
if !supportedCritical[name] {
return ErrCryptoFailure
}
}
input, err := obj.computeAuthData(payload, &signature)
if err != nil {
return ErrCryptoFailure
}
alg := headers.getSignatureAlgorithm()
err = verifier.verifyPayload(input, signature.Signature, alg)
if err == nil {
return nil
}
return ErrCryptoFailure
}
// VerifyMulti validates (one of the multiple) signatures on the object and
// returns the index of the signature that was verified, along with the signature
// object and the payload. We return the signature and index to guarantee that
// callers are getting the verified value.
func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signature, []byte, error) {
idx, sig, err := obj.DetachedVerifyMulti(obj.payload, verificationKey)
if err != nil {
return -1, Signature{}, nil, err
}
return idx, sig, obj.payload, nil
}
// DetachedVerifyMulti validates a detached signature on the given payload with
// a signature/object that has potentially multiple signers. This returns the index
// of the signature that was verified, along with the signature object. We return
// the signature and index to guarantee that callers are getting the verified value.
//
// In most cases, you will probably want to use Verify or VerifyMulti instead.
// DetachedVerifyMulti is only useful if you have a payload and signature that are
// separated from each other, and the signature can have multiple signers at the
// same time.
func (obj JSONWebSignature) DetachedVerifyMulti(payload []byte, verificationKey interface{}) (int, Signature, error) {
key := tryJWKS(verificationKey, obj.headers()...)
verifier, err := newVerifier(key)
if err != nil {
return -1, Signature{}, err
}
outer:
for i, signature := range obj.Signatures {
headers := signature.mergedHeaders()
critical, err := headers.getCritical()
if err != nil {
continue
}
for _, name := range critical {
if !supportedCritical[name] {
continue outer
}
}
input, err := obj.computeAuthData(payload, &signature)
if err != nil {
continue
}
alg := headers.getSignatureAlgorithm()
err = verifier.verifyPayload(input, signature.Signature, alg)
if err == nil {
return i, signature, nil
}
}
return -1, Signature{}, ErrCryptoFailure
}
func (obj JSONWebSignature) headers() []Header {
headers := make([]Header, len(obj.Signatures))
for i, sig := range obj.Signatures {
headers[i] = sig.Header
}
return headers
}

495
vendor/github.com/go-jose/go-jose/v3/symmetric.go generated vendored Normal file
View file

@ -0,0 +1,495 @@
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jose
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/hmac"
"crypto/rand"
"crypto/sha256"
"crypto/sha512"
"crypto/subtle"
"errors"
"fmt"
"hash"
"io"
"golang.org/x/crypto/pbkdf2"
josecipher "github.com/go-jose/go-jose/v3/cipher"
)
// RandReader is a cryptographically secure random number generator (stubbed out in tests).
var RandReader = rand.Reader
const (
// RFC7518 recommends a minimum of 1,000 iterations:
// https://tools.ietf.org/html/rfc7518#section-4.8.1.2
// NIST recommends a minimum of 10,000:
// https://pages.nist.gov/800-63-3/sp800-63b.html
// 1Password uses 100,000:
// https://support.1password.com/pbkdf2/
defaultP2C = 100000
// Default salt size: 128 bits
defaultP2SSize = 16
)
// Dummy key cipher for shared symmetric key mode
type symmetricKeyCipher struct {
key []byte // Pre-shared content-encryption key
p2c int // PBES2 Count
p2s []byte // PBES2 Salt Input
}
// Signer/verifier for MAC modes
type symmetricMac struct {
key []byte
}
// Input/output from an AEAD operation
type aeadParts struct {
iv, ciphertext, tag []byte
}
// A content cipher based on an AEAD construction
type aeadContentCipher struct {
keyBytes int
authtagBytes int
getAead func(key []byte) (cipher.AEAD, error)
}
// Random key generator
type randomKeyGenerator struct {
size int
}
// Static key generator
type staticKeyGenerator struct {
key []byte
}
// Create a new content cipher based on AES-GCM
func newAESGCM(keySize int) contentCipher {
return &aeadContentCipher{
keyBytes: keySize,
authtagBytes: 16,
getAead: func(key []byte) (cipher.AEAD, error) {
aes, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
return cipher.NewGCM(aes)
},
}
}
// Create a new content cipher based on AES-CBC+HMAC
func newAESCBC(keySize int) contentCipher {
return &aeadContentCipher{
keyBytes: keySize * 2,
authtagBytes: keySize,
getAead: func(key []byte) (cipher.AEAD, error) {
return josecipher.NewCBCHMAC(key, aes.NewCipher)
},
}
}
// Get an AEAD cipher object for the given content encryption algorithm
func getContentCipher(alg ContentEncryption) contentCipher {
switch alg {
case A128GCM:
return newAESGCM(16)
case A192GCM:
return newAESGCM(24)
case A256GCM:
return newAESGCM(32)
case A128CBC_HS256:
return newAESCBC(16)
case A192CBC_HS384:
return newAESCBC(24)
case A256CBC_HS512:
return newAESCBC(32)
default:
return nil
}
}
// getPbkdf2Params returns the key length and hash function used in
// pbkdf2.Key.
func getPbkdf2Params(alg KeyAlgorithm) (int, func() hash.Hash) {
switch alg {
case PBES2_HS256_A128KW:
return 16, sha256.New
case PBES2_HS384_A192KW:
return 24, sha512.New384
case PBES2_HS512_A256KW:
return 32, sha512.New
default:
panic("invalid algorithm")
}
}
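For illustration only, the parameters returned here plug into golang.org/x/crypto/pbkdf2 together with the salt layout used later in this file (UTF8(Alg) || 0x00 || Salt Input); the passphrase, salt input and iteration count below are made-up values:
package main
import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"golang.org/x/crypto/pbkdf2"
)
func main() {
	alg := "PBES2-HS256+A128KW" // maps to a 16-byte key and sha256.New above
	saltInput := []byte("demo-salt-input")
	// RFC 7518 salt layout: UTF8(Alg) || 0x00 || Salt Input.
	salt := bytes.Join([][]byte{[]byte(alg), saltInput}, []byte{0x00})
	// Derive the key-wrapping key; in the JWE path this key then wraps the CEK via AES-KW.
	derived := pbkdf2.Key([]byte("demo passphrase"), salt, 100000, 16, sha256.New)
	fmt.Printf("%x\n", derived)
}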
// getRandomSalt generates a new salt of the given size.
func getRandomSalt(size int) ([]byte, error) {
salt := make([]byte, size)
_, err := io.ReadFull(RandReader, salt)
if err != nil {
return nil, err
}
return salt, nil
}
// newSymmetricRecipient creates a JWE recipient for a pre-shared symmetric key
// (direct encryption, AES key wrap, AES-GCM key wrap, or PBES2 key wrap).
func newSymmetricRecipient(keyAlg KeyAlgorithm, key []byte) (recipientKeyInfo, error) {
switch keyAlg {
case DIRECT, A128GCMKW, A192GCMKW, A256GCMKW, A128KW, A192KW, A256KW:
case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW:
default:
return recipientKeyInfo{}, ErrUnsupportedAlgorithm
}
return recipientKeyInfo{
keyAlg: keyAlg,
keyEncrypter: &symmetricKeyCipher{
key: key,
},
}, nil
}
// newSymmetricSigner creates a recipientSigInfo based on the given key.
func newSymmetricSigner(sigAlg SignatureAlgorithm, key []byte) (recipientSigInfo, error) {
// Verify that the signature algorithm is supported by this signer
switch sigAlg {
case HS256, HS384, HS512:
default:
return recipientSigInfo{}, ErrUnsupportedAlgorithm
}
return recipientSigInfo{
sigAlg: sigAlg,
signer: &symmetricMac{
key: key,
},
}, nil
}
// Generate a random key for the given content cipher
func (ctx randomKeyGenerator) genKey() ([]byte, rawHeader, error) {
key := make([]byte, ctx.size)
_, err := io.ReadFull(RandReader, key)
if err != nil {
return nil, rawHeader{}, err
}
return key, rawHeader{}, nil
}
// Key size for random generator
func (ctx randomKeyGenerator) keySize() int {
return ctx.size
}
// Generate a static key (for direct mode)
func (ctx staticKeyGenerator) genKey() ([]byte, rawHeader, error) {
cek := make([]byte, len(ctx.key))
copy(cek, ctx.key)
return cek, rawHeader{}, nil
}
// Key size for static generator
func (ctx staticKeyGenerator) keySize() int {
return len(ctx.key)
}
// Get key size for this cipher
func (ctx aeadContentCipher) keySize() int {
return ctx.keyBytes
}
// Encrypt some data
func (ctx aeadContentCipher) encrypt(key, aad, pt []byte) (*aeadParts, error) {
// Get a new AEAD instance
aead, err := ctx.getAead(key)
if err != nil {
return nil, err
}
// Initialize a new nonce
iv := make([]byte, aead.NonceSize())
_, err = io.ReadFull(RandReader, iv)
if err != nil {
return nil, err
}
ciphertextAndTag := aead.Seal(nil, iv, pt, aad)
offset := len(ciphertextAndTag) - ctx.authtagBytes
return &aeadParts{
iv: iv,
ciphertext: ciphertextAndTag[:offset],
tag: ciphertextAndTag[offset:],
}, nil
}
// Decrypt some data
func (ctx aeadContentCipher) decrypt(key, aad []byte, parts *aeadParts) ([]byte, error) {
aead, err := ctx.getAead(key)
if err != nil {
return nil, err
}
if len(parts.iv) != aead.NonceSize() || len(parts.tag) < ctx.authtagBytes {
return nil, ErrCryptoFailure
}
return aead.Open(nil, parts.iv, append(parts.ciphertext, parts.tag...), aad)
}
// Encrypt the content encryption key.
func (ctx *symmetricKeyCipher) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
switch alg {
case DIRECT:
return recipientInfo{
header: &rawHeader{},
}, nil
case A128GCMKW, A192GCMKW, A256GCMKW:
aead := newAESGCM(len(ctx.key))
parts, err := aead.encrypt(ctx.key, []byte{}, cek)
if err != nil {
return recipientInfo{}, err
}
header := &rawHeader{}
if err = header.set(headerIV, newBuffer(parts.iv)); err != nil {
return recipientInfo{}, err
}
if err = header.set(headerTag, newBuffer(parts.tag)); err != nil {
return recipientInfo{}, err
}
return recipientInfo{
header: header,
encryptedKey: parts.ciphertext,
}, nil
case A128KW, A192KW, A256KW:
block, err := aes.NewCipher(ctx.key)
if err != nil {
return recipientInfo{}, err
}
jek, err := josecipher.KeyWrap(block, cek)
if err != nil {
return recipientInfo{}, err
}
return recipientInfo{
encryptedKey: jek,
header: &rawHeader{},
}, nil
case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW:
if len(ctx.p2s) == 0 {
salt, err := getRandomSalt(defaultP2SSize)
if err != nil {
return recipientInfo{}, err
}
ctx.p2s = salt
}
if ctx.p2c <= 0 {
ctx.p2c = defaultP2C
}
// salt is UTF8(Alg) || 0x00 || Salt Input
salt := bytes.Join([][]byte{[]byte(alg), ctx.p2s}, []byte{0x00})
// derive key
keyLen, h := getPbkdf2Params(alg)
key := pbkdf2.Key(ctx.key, salt, ctx.p2c, keyLen, h)
// use AES cipher with derived key
block, err := aes.NewCipher(key)
if err != nil {
return recipientInfo{}, err
}
jek, err := josecipher.KeyWrap(block, cek)
if err != nil {
return recipientInfo{}, err
}
header := &rawHeader{}
if err = header.set(headerP2C, ctx.p2c); err != nil {
return recipientInfo{}, err
}
if err = header.set(headerP2S, newBuffer(ctx.p2s)); err != nil {
return recipientInfo{}, err
}
return recipientInfo{
encryptedKey: jek,
header: header,
}, nil
}
return recipientInfo{}, ErrUnsupportedAlgorithm
}
// Decrypt the content encryption key.
func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
switch headers.getAlgorithm() {
case DIRECT:
cek := make([]byte, len(ctx.key))
copy(cek, ctx.key)
return cek, nil
case A128GCMKW, A192GCMKW, A256GCMKW:
aead := newAESGCM(len(ctx.key))
iv, err := headers.getIV()
if err != nil {
return nil, fmt.Errorf("go-jose/go-jose: invalid IV: %v", err)
}
tag, err := headers.getTag()
if err != nil {
return nil, fmt.Errorf("go-jose/go-jose: invalid tag: %v", err)
}
parts := &aeadParts{
iv: iv.bytes(),
ciphertext: recipient.encryptedKey,
tag: tag.bytes(),
}
cek, err := aead.decrypt(ctx.key, []byte{}, parts)
if err != nil {
return nil, err
}
return cek, nil
case A128KW, A192KW, A256KW:
block, err := aes.NewCipher(ctx.key)
if err != nil {
return nil, err
}
cek, err := josecipher.KeyUnwrap(block, recipient.encryptedKey)
if err != nil {
return nil, err
}
return cek, nil
case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW:
p2s, err := headers.getP2S()
if err != nil {
return nil, fmt.Errorf("go-jose/go-jose: invalid P2S: %v", err)
}
if p2s == nil || len(p2s.data) == 0 {
return nil, fmt.Errorf("go-jose/go-jose: invalid P2S: must be present")
}
p2c, err := headers.getP2C()
if err != nil {
return nil, fmt.Errorf("go-jose/go-jose: invalid P2C: %v", err)
}
if p2c <= 0 {
return nil, fmt.Errorf("go-jose/go-jose: invalid P2C: must be a positive integer")
}
// salt is UTF8(Alg) || 0x00 || Salt Input
alg := headers.getAlgorithm()
salt := bytes.Join([][]byte{[]byte(alg), p2s.bytes()}, []byte{0x00})
// derive key
keyLen, h := getPbkdf2Params(alg)
key := pbkdf2.Key(ctx.key, salt, p2c, keyLen, h)
// use AES cipher with derived key
block, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
cek, err := josecipher.KeyUnwrap(block, recipient.encryptedKey)
if err != nil {
return nil, err
}
return cek, nil
}
return nil, ErrUnsupportedAlgorithm
}
// Sign the given payload
func (ctx symmetricMac) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
mac, err := ctx.hmac(payload, alg)
if err != nil {
return Signature{}, errors.New("go-jose/go-jose: failed to compute hmac")
}
return Signature{
Signature: mac,
protected: &rawHeader{},
}, nil
}
// Verify the given payload
func (ctx symmetricMac) verifyPayload(payload []byte, mac []byte, alg SignatureAlgorithm) error {
expected, err := ctx.hmac(payload, alg)
if err != nil {
return errors.New("go-jose/go-jose: failed to compute hmac")
}
if len(mac) != len(expected) {
return errors.New("go-jose/go-jose: invalid hmac")
}
match := subtle.ConstantTimeCompare(mac, expected)
if match != 1 {
return errors.New("go-jose/go-jose: invalid hmac")
}
return nil
}
// Compute the HMAC based on the given alg value
func (ctx symmetricMac) hmac(payload []byte, alg SignatureAlgorithm) ([]byte, error) {
var hash func() hash.Hash
switch alg {
case HS256:
hash = sha256.New
case HS384:
hash = sha512.New384
case HS512:
hash = sha512.New
default:
return nil, ErrUnsupportedAlgorithm
}
hmac := hmac.New(hash, ctx.key)
// According to documentation, Write() on hash never fails
_, _ = hmac.Write(payload)
return hmac.Sum(nil), nil
}

View file

@ -112,7 +112,7 @@ func flattenComposite(errs *CompositeError) *CompositeError {
for _, er := range errs.Errors {
switch e := er.(type) {
case *CompositeError:
if len(e.Errors) > 0 {
if e != nil && len(e.Errors) > 0 {
flat := flattenComposite(e)
if len(flat.Errors) > 0 {
res = append(res, flat.Errors...)

View file

@ -1,15 +0,0 @@
after_success:
- bash <(curl -s https://codecov.io/bash)
go:
- 1.14.x
- 1.15.x
install:
- GO111MODULE=off go get -u gotest.tools/gotestsum
env:
- GO111MODULE=on
language: go
notifications:
slack:
secure: a5VgoiwB1G/AZqzmephPZIhEB9avMlsWSlVnM1dSAtYAwdrQHGTQxAmpOxYIoSPDhWNN5bfZmjd29++UlTwLcHSR+e0kJhH6IfDlsHj/HplNCJ9tyI0zYc7XchtdKgeMxMzBKCzgwFXGSbQGydXTliDNBo0HOzmY3cou/daMFTP60K+offcjS+3LRAYb1EroSRXZqrk1nuF/xDL3792DZUdPMiFR/L/Df6y74D6/QP4sTkTDFQitz4Wy/7jbsfj8dG6qK2zivgV6/l+w4OVjFkxVpPXogDWY10vVXNVynqxfJ7to2d1I9lNCHE2ilBCkWMIPdyJF7hjF8pKW+82yP4EzRh0vu8Xn0HT5MZpQxdRY/YMxNrWaG7SxsoEaO4q5uhgdzAqLYY3TRa7MjIK+7Ur+aqOeTXn6OKwVi0CjvZ6mIU3WUKSwiwkFZMbjRAkSb5CYwMEfGFO/z964xz83qGt6WAtBXNotqCQpTIiKtDHQeLOMfksHImCg6JLhQcWBVxamVgu0G3Pdh8Y6DyPnxraXY95+QDavbjqv7TeYT9T/FNnrkXaTTK0s4iWE5H4ACU0Qvz0wUYgfQrZv0/Hp7V17+rabUwnzYySHCy9SWX/7OV9Cfh31iMp9ZIffr76xmmThtOEqs8TrTtU6BWI3rWwvA9cXQipZTVtL0oswrGw=
script:
- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./...

View file

@ -1,8 +1,6 @@
linters-settings:
govet:
check-shadowing: true
golint:
min-confidence: 0
gocyclo:
min-complexity: 30
maligned:
@ -12,6 +10,8 @@ linters-settings:
goconst:
min-len: 2
min-occurrences: 4
paralleltest:
ignore-missing: true
linters:
enable-all: true
disable:
@ -39,3 +39,12 @@ linters:
- nestif
- godot
- errorlint
- varcheck
- interfacer
- deadcode
- golint
- ifshort
- structcheck
- nosnakecase
- varnamelen
- exhaustruct

View file

@ -1,24 +0,0 @@
after_success:
- bash <(curl -s https://codecov.io/bash)
go:
- 1.14.x
- 1.x
install:
- go get gotest.tools/gotestsum
jobs:
include:
# include linting job, but only for latest go version and amd64 arch
- go: 1.x
arch: amd64
install:
go get github.com/golangci/golangci-lint/cmd/golangci-lint
script:
- golangci-lint run --new-from-rev master
env:
- GO111MODULE=on
language: go
notifications:
slack:
secure: OpQG/36F7DSF00HLm9WZMhyqFCYYyYTsVDObW226cWiR8PWYiNfLZiSEvIzT1Gx4dDjhigKTIqcLhG34CkL5iNXDjm9Yyo2RYhQPlK8NErNqUEXuBqn4RqYHW48VGhEhOyDd4Ei0E2FN5ZbgpvHgtpkdZ6XDi64r3Ac89isP9aPHXQTuv2Jog6b4/OKKiUTftLcTIst0p4Cp3gqOJWf1wnoj+IadWiECNVQT6zb47IYjtyw6+uV8iUjTzdKcRB6Zc6b4Dq7JAg1Zd7Jfxkql3hlKp4PNlRf9Cy7y5iA3G7MLyg3FcPX5z2kmcyPt2jOTRMBWUJ5zIQpOxizAcN8WsT3WWBL5KbuYK6k0PzujrIDLqdxGpNmjkkMfDBT9cKmZpm2FdW+oZgPFJP+oKmAo4u4KJz/vjiPTXgQlN5bmrLuRMCp+AwC5wkIohTqWZVPE2TK6ZSnMYcg/W39s+RP/9mJoyryAvPSpBOLTI+biCgaUCTOAZxNTWpMFc3tPYntc41WWkdKcooZ9JA5DwfcaVFyTGQ3YXz+HvX6G1z/gW0Q/A4dBi9mj2iE1xm7tRTT+4VQ2AXFvSEI1HJpfPgYnwAtwOD1v3Qm2EUHk9sCdtEDR4wVGEPIVn44GnwFMnGKx9JWppMPYwFu3SVDdHt+E+LOlhZUply11Aa+IVrT2KUQ=
script:
- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./...

View file

@ -7,8 +7,8 @@ import (
)
const (
defaultHttpPort = ":80"
defaultHttpsPort = ":443"
defaultHTTPPort = ":80"
defaultHTTPSPort = ":443"
)
// Regular expressions used by the normalizations
@ -18,18 +18,24 @@ var rxDupSlashes = regexp.MustCompile(`/{2,}`)
// NormalizeURL will normalize the specified URL
// This was added to replace a previous call to the no longer maintained purell library:
// The call that was used looked like the following:
// url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes))
//
// url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes))
//
// To explain all that was included in the call above, purell.FlagsSafe was really just the following:
// - FlagLowercaseScheme
// - FlagLowercaseHost
// - FlagRemoveDefaultPort
// - FlagRemoveDuplicateSlashes (and this was mixed in with the |)
// - FlagLowercaseScheme
// - FlagLowercaseHost
// - FlagRemoveDefaultPort
// - FlagRemoveDuplicateSlashes (and this was mixed in with the |)
//
// This also normalizes the URL into its urlencoded form by removing RawPath and RawFragment.
func NormalizeURL(u *url.URL) {
lowercaseScheme(u)
lowercaseHost(u)
removeDefaultPort(u)
removeDuplicateSlashes(u)
u.RawPath = ""
u.RawFragment = ""
}
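The file path is not shown in this hunk, so rather than importing the helper, the sketch below re-creates the documented steps (lowercase scheme and host, drop the default port, collapse duplicate slashes, clear the raw forms) using only the standard library; the URL is illustrative:
package main
import (
	"fmt"
	"net/url"
	"regexp"
	"strings"
)
func main() {
	u, _ := url.Parse("HTTPS://Example.COM:443//docs//index.html")
	// Mirror the normalization steps described above.
	u.Scheme = strings.ToLower(u.Scheme)
	u.Host = strings.ToLower(u.Host)
	if u.Scheme == "https" {
		u.Host = strings.TrimSuffix(u.Host, ":443")
	}
	u.Path = regexp.MustCompile(`/{2,}`).ReplaceAllString(u.Path, "/")
	u.RawPath, u.RawFragment = "", ""
	fmt.Println(u.String()) // https://example.com/docs/index.html
}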
func lowercaseScheme(u *url.URL) {
@ -48,7 +54,7 @@ func removeDefaultPort(u *url.URL) {
if len(u.Host) > 0 {
scheme := strings.ToLower(u.Scheme)
u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string {
if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) {
if (scheme == "http" && val == defaultHTTPPort) || (scheme == "https" && val == defaultHTTPSPort) {
return ""
}
return val

View file

@ -10,8 +10,5 @@ Session Agent during the TLS handshake, and to encrypt traffic to the peer
after the TLS handshake is complete.
This repository contains the source code for the Secure Session Agent's Go
client libraries, which allow gRPC-Go applications to use the Secure Session
Agent. This repository supports the Bazel and Golang build systems.
All code in this repository is experimental and subject to change. We do not
guarantee API stability at this time.
client libraries, which allow gRPC and HTTP Go applications to use the Secure Session
Agent.

View file

@ -21,50 +21,27 @@ package service
import (
"context"
"net"
"os"
"strings"
"sync"
"time"
"google.golang.org/appengine"
"google.golang.org/appengine/socket"
grpc "google.golang.org/grpc"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
)
// An environment variable, if true, opportunistically use AppEngine-specific dialer to call S2A.
const enableAppEngineDialerEnv = "S2A_ENABLE_APP_ENGINE_DIALER"
var (
// appEngineDialerHook is an AppEngine-specific dial option that is set
// during init time. If nil, then the application is not running on Google
// AppEngine.
appEngineDialerHook func(context.Context) grpc.DialOption
// mu guards hsConnMap and hsDialer.
mu sync.Mutex
// hsConnMap represents a mapping from an S2A handshaker service address
// to a corresponding connection to an S2A handshaker service instance.
hsConnMap = make(map[string]*grpc.ClientConn)
// hsDialer will be reassigned in tests.
hsDialer = grpc.Dial
hsDialer = grpc.DialContext
)
func init() {
if !appengine.IsAppEngine() && !appengine.IsDevAppServer() {
return
}
appEngineDialerHook = func(ctx context.Context) grpc.DialOption {
return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
return socket.DialTimeout(ctx, "tcp", addr, timeout)
})
}
}
// Dial dials the S2A handshaker service. If a connection has already been
// established, this function returns it. Otherwise, a new connection is
// created.
func Dial(handshakerServiceAddress string) (*grpc.ClientConn, error) {
func Dial(ctx context.Context, handshakerServiceAddress string, transportCreds credentials.TransportCredentials) (*grpc.ClientConn, error) {
mu.Lock()
defer mu.Unlock()
@ -72,17 +49,14 @@ func Dial(handshakerServiceAddress string) (*grpc.ClientConn, error) {
if !ok {
// Create a new connection to the S2A handshaker service. Note that
// this connection stays open until the application is closed.
grpcOpts := []grpc.DialOption{
grpc.WithInsecure(),
}
if enableAppEngineDialer() && appEngineDialerHook != nil {
if grpclog.V(1) {
grpclog.Info("Using AppEngine-specific dialer to talk to S2A.")
}
grpcOpts = append(grpcOpts, appEngineDialerHook(context.Background()))
var grpcOpts []grpc.DialOption
if transportCreds != nil {
grpcOpts = append(grpcOpts, grpc.WithTransportCredentials(transportCreds))
} else {
grpcOpts = append(grpcOpts, grpc.WithTransportCredentials(insecure.NewCredentials()))
}
var err error
hsConn, err = hsDialer(handshakerServiceAddress, grpcOpts...)
hsConn, err = hsDialer(ctx, handshakerServiceAddress, grpcOpts...)
if err != nil {
return nil, err
}
@ -90,10 +64,3 @@ func Dial(handshakerServiceAddress string) (*grpc.ClientConn, error) {
}
return hsConn, nil
}
func enableAppEngineDialer() bool {
if strings.ToLower(os.Getenv(enableAppEngineDialerEnv)) == "true" {
return true
}
return false
}

View file

@ -83,13 +83,15 @@ func (t *ticketSender) sendTicketsToS2A(sessionTickets [][]byte, callComplete ch
t.ensureProcessSessionTickets.Done()
}
}()
hsConn, err := service.Dial(t.hsAddr)
ctx, cancel := context.WithTimeout(context.Background(), sessionTimeout)
defer cancel()
// The transportCreds only needs to be set when talking to S2AV2 and also
// if mTLS is required.
hsConn, err := service.Dial(ctx, t.hsAddr, nil)
if err != nil {
return err
}
client := s2apb.NewS2AServiceClient(hsConn)
ctx, cancel := context.WithTimeout(context.Background(), sessionTimeout)
defer cancel()
session, err := client.SetUpSession(ctx)
if err != nil {
return err

Some files were not shown because too many files have changed in this diff